Diffstat (limited to 'src/3rdparty/pcre2/src/sljit')
 src/3rdparty/pcre2/src/sljit/allocator_src/sljitExecAllocatorApple.c      | 133
 src/3rdparty/pcre2/src/sljit/allocator_src/sljitExecAllocatorCore.c       | 330
 src/3rdparty/pcre2/src/sljit/allocator_src/sljitExecAllocatorFreeBSD.c    | 89
 src/3rdparty/pcre2/src/sljit/allocator_src/sljitExecAllocatorPosix.c      | 62
 src/3rdparty/pcre2/src/sljit/allocator_src/sljitExecAllocatorWindows.c    | 40
 src/3rdparty/pcre2/src/sljit/allocator_src/sljitProtExecAllocatorNetBSD.c | 72
 src/3rdparty/pcre2/src/sljit/allocator_src/sljitProtExecAllocatorPosix.c  | 172
 src/3rdparty/pcre2/src/sljit/allocator_src/sljitWXExecAllocatorPosix.c    | 141
 src/3rdparty/pcre2/src/sljit/allocator_src/sljitWXExecAllocatorWindows.c  | 102
 src/3rdparty/pcre2/src/sljit/sljitConfig.h                                | 36
 src/3rdparty/pcre2/src/sljit/sljitConfigCPU.h                             | 188
 src/3rdparty/pcre2/src/sljit/sljitConfigInternal.h                        | 524
 src/3rdparty/pcre2/src/sljit/sljitExecAllocator.c                         | 16
 src/3rdparty/pcre2/src/sljit/sljitLir.c                                   | 2520
 src/3rdparty/pcre2/src/sljit/sljitLir.h                                   | 1818
 src/3rdparty/pcre2/src/sljit/sljitNativeARM_32.c                          | 3056
 src/3rdparty/pcre2/src/sljit/sljitNativeARM_64.c                          | 2127
 src/3rdparty/pcre2/src/sljit/sljitNativeARM_T2_32.c                       | 2817
 src/3rdparty/pcre2/src/sljit/sljitNativeMIPS_32.c                         | 809
 src/3rdparty/pcre2/src/sljit/sljitNativeMIPS_64.c                         | 563
 src/3rdparty/pcre2/src/sljit/sljitNativeMIPS_common.c                     | 2934
 src/3rdparty/pcre2/src/sljit/sljitNativePPC_32.c                          | 264
 src/3rdparty/pcre2/src/sljit/sljitNativePPC_64.c                          | 400
 src/3rdparty/pcre2/src/sljit/sljitNativePPC_common.c                      | 1649
 src/3rdparty/pcre2/src/sljit/sljitNativeRISCV_32.c                        | 142
 src/3rdparty/pcre2/src/sljit/sljitNativeRISCV_64.c                        | 222
 src/3rdparty/pcre2/src/sljit/sljitNativeRISCV_common.c                    | 3013
 src/3rdparty/pcre2/src/sljit/sljitNativeS390X.c                           | 2513
 src/3rdparty/pcre2/src/sljit/sljitNativeSPARC_32.c                        | 107
 src/3rdparty/pcre2/src/sljit/sljitNativeSPARC_common.c                    | 287
 src/3rdparty/pcre2/src/sljit/sljitNativeX86_32.c                          | 1843
 src/3rdparty/pcre2/src/sljit/sljitNativeX86_64.c                          | 1306
 src/3rdparty/pcre2/src/sljit/sljitNativeX86_common.c                      | 3553
 src/3rdparty/pcre2/src/sljit/sljitProtExecAllocator.c                     | 6
 src/3rdparty/pcre2/src/sljit/sljitUtils.c                                 | 31
 src/3rdparty/pcre2/src/sljit/sljitWXExecAllocator.c                       | 53
 36 files changed, 25904 insertions(+), 8034 deletions(-)
diff --git a/src/3rdparty/pcre2/src/sljit/allocator_src/sljitExecAllocatorApple.c b/src/3rdparty/pcre2/src/sljit/allocator_src/sljitExecAllocatorApple.c
new file mode 100644
index 0000000000..95b9842fa9
--- /dev/null
+++ b/src/3rdparty/pcre2/src/sljit/allocator_src/sljitExecAllocatorApple.c
@@ -0,0 +1,133 @@
+/*
+ * Stack-less Just-In-Time compiler
+ *
+ * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification, are
+ * permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this list of
+ * conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice, this list
+ * of conditions and the following disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+#include <sys/mman.h>
+/*
+ On macOS systems, returns MAP_JIT if it is defined _and_ we're running on a
+ version where it's OK to have more than one JIT block or where MAP_JIT is
+ required.
+ On non-macOS systems, returns MAP_JIT if it is defined.
+*/
+#include <TargetConditionals.h>
+
+#if (defined(TARGET_OS_OSX) && TARGET_OS_OSX) || (TARGET_OS_MAC && !TARGET_OS_IPHONE)
+
+#if defined(SLJIT_CONFIG_X86) && SLJIT_CONFIG_X86
+
+#include <sys/utsname.h>
+#include <stdlib.h>
+
+#define SLJIT_MAP_JIT (get_map_jit_flag())
+#define SLJIT_UPDATE_WX_FLAGS(from, to, enable_exec)
+
+static SLJIT_INLINE int get_map_jit_flag(void)
+{
+ size_t page_size;
+ void *ptr;
+ struct utsname name;
+ static int map_jit_flag = -1;
+
+ if (map_jit_flag < 0) {
+ map_jit_flag = 0;
+ uname(&name);
+
+ /* Kernel version for 10.14.0 (Mojave) or later */
+ if (atoi(name.release) >= 18) {
+ page_size = get_page_alignment() + 1;
+ /* Only use MAP_JIT if a hardened runtime is used */
+ ptr = mmap(NULL, page_size, PROT_WRITE | PROT_EXEC,
+ MAP_PRIVATE | MAP_ANON, -1, 0);
+
+ if (ptr != MAP_FAILED)
+ munmap(ptr, page_size);
+ else
+ map_jit_flag = MAP_JIT;
+ }
+ }
+ return map_jit_flag;
+}
+
+#elif defined(SLJIT_CONFIG_ARM) && SLJIT_CONFIG_ARM
+
+#include <AvailabilityMacros.h>
+#include <pthread.h>
+
+#define SLJIT_MAP_JIT (MAP_JIT)
+#define SLJIT_UPDATE_WX_FLAGS(from, to, enable_exec) \
+ apple_update_wx_flags(enable_exec)
+
+static SLJIT_INLINE void apple_update_wx_flags(sljit_s32 enable_exec)
+{
+#if MAC_OS_X_VERSION_MIN_REQUIRED < 110000
+ if (__builtin_available(macos 11, *))
+#endif /* BigSur */
+ pthread_jit_write_protect_np(enable_exec);
+}
+
+#elif defined(SLJIT_CONFIG_PPC) && SLJIT_CONFIG_PPC
+
+#define SLJIT_MAP_JIT (0)
+#define SLJIT_UPDATE_WX_FLAGS(from, to, enable_exec)
+
+#else
+#error "Unsupported architecture"
+#endif /* SLJIT_CONFIG */
+
+#else /* !TARGET_OS_OSX */
+
+#ifdef MAP_JIT
+#define SLJIT_MAP_JIT (MAP_JIT)
+#else
+#define SLJIT_MAP_JIT (0)
+#endif
+
+#endif /* TARGET_OS_OSX */
+
+static SLJIT_INLINE void* alloc_chunk(sljit_uw size)
+{
+ void *retval;
+ int prot = PROT_READ | PROT_WRITE | PROT_EXEC;
+ int flags = MAP_PRIVATE;
+ int fd = -1;
+
+ flags |= MAP_ANON | SLJIT_MAP_JIT;
+
+ retval = mmap(NULL, size, prot, flags, fd, 0);
+ if (retval == MAP_FAILED)
+ return NULL;
+
+ SLJIT_UPDATE_WX_FLAGS(retval, (uint8_t *)retval + size, 0);
+
+ return retval;
+}
+
+static SLJIT_INLINE void free_chunk(void *chunk, sljit_uw size)
+{
+ munmap(chunk, size);
+}
+
+#include "sljitExecAllocatorCore.c"
diff --git a/src/3rdparty/pcre2/src/sljit/allocator_src/sljitExecAllocatorCore.c b/src/3rdparty/pcre2/src/sljit/allocator_src/sljitExecAllocatorCore.c
new file mode 100644
index 0000000000..6cd391104c
--- /dev/null
+++ b/src/3rdparty/pcre2/src/sljit/allocator_src/sljitExecAllocatorCore.c
@@ -0,0 +1,330 @@
+/*
+ * Stack-less Just-In-Time compiler
+ *
+ * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification, are
+ * permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this list of
+ * conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice, this list
+ * of conditions and the following disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ This file contains a simple executable memory allocator
+
+ It is assumed that executable code blocks are usually medium (or sometimes
+ large) memory blocks, and that the allocator is called relatively
+ infrequently (it is less optimized than other allocators). Thus, using it
+ as a generic allocator is not recommended.
+
+ How does it work:
+ Memory is allocated in contiguous memory areas called chunks by alloc_chunk().
+ Chunk format:
+ [ block ][ block ] ... [ block ][ block terminator ]
+
+ Every block, including the block terminator, starts with a block_header.
+ The block header contains the size of the previous and of the current
+ block. These sizes can also hold special values.
+ Block size:
+ 0 - The block is a free_block, with a different size member.
+ 1 - The block is a block terminator.
+ n - The block is used at the moment, and the value contains its size.
+ Previous block size:
+ 0 - This is the first block of the memory chunk.
+ n - The size of the previous block.
+
+ Using these size values we can go forward or backward on the block chain.
+ The unused blocks are stored in a linked list pointed to by free_blocks.
+ This list is used to find a suitable memory area when the allocator is
+ called.
+
+ When a block is freed, the new free block is merged with its adjacent free
+ blocks if possible. For example, when the "used block" in
+
+ [ free block ][ used block ][ free block ]
+ is freed, the three blocks are merged together:
+ [ one big free block ]
+*/
+
+/* Expected functions:
+ alloc_chunk / free_chunk :
+ * allocate executable system memory chunks
+ * the size is always divisible by CHUNK_SIZE
+ SLJIT_ALLOCATOR_LOCK / SLJIT_ALLOCATOR_UNLOCK :
+ * provided as part of sljitUtils
+ * only the allocator requires this lock, sljit is fully thread safe
+ as it only uses local variables
+
+ Supported defines:
+ SLJIT_HAS_CHUNK_HEADER - (optional) sljit_chunk_header is defined
+ SLJIT_HAS_EXECUTABLE_OFFSET - (optional) has executable offset data
+ SLJIT_UPDATE_WX_FLAGS - (optional) update WX flags
+*/
+
+#ifdef SLJIT_HAS_CHUNK_HEADER
+#define CHUNK_HEADER_SIZE (sizeof(struct sljit_chunk_header))
+#else /* !SLJIT_HAS_CHUNK_HEADER */
+#define CHUNK_HEADER_SIZE 0
+#endif /* SLJIT_HAS_CHUNK_HEADER */
+
+#ifndef SLJIT_UPDATE_WX_FLAGS
+#define SLJIT_UPDATE_WX_FLAGS(from, to, enable_exec)
+#endif /* SLJIT_UPDATE_WX_FLAGS */
+
+#ifndef CHUNK_SIZE
+/* 64 KByte if not specified. */
+#define CHUNK_SIZE (sljit_uw)0x10000
+#endif /* CHUNK_SIZE */
+
+struct block_header {
+ sljit_uw size;
+ sljit_uw prev_size;
+#ifdef SLJIT_HAS_EXECUTABLE_OFFSET
+ sljit_sw executable_offset;
+#endif /* SLJIT_HAS_EXECUTABLE_OFFSET */
+};
+
+struct free_block {
+ struct block_header header;
+ struct free_block *next;
+ struct free_block *prev;
+ sljit_uw size;
+};
+
+#define AS_BLOCK_HEADER(base, offset) \
+ ((struct block_header*)(((sljit_u8*)base) + offset))
+#define AS_FREE_BLOCK(base, offset) \
+ ((struct free_block*)(((sljit_u8*)base) + offset))
+#define MEM_START(base) ((void*)((base) + 1))
+#define CHUNK_MASK (~(CHUNK_SIZE - 1))
+#define ALIGN_SIZE(size) (((size) + sizeof(struct block_header) + 7u) & ~(sljit_uw)7)
+#define CHUNK_EXTRA_SIZE (sizeof(struct block_header) + CHUNK_HEADER_SIZE)
+
+static struct free_block* free_blocks;
+static sljit_uw allocated_size;
+static sljit_uw total_size;
+
+static SLJIT_INLINE void sljit_insert_free_block(struct free_block *free_block, sljit_uw size)
+{
+ free_block->header.size = 0;
+ free_block->size = size;
+
+ free_block->next = free_blocks;
+ free_block->prev = NULL;
+ if (free_blocks)
+ free_blocks->prev = free_block;
+ free_blocks = free_block;
+}
+
+static SLJIT_INLINE void sljit_remove_free_block(struct free_block *free_block)
+{
+ if (free_block->next)
+ free_block->next->prev = free_block->prev;
+
+ if (free_block->prev)
+ free_block->prev->next = free_block->next;
+ else {
+ SLJIT_ASSERT(free_blocks == free_block);
+ free_blocks = free_block->next;
+ }
+}
+
+SLJIT_API_FUNC_ATTRIBUTE void* sljit_malloc_exec(sljit_uw size)
+{
+ struct block_header *header;
+ struct block_header *next_header;
+ struct free_block *free_block;
+ sljit_uw chunk_size;
+
+#ifdef SLJIT_HAS_CHUNK_HEADER
+ struct sljit_chunk_header *chunk_header;
+#else /* !SLJIT_HAS_CHUNK_HEADER */
+ void *chunk_header;
+#endif /* SLJIT_HAS_CHUNK_HEADER */
+
+#ifdef SLJIT_HAS_EXECUTABLE_OFFSET
+ sljit_sw executable_offset;
+#endif /* SLJIT_HAS_EXECUTABLE_OFFSET */
+
+ if (size < (64 - sizeof(struct block_header)))
+ size = (64 - sizeof(struct block_header));
+ size = ALIGN_SIZE(size);
+
+ SLJIT_ALLOCATOR_LOCK();
+ free_block = free_blocks;
+ while (free_block) {
+ if (free_block->size >= size) {
+ chunk_size = free_block->size;
+ SLJIT_UPDATE_WX_FLAGS(NULL, NULL, 0);
+ if (chunk_size > size + 64) {
+ /* We just cut a block from the end of the free block. */
+ chunk_size -= size;
+ free_block->size = chunk_size;
+ header = AS_BLOCK_HEADER(free_block, chunk_size);
+ header->prev_size = chunk_size;
+#ifdef SLJIT_HAS_EXECUTABLE_OFFSET
+ header->executable_offset = free_block->header.executable_offset;
+#endif /* SLJIT_HAS_EXECUTABLE_OFFSET */
+ AS_BLOCK_HEADER(header, size)->prev_size = size;
+ }
+ else {
+ sljit_remove_free_block(free_block);
+ header = (struct block_header*)free_block;
+ size = chunk_size;
+ }
+ allocated_size += size;
+ header->size = size;
+ SLJIT_ALLOCATOR_UNLOCK();
+ return MEM_START(header);
+ }
+ free_block = free_block->next;
+ }
+
+ chunk_size = (size + CHUNK_EXTRA_SIZE + CHUNK_SIZE - 1) & CHUNK_MASK;
+
+ chunk_header = alloc_chunk(chunk_size);
+ if (!chunk_header) {
+ SLJIT_ALLOCATOR_UNLOCK();
+ return NULL;
+ }
+
+#ifdef SLJIT_HAS_EXECUTABLE_OFFSET
+ executable_offset = (sljit_sw)((sljit_u8*)chunk_header->executable - (sljit_u8*)chunk_header);
+#endif /* SLJIT_HAS_EXECUTABLE_OFFSET */
+
+ chunk_size -= CHUNK_EXTRA_SIZE;
+ total_size += chunk_size;
+
+ header = (struct block_header*)(((sljit_u8*)chunk_header) + CHUNK_HEADER_SIZE);
+
+ header->prev_size = 0;
+#ifdef SLJIT_HAS_EXECUTABLE_OFFSET
+ header->executable_offset = executable_offset;
+#endif /* SLJIT_HAS_EXECUTABLE_OFFSET */
+
+ if (chunk_size > size + 64) {
+ /* Cut the allocated space into a free and a used block. */
+ allocated_size += size;
+ header->size = size;
+ chunk_size -= size;
+
+ free_block = AS_FREE_BLOCK(header, size);
+ free_block->header.prev_size = size;
+#ifdef SLJIT_HAS_EXECUTABLE_OFFSET
+ free_block->header.executable_offset = executable_offset;
+#endif /* SLJIT_HAS_EXECUTABLE_OFFSET */
+ sljit_insert_free_block(free_block, chunk_size);
+ next_header = AS_BLOCK_HEADER(free_block, chunk_size);
+ }
+ else {
+ /* All space belongs to this allocation. */
+ allocated_size += chunk_size;
+ header->size = chunk_size;
+ next_header = AS_BLOCK_HEADER(header, chunk_size);
+ }
+ SLJIT_ALLOCATOR_UNLOCK();
+ next_header->size = 1;
+ next_header->prev_size = chunk_size;
+#ifdef SLJIT_HAS_EXECUTABLE_OFFSET
+ next_header->executable_offset = executable_offset;
+#endif /* SLJIT_HAS_EXECUTABLE_OFFSET */
+ return MEM_START(header);
+}
+
+SLJIT_API_FUNC_ATTRIBUTE void sljit_free_exec(void* ptr)
+{
+ struct block_header *header;
+ struct free_block* free_block;
+
+ SLJIT_ALLOCATOR_LOCK();
+ header = AS_BLOCK_HEADER(ptr, -(sljit_sw)sizeof(struct block_header));
+#ifdef SLJIT_HAS_EXECUTABLE_OFFSET
+ header = AS_BLOCK_HEADER(header, -header->executable_offset);
+#endif /* SLJIT_HAS_EXECUTABLE_OFFSET */
+ allocated_size -= header->size;
+
+ SLJIT_UPDATE_WX_FLAGS(NULL, NULL, 0);
+
+ /* Connecting free blocks together if possible. */
+
+ /* If header->prev_size == 0, free_block will be equal to header.
+ In this case, free_block->header.size will be > 0. */
+ free_block = AS_FREE_BLOCK(header, -(sljit_sw)header->prev_size);
+ if (SLJIT_UNLIKELY(!free_block->header.size)) {
+ free_block->size += header->size;
+ header = AS_BLOCK_HEADER(free_block, free_block->size);
+ header->prev_size = free_block->size;
+ }
+ else {
+ free_block = (struct free_block*)header;
+ sljit_insert_free_block(free_block, header->size);
+ }
+
+ header = AS_BLOCK_HEADER(free_block, free_block->size);
+ if (SLJIT_UNLIKELY(!header->size)) {
+ free_block->size += ((struct free_block*)header)->size;
+ sljit_remove_free_block((struct free_block*)header);
+ header = AS_BLOCK_HEADER(free_block, free_block->size);
+ header->prev_size = free_block->size;
+ }
+
+ /* The whole chunk is free. */
+ if (SLJIT_UNLIKELY(!free_block->header.prev_size && header->size == 1)) {
+ /* If this block is freed, we still have (allocated_size / 2) free space. */
+ if (total_size - free_block->size > (allocated_size * 3 / 2)) {
+ total_size -= free_block->size;
+ sljit_remove_free_block(free_block);
+ free_chunk(free_block, free_block->size + CHUNK_EXTRA_SIZE);
+ }
+ }
+
+ SLJIT_UPDATE_WX_FLAGS(NULL, NULL, 1);
+ SLJIT_ALLOCATOR_UNLOCK();
+}
+
+SLJIT_API_FUNC_ATTRIBUTE void sljit_free_unused_memory_exec(void)
+{
+ struct free_block* free_block;
+ struct free_block* next_free_block;
+
+ SLJIT_ALLOCATOR_LOCK();
+ SLJIT_UPDATE_WX_FLAGS(NULL, NULL, 0);
+
+ free_block = free_blocks;
+ while (free_block) {
+ next_free_block = free_block->next;
+ if (!free_block->header.prev_size &&
+ AS_BLOCK_HEADER(free_block, free_block->size)->size == 1) {
+ total_size -= free_block->size;
+ sljit_remove_free_block(free_block);
+ free_chunk(free_block, free_block->size + CHUNK_EXTRA_SIZE);
+ }
+ free_block = next_free_block;
+ }
+
+ SLJIT_ASSERT((total_size && free_blocks) || (!total_size && !free_blocks));
+ SLJIT_UPDATE_WX_FLAGS(NULL, NULL, 1);
+ SLJIT_ALLOCATOR_UNLOCK();
+}
+
+#ifdef SLJIT_HAS_EXECUTABLE_OFFSET
+SLJIT_API_FUNC_ATTRIBUTE sljit_sw sljit_exec_offset(void* ptr)
+{
+ return ((struct block_header *)(ptr))[-1].executable_offset;
+}
+#endif /* SLJIT_HAS_EXECUTABLE_OFFSET */
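
For orientation, a hypothetical helper (not an sljit API) that walks one chunk using the size / prev_size scheme described in the header comment above:

/* Walks a chunk's block chain; `header` points at the chunk's first
   block_header. Relies only on the conventions documented above. */
static void walk_chunk(struct block_header *header)
{
	while (header->size != 1) { /* 1 marks the block terminator */
		if (header->size == 0) {
			/* Free block: its real size lives in struct free_block. */
			struct free_block *fb = (struct free_block *)header;
			header = AS_BLOCK_HEADER(fb, fb->size);
		}
		else
			/* Used block: size is the distance to the next header. */
			header = AS_BLOCK_HEADER(header, header->size);
	}
}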
diff --git a/src/3rdparty/pcre2/src/sljit/allocator_src/sljitExecAllocatorFreeBSD.c b/src/3rdparty/pcre2/src/sljit/allocator_src/sljitExecAllocatorFreeBSD.c
new file mode 100644
index 0000000000..3b93a4df76
--- /dev/null
+++ b/src/3rdparty/pcre2/src/sljit/allocator_src/sljitExecAllocatorFreeBSD.c
@@ -0,0 +1,89 @@
+/*
+ * Stack-less Just-In-Time compiler
+ *
+ * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification, are
+ * permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this list of
+ * conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice, this list
+ * of conditions and the following disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/mman.h>
+#include <sys/procctl.h>
+
+#ifdef PROC_WXMAP_CTL
+static SLJIT_INLINE int sljit_is_wx_block(void)
+{
+ static int wx_block = -1;
+ if (wx_block < 0) {
+ int sljit_wx_enable = PROC_WX_MAPPINGS_PERMIT;
+ wx_block = !!procctl(P_PID, 0, PROC_WXMAP_CTL, &sljit_wx_enable);
+ }
+ return wx_block;
+}
+
+#define SLJIT_IS_WX_BLOCK sljit_is_wx_block()
+#else /* !PROC_WXMAP_CTL */
+#define SLJIT_IS_WX_BLOCK (1)
+#endif /* PROC_WXMAP_CTL */
+
+static SLJIT_INLINE void* alloc_chunk(sljit_uw size)
+{
+ void *retval;
+ int prot = PROT_READ | PROT_WRITE | PROT_EXEC;
+ int flags = MAP_PRIVATE;
+ int fd = -1;
+
+#ifdef PROT_MAX
+ prot |= PROT_MAX(prot);
+#endif
+
+#ifdef MAP_ANON
+ flags |= MAP_ANON;
+#else /* !MAP_ANON */
+ if (SLJIT_UNLIKELY((dev_zero < 0) && open_dev_zero()))
+ return NULL;
+
+ fd = dev_zero;
+#endif /* MAP_ANON */
+
+retry:
+ retval = mmap(NULL, size, prot, flags, fd, 0);
+ if (retval == MAP_FAILED) {
+ if (!SLJIT_IS_WX_BLOCK)
+ goto retry;
+
+ return NULL;
+ }
+
+ /* HardenedBSD's mmap lies, so check permissions again. */
+ if (mprotect(retval, size, PROT_READ | PROT_WRITE | PROT_EXEC) < 0) {
+ munmap(retval, size);
+ return NULL;
+ }
+
+ return retval;
+}
+
+static SLJIT_INLINE void free_chunk(void *chunk, sljit_uw size)
+{
+ munmap(chunk, size);
+}
+
+#include "sljitExecAllocatorCore.c"
diff --git a/src/3rdparty/pcre2/src/sljit/allocator_src/sljitExecAllocatorPosix.c b/src/3rdparty/pcre2/src/sljit/allocator_src/sljitExecAllocatorPosix.c
new file mode 100644
index 0000000000..a775f5629a
--- /dev/null
+++ b/src/3rdparty/pcre2/src/sljit/allocator_src/sljitExecAllocatorPosix.c
@@ -0,0 +1,62 @@
+/*
+ * Stack-less Just-In-Time compiler
+ *
+ * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification, are
+ * permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this list of
+ * conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice, this list
+ * of conditions and the following disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+#include <sys/mman.h>
+
+static SLJIT_INLINE void* alloc_chunk(sljit_uw size)
+{
+ void *retval;
+ int prot = PROT_READ | PROT_WRITE | PROT_EXEC;
+ int flags = MAP_PRIVATE;
+ int fd = -1;
+
+#ifdef PROT_MAX
+ prot |= PROT_MAX(prot);
+#endif
+
+#ifdef MAP_ANON
+ flags |= MAP_ANON;
+#else /* !MAP_ANON */
+ if (SLJIT_UNLIKELY((dev_zero < 0) && open_dev_zero()))
+ return NULL;
+
+ fd = dev_zero;
+#endif /* MAP_ANON */
+
+ retval = mmap(NULL, size, prot, flags, fd, 0);
+ if (retval == MAP_FAILED)
+ return NULL;
+
+ return retval;
+}
+
+static SLJIT_INLINE void free_chunk(void *chunk, sljit_uw size)
+{
+ munmap(chunk, size);
+}
+
+#include "sljitExecAllocatorCore.c"
diff --git a/src/3rdparty/pcre2/src/sljit/allocator_src/sljitExecAllocatorWindows.c b/src/3rdparty/pcre2/src/sljit/allocator_src/sljitExecAllocatorWindows.c
new file mode 100644
index 0000000000..f152a5a2cd
--- /dev/null
+++ b/src/3rdparty/pcre2/src/sljit/allocator_src/sljitExecAllocatorWindows.c
@@ -0,0 +1,40 @@
+/*
+ * Stack-less Just-In-Time compiler
+ *
+ * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification, are
+ * permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this list of
+ * conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice, this list
+ * of conditions and the following disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define SLJIT_UPDATE_WX_FLAGS(from, to, enable_exec)
+
+static SLJIT_INLINE void* alloc_chunk(sljit_uw size)
+{
+ return VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE);
+}
+
+static SLJIT_INLINE void free_chunk(void *chunk, sljit_uw size)
+{
+ SLJIT_UNUSED_ARG(size);
+ VirtualFree(chunk, 0, MEM_RELEASE);
+}
+
+#include "sljitExecAllocatorCore.c"
diff --git a/src/3rdparty/pcre2/src/sljit/allocator_src/sljitProtExecAllocatorNetBSD.c b/src/3rdparty/pcre2/src/sljit/allocator_src/sljitProtExecAllocatorNetBSD.c
new file mode 100644
index 0000000000..0b7fd57787
--- /dev/null
+++ b/src/3rdparty/pcre2/src/sljit/allocator_src/sljitProtExecAllocatorNetBSD.c
@@ -0,0 +1,72 @@
+/*
+ * Stack-less Just-In-Time compiler
+ *
+ * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification, are
+ * permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this list of
+ * conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice, this list
+ * of conditions and the following disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define SLJIT_HAS_CHUNK_HEADER
+#define SLJIT_HAS_EXECUTABLE_OFFSET
+
+struct sljit_chunk_header {
+ void *executable;
+};
+
+/*
+ * MAP_REMAPDUP is a NetBSD extension available since 8.0; make sure to
+ * adjust your feature macros (e.g. -D_NETBSD_SOURCE) as needed.
+ */
+static SLJIT_INLINE struct sljit_chunk_header* alloc_chunk(sljit_uw size)
+{
+ struct sljit_chunk_header *retval;
+
+ retval = (struct sljit_chunk_header *)mmap(NULL, size,
+ PROT_READ | PROT_WRITE | PROT_MPROTECT(PROT_EXEC),
+ MAP_ANON | MAP_SHARED, -1, 0);
+
+ if (retval == MAP_FAILED)
+ return NULL;
+
+ retval->executable = mremap(retval, size, NULL, size, MAP_REMAPDUP);
+ if (retval->executable == MAP_FAILED) {
+ munmap((void *)retval, size);
+ return NULL;
+ }
+
+ if (mprotect(retval->executable, size, PROT_READ | PROT_EXEC) == -1) {
+ munmap(retval->executable, size);
+ munmap((void *)retval, size);
+ return NULL;
+ }
+
+ return retval;
+}
+
+static SLJIT_INLINE void free_chunk(void *chunk, sljit_uw size)
+{
+ struct sljit_chunk_header *header = ((struct sljit_chunk_header *)chunk) - 1;
+
+ munmap(header->executable, size);
+ munmap((void *)header, size);
+}
+
+#include "sljitExecAllocatorCore.c"
diff --git a/src/3rdparty/pcre2/src/sljit/allocator_src/sljitProtExecAllocatorPosix.c b/src/3rdparty/pcre2/src/sljit/allocator_src/sljitProtExecAllocatorPosix.c
new file mode 100644
index 0000000000..f7cb6c5670
--- /dev/null
+++ b/src/3rdparty/pcre2/src/sljit/allocator_src/sljitProtExecAllocatorPosix.c
@@ -0,0 +1,172 @@
+/*
+ * Stack-less Just-In-Time compiler
+ *
+ * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification, are
+ * permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this list of
+ * conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice, this list
+ * of conditions and the following disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define SLJIT_HAS_CHUNK_HEADER
+#define SLJIT_HAS_EXECUTABLE_OFFSET
+
+struct sljit_chunk_header {
+ void *executable;
+};
+
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+
+#ifndef O_NOATIME
+#define O_NOATIME 0
+#endif
+
+/* This is a Linux extension available since kernel 3.11. */
+#ifndef O_TMPFILE
+#define O_TMPFILE 0x404000
+#endif
+
+#ifndef _GNU_SOURCE
+char *secure_getenv(const char *name);
+int mkostemp(char *template, int flags);
+#endif
+
+static SLJIT_INLINE int create_tempfile(void)
+{
+ int fd;
+ char tmp_name[256];
+ size_t tmp_name_len = 0;
+ char *dir;
+ struct stat st;
+#if defined(SLJIT_SINGLE_THREADED) && SLJIT_SINGLE_THREADED
+ mode_t mode;
+#endif
+
+#ifdef HAVE_MEMFD_CREATE
+ /* This is a GNU extension; make sure to build with -D_GNU_SOURCE. */
+ fd = memfd_create("sljit", MFD_CLOEXEC);
+ if (fd != -1) {
+ fchmod(fd, 0);
+ return fd;
+ }
+#endif
+
+ dir = secure_getenv("TMPDIR");
+
+ if (dir) {
+ size_t len = strlen(dir);
+ if (len > 0 && len < sizeof(tmp_name)) {
+ if ((stat(dir, &st) == 0) && S_ISDIR(st.st_mode)) {
+ memcpy(tmp_name, dir, len + 1);
+ tmp_name_len = len;
+ }
+ }
+ }
+
+#ifdef P_tmpdir
+ if (!tmp_name_len) {
+ tmp_name_len = strlen(P_tmpdir);
+ if (tmp_name_len > 0 && tmp_name_len < sizeof(tmp_name))
+ strcpy(tmp_name, P_tmpdir);
+ }
+#endif
+ if (!tmp_name_len) {
+ strcpy(tmp_name, "/tmp");
+ tmp_name_len = 4;
+ }
+
+ SLJIT_ASSERT(tmp_name_len > 0 && tmp_name_len < sizeof(tmp_name));
+
+ if (tmp_name_len > 1 && tmp_name[tmp_name_len - 1] == '/')
+ tmp_name[--tmp_name_len] = '\0';
+
+ fd = open(tmp_name, O_TMPFILE | O_EXCL | O_RDWR | O_NOATIME | O_CLOEXEC, 0);
+ if (fd != -1)
+ return fd;
+
+ if (tmp_name_len >= sizeof(tmp_name) - 7)
+ return -1;
+
+ strcpy(tmp_name + tmp_name_len, "/XXXXXX");
+#if defined(SLJIT_SINGLE_THREADED) && SLJIT_SINGLE_THREADED
+ mode = umask(0777);
+#endif
+ fd = mkostemp(tmp_name, O_CLOEXEC | O_NOATIME);
+#if defined(SLJIT_SINGLE_THREADED) && SLJIT_SINGLE_THREADED
+ umask(mode);
+#else
+ fchmod(fd, 0);
+#endif
+
+ if (fd == -1)
+ return -1;
+
+ if (unlink(tmp_name)) {
+ close(fd);
+ return -1;
+ }
+
+ return fd;
+}
+
+static SLJIT_INLINE struct sljit_chunk_header* alloc_chunk(sljit_uw size)
+{
+ struct sljit_chunk_header *retval;
+ int fd;
+
+ fd = create_tempfile();
+ if (fd == -1)
+ return NULL;
+
+ if (ftruncate(fd, (off_t)size)) {
+ close(fd);
+ return NULL;
+ }
+
+ retval = (struct sljit_chunk_header *)mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+
+ if (retval == MAP_FAILED) {
+ close(fd);
+ return NULL;
+ }
+
+ retval->executable = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
+
+ if (retval->executable == MAP_FAILED) {
+ munmap((void *)retval, size);
+ close(fd);
+ return NULL;
+ }
+
+ close(fd);
+ return retval;
+}
+
+static SLJIT_INLINE void free_chunk(void *chunk, sljit_uw size)
+{
+ struct sljit_chunk_header *header = ((struct sljit_chunk_header *)chunk) - 1;
+
+ munmap(header->executable, size);
+ munmap((void *)header, size);
+}
+
+#include "sljitExecAllocatorCore.c"
diff --git a/src/3rdparty/pcre2/src/sljit/allocator_src/sljitWXExecAllocatorPosix.c b/src/3rdparty/pcre2/src/sljit/allocator_src/sljitWXExecAllocatorPosix.c
new file mode 100644
index 0000000000..36d301434a
--- /dev/null
+++ b/src/3rdparty/pcre2/src/sljit/allocator_src/sljitWXExecAllocatorPosix.c
@@ -0,0 +1,141 @@
+/*
+ * Stack-less Just-In-Time compiler
+ *
+ * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification, are
+ * permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this list of
+ * conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice, this list
+ * of conditions and the following disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ This file contains a simple W^X executable memory allocator
+
+ On *NIX, MAP_ANON is required (that is considered a feature), so make
+ sure to set the right availability macros for your system, or the code
+ will fail to build.
+
+ If your system doesn't support mapping of anonymous pages (e.g. IRIX), it
+ is also likely that it doesn't need this allocator and should be using
+ the standard one instead.
+
+ It allocates a separate mapping for each code block and may waste a lot of
+ memory, because whatever size was requested will be rounded up to the page
+ size (minimum 4KB, but possibly bigger).
+
+ It changes the page permissions (RW <-> RX) as needed; therefore, if you
+ update the code after it has been generated, you need to make sure to
+ block any concurrent execution, or a SIGBUS may result, one that could
+ even manifest itself at a different address than the one that was being
+ modified.
+
+ Only use this if you are unable to use the regular allocator because of
+ security restrictions and adding exceptions to your application or the
+ system is not possible.
+*/
+
+#include <sys/types.h>
+#include <sys/mman.h>
+
+#define SLJIT_UPDATE_WX_FLAGS(from, to, enable_exec) \
+ sljit_update_wx_flags((from), (to), (enable_exec))
+
+#if !(defined SLJIT_SINGLE_THREADED && SLJIT_SINGLE_THREADED)
+#include <pthread.h>
+#define SLJIT_SE_LOCK() pthread_mutex_lock(&se_lock)
+#define SLJIT_SE_UNLOCK() pthread_mutex_unlock(&se_lock)
+#else
+#define SLJIT_SE_LOCK()
+#define SLJIT_SE_UNLOCK()
+#endif /* !SLJIT_SINGLE_THREADED */
+
+#define SLJIT_WX_IS_BLOCK(ptr, size) generic_check_is_wx_block(ptr, size)
+
+static SLJIT_INLINE int generic_check_is_wx_block(void *ptr, sljit_uw size)
+{
+ if (SLJIT_LIKELY(!mprotect(ptr, size, PROT_EXEC)))
+ return !!mprotect(ptr, size, PROT_READ | PROT_WRITE);
+
+ return 1;
+}
+
+SLJIT_API_FUNC_ATTRIBUTE void* sljit_malloc_exec(sljit_uw size)
+{
+#if !(defined SLJIT_SINGLE_THREADED && SLJIT_SINGLE_THREADED)
+ static pthread_mutex_t se_lock = PTHREAD_MUTEX_INITIALIZER;
+#endif
+ static int wx_block = -1;
+ int prot = PROT_READ | PROT_WRITE;
+ sljit_uw* ptr;
+
+ if (SLJIT_UNLIKELY(wx_block > 0))
+ return NULL;
+
+#ifdef PROT_MAX
+ prot |= PROT_MAX(PROT_READ | PROT_WRITE | PROT_EXEC);
+#endif
+
+ size += sizeof(sljit_uw);
+ ptr = (sljit_uw*)mmap(NULL, size, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
+
+ if (ptr == MAP_FAILED)
+ return NULL;
+
+ if (SLJIT_UNLIKELY(wx_block < 0)) {
+ SLJIT_SE_LOCK();
+ wx_block = SLJIT_WX_IS_BLOCK(ptr, size);
+ SLJIT_SE_UNLOCK();
+ if (SLJIT_UNLIKELY(wx_block)) {
+ munmap((void *)ptr, size);
+ return NULL;
+ }
+ }
+
+ *ptr++ = size;
+ return ptr;
+}
+
+#undef SLJIT_SE_UNLOCK
+#undef SLJIT_SE_LOCK
+
+SLJIT_API_FUNC_ATTRIBUTE void sljit_free_exec(void* ptr)
+{
+ sljit_uw *start_ptr = ((sljit_uw*)ptr) - 1;
+ munmap((void*)start_ptr, *start_ptr);
+}
+
+static void sljit_update_wx_flags(void *from, void *to, int enable_exec)
+{
+ sljit_uw page_mask = (sljit_uw)get_page_alignment();
+ sljit_uw start = (sljit_uw)from;
+ sljit_uw end = (sljit_uw)to;
+ int prot = PROT_READ | (enable_exec ? PROT_EXEC : PROT_WRITE);
+
+ SLJIT_ASSERT(start < end);
+
+ start &= ~page_mask;
+ end = (end + page_mask) & ~page_mask;
+
+ mprotect((void*)start, end - start, prot);
+}
+
+SLJIT_API_FUNC_ATTRIBUTE void sljit_free_unused_memory_exec(void)
+{
+ /* This allocator does not keep unused memory for future allocations. */
+}
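
A hypothetical usage sketch of the W^X rule this allocator enforces: a page is either writable or executable, never both, and mprotect() flips between the two states around every code update.

#include <sys/mman.h>
#include <string.h>

/* `page` must be page-aligned mmap memory currently mapped RW. */
static int run_wx(void *page, size_t page_size,
	const void *code, size_t code_size)
{
	memcpy(page, code, code_size); /* write while the page is RW */
	if (mprotect(page, page_size, PROT_READ | PROT_EXEC))
		return -1; /* flip to RX before running */
	((void (*)(void))page)();
	/* flip back to RW before the next code update */
	return mprotect(page, page_size, PROT_READ | PROT_WRITE);
}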
diff --git a/src/3rdparty/pcre2/src/sljit/allocator_src/sljitWXExecAllocatorWindows.c b/src/3rdparty/pcre2/src/sljit/allocator_src/sljitWXExecAllocatorWindows.c
new file mode 100644
index 0000000000..a9553bd7da
--- /dev/null
+++ b/src/3rdparty/pcre2/src/sljit/allocator_src/sljitWXExecAllocatorWindows.c
@@ -0,0 +1,102 @@
+/*
+ * Stack-less Just-In-Time compiler
+ *
+ * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification, are
+ * permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this list of
+ * conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice, this list
+ * of conditions and the following disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ This file contains a simple W^X executable memory allocator
+
+ On *NIX, MAP_ANON is required (that is considered a feature), so make
+ sure to set the right availability macros for your system, or the code
+ will fail to build.
+
+ If your system doesn't support mapping of anonymous pages (e.g. IRIX), it
+ is also likely that it doesn't need this allocator and should be using
+ the standard one instead.
+
+ It allocates a separate mapping for each code block and may waste a lot of
+ memory, because whatever size was requested will be rounded up to the page
+ size (minimum 4KB, but possibly bigger).
+
+ It changes the page permissions (RW <-> RX) as needed; therefore, if you
+ update the code after it has been generated, you need to make sure to
+ block any concurrent execution, or a SIGBUS may result, one that could
+ even manifest itself at a different address than the one that was being
+ modified.
+
+ Only use this if you are unable to use the regular allocator because of
+ security restrictions and adding exceptions to your application or the
+ system is not possible.
+*/
+
+#define SLJIT_UPDATE_WX_FLAGS(from, to, enable_exec) \
+ sljit_update_wx_flags((from), (to), (enable_exec))
+
+SLJIT_API_FUNC_ATTRIBUTE void* sljit_malloc_exec(sljit_uw size)
+{
+ sljit_uw *ptr;
+
+ size += sizeof(sljit_uw);
+ ptr = (sljit_uw*)VirtualAlloc(NULL, size,
+ MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
+
+ if (!ptr)
+ return NULL;
+
+ *ptr++ = size;
+
+ return ptr;
+}
+
+SLJIT_API_FUNC_ATTRIBUTE void sljit_free_exec(void* ptr)
+{
+ sljit_uw start = (sljit_uw)ptr - sizeof(sljit_uw);
+#if defined(SLJIT_DEBUG) && SLJIT_DEBUG
+ sljit_uw page_mask = (sljit_uw)get_page_alignment();
+
+ SLJIT_ASSERT(!(start & page_mask));
+#endif
+ VirtualFree((void*)start, 0, MEM_RELEASE);
+}
+
+static void sljit_update_wx_flags(void *from, void *to, sljit_s32 enable_exec)
+{
+ DWORD oldprot;
+ sljit_uw page_mask = (sljit_uw)get_page_alignment();
+ sljit_uw start = (sljit_uw)from;
+ sljit_uw end = (sljit_uw)to;
+ DWORD prot = enable_exec ? PAGE_EXECUTE : PAGE_READWRITE;
+
+ SLJIT_ASSERT(start < end);
+
+ start &= ~page_mask;
+ end = (end + page_mask) & ~page_mask;
+
+ VirtualProtect((void*)start, end - start, prot, &oldprot);
+}
+
+SLJIT_API_FUNC_ATTRIBUTE void sljit_free_unused_memory_exec(void)
+{
+ /* This allocator does not keep unused memory for future allocations. */
+}
diff --git a/src/3rdparty/pcre2/src/sljit/sljitConfig.h b/src/3rdparty/pcre2/src/sljit/sljitConfig.h
index 1c821d287d..364c8bb788 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitConfig.h
+++ b/src/3rdparty/pcre2/src/sljit/sljitConfig.h
@@ -39,27 +39,6 @@ extern "C" {
*/
/* --------------------------------------------------------------------- */
-/* Architecture */
-/* --------------------------------------------------------------------- */
-
-/* Architecture selection. */
-/* #define SLJIT_CONFIG_X86_32 1 */
-/* #define SLJIT_CONFIG_X86_64 1 */
-/* #define SLJIT_CONFIG_ARM_V5 1 */
-/* #define SLJIT_CONFIG_ARM_V7 1 */
-/* #define SLJIT_CONFIG_ARM_THUMB2 1 */
-/* #define SLJIT_CONFIG_ARM_64 1 */
-/* #define SLJIT_CONFIG_PPC_32 1 */
-/* #define SLJIT_CONFIG_PPC_64 1 */
-/* #define SLJIT_CONFIG_MIPS_32 1 */
-/* #define SLJIT_CONFIG_MIPS_64 1 */
-/* #define SLJIT_CONFIG_SPARC_32 1 */
-/* #define SLJIT_CONFIG_S390X 1 */
-
-/* #define SLJIT_CONFIG_AUTO 1 */
-/* #define SLJIT_CONFIG_UNSUPPORTED 1 */
-
-/* --------------------------------------------------------------------- */
/* Utilities */
/* --------------------------------------------------------------------- */
@@ -95,7 +74,9 @@ extern "C" {
/* Executable code allocation:
If SLJIT_EXECUTABLE_ALLOCATOR is not defined, the application should
- define SLJIT_MALLOC_EXEC, SLJIT_FREE_EXEC, and SLJIT_EXEC_OFFSET. */
+ define SLJIT_MALLOC_EXEC and SLJIT_FREE_EXEC.
+ Optionally, depending on the implementation used for the allocator,
+ SLJIT_EXEC_OFFSET and SLJIT_UPDATE_WX_FLAGS might also be needed. */
#ifndef SLJIT_EXECUTABLE_ALLOCATOR
/* Enabled by default. */
#define SLJIT_EXECUTABLE_ALLOCATOR 1
@@ -127,17 +108,6 @@ extern "C" {
#endif /* !SLJIT_EXECUTABLE_ALLOCATOR */
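
A hedged sketch of what such an application-provided setup could look like (my_exec_alloc / my_exec_free are hypothetical functions; the two-argument macro shape mirrors the defaults in sljitConfigInternal.h):

#define SLJIT_EXECUTABLE_ALLOCATOR 0
#define SLJIT_MALLOC_EXEC(size, exec_allocator_data) my_exec_alloc(size)
#define SLJIT_FREE_EXEC(ptr, exec_allocator_data) my_exec_free(ptr)
/* Only if code runs at a different address than it is written to: */
/* #define SLJIT_EXEC_OFFSET(ptr) my_exec_offset(ptr) */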
-/* Force cdecl calling convention even if a better calling
- convention (e.g. fastcall) is supported by the C compiler.
- If this option is disabled (this is the default), functions
- called from JIT should be defined with SLJIT_FUNC attribute.
- Standard C functions can still be called by using the
- SLJIT_CALL_CDECL jump type. */
-#ifndef SLJIT_USE_CDECL_CALLING_CONVENTION
-/* Disabled by default */
-#define SLJIT_USE_CDECL_CALLING_CONVENTION 0
-#endif
-
/* Return with error when an invalid argument is passed. */
#ifndef SLJIT_ARGUMENT_CHECKS
/* Disabled by default */
diff --git a/src/3rdparty/pcre2/src/sljit/sljitConfigCPU.h b/src/3rdparty/pcre2/src/sljit/sljitConfigCPU.h
new file mode 100644
index 0000000000..2720bdab0b
--- /dev/null
+++ b/src/3rdparty/pcre2/src/sljit/sljitConfigCPU.h
@@ -0,0 +1,188 @@
+/*
+ * Stack-less Just-In-Time compiler
+ *
+ * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification, are
+ * permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this list of
+ * conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice, this list
+ * of conditions and the following disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef SLJIT_CONFIG_CPU_H_
+#define SLJIT_CONFIG_CPU_H_
+
+/* --------------------------------------------------------------------- */
+/* Architecture */
+/* --------------------------------------------------------------------- */
+
+/* Architecture selection. */
+/* #define SLJIT_CONFIG_X86_32 1 */
+/* #define SLJIT_CONFIG_X86_64 1 */
+/* #define SLJIT_CONFIG_ARM_V6 1 */
+/* #define SLJIT_CONFIG_ARM_V7 1 */
+/* #define SLJIT_CONFIG_ARM_THUMB2 1 */
+/* #define SLJIT_CONFIG_ARM_64 1 */
+/* #define SLJIT_CONFIG_PPC_32 1 */
+/* #define SLJIT_CONFIG_PPC_64 1 */
+/* #define SLJIT_CONFIG_MIPS_32 1 */
+/* #define SLJIT_CONFIG_MIPS_64 1 */
+/* #define SLJIT_CONFIG_RISCV_32 1 */
+/* #define SLJIT_CONFIG_RISCV_64 1 */
+/* #define SLJIT_CONFIG_S390X 1 */
+/* #define SLJIT_CONFIG_LOONGARCH_64 1 */
+
+/* #define SLJIT_CONFIG_AUTO 1 */
+/* #define SLJIT_CONFIG_UNSUPPORTED 1 */
+
+/*****************/
+/* Sanity check. */
+/*****************/
+
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) \
+ + (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) \
+ + (defined SLJIT_CONFIG_ARM_V6 && SLJIT_CONFIG_ARM_V6) \
+ + (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7) \
+ + (defined SLJIT_CONFIG_ARM_THUMB2 && SLJIT_CONFIG_ARM_THUMB2) \
+ + (defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) \
+ + (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) \
+ + (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) \
+ + (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) \
+ + (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) \
+ + (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) \
+ + (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64) \
+ + (defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X) \
+ + (defined SLJIT_CONFIG_LOONGARCH_64 && SLJIT_CONFIG_LOONGARCH_64) \
+ + (defined SLJIT_CONFIG_AUTO && SLJIT_CONFIG_AUTO) \
+ + (defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED) >= 2
+#error "Multiple architectures are selected"
+#endif
+
+#if !(defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) \
+ && !(defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) \
+ && !(defined SLJIT_CONFIG_ARM_V6 && SLJIT_CONFIG_ARM_V6) \
+ && !(defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7) \
+ && !(defined SLJIT_CONFIG_ARM_THUMB2 && SLJIT_CONFIG_ARM_THUMB2) \
+ && !(defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) \
+ && !(defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) \
+ && !(defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) \
+ && !(defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) \
+ && !(defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) \
+ && !(defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) \
+ && !(defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64) \
+ && !(defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X) \
+ && !(defined SLJIT_CONFIG_LOONGARCH_64 && SLJIT_CONFIG_LOONGARCH_64) \
+ && !(defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED) \
+ && !(defined SLJIT_CONFIG_AUTO && SLJIT_CONFIG_AUTO)
+#if defined SLJIT_CONFIG_AUTO && !SLJIT_CONFIG_AUTO
+#error "An architecture must be selected"
+#else /* SLJIT_CONFIG_AUTO */
+#define SLJIT_CONFIG_AUTO 1
+#endif /* !SLJIT_CONFIG_AUTO */
+#endif /* !SLJIT_CONFIG */
+
+/********************************************************/
+/* Automatic CPU detection (requires compiler support). */
+/********************************************************/
+
+#if (defined SLJIT_CONFIG_AUTO && SLJIT_CONFIG_AUTO)
+#ifndef _WIN32
+
+#if defined(__i386__) || defined(__i386)
+#define SLJIT_CONFIG_X86_32 1
+#elif defined(__x86_64__)
+#define SLJIT_CONFIG_X86_64 1
+#elif defined(__aarch64__)
+#define SLJIT_CONFIG_ARM_64 1
+#elif defined(__thumb2__)
+#define SLJIT_CONFIG_ARM_THUMB2 1
+#elif (defined(__ARM_ARCH) && __ARM_ARCH >= 7) || \
+ ((defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7S__)) \
+ || (defined(__ARM_ARCH_8A__) || defined(__ARM_ARCH_8R__)) \
+ || (defined(__ARM_ARCH_9A__)))
+#define SLJIT_CONFIG_ARM_V7 1
+#elif defined(__arm__) || defined (__ARM__)
+#define SLJIT_CONFIG_ARM_V6 1
+#elif defined(__ppc64__) || defined(__powerpc64__) || (defined(_ARCH_PPC64) && defined(__64BIT__)) || (defined(_POWER) && defined(__64BIT__))
+#define SLJIT_CONFIG_PPC_64 1
+#elif defined(__ppc__) || defined(__powerpc__) || defined(_ARCH_PPC) || defined(_ARCH_PWR) || defined(_ARCH_PWR2) || defined(_POWER)
+#define SLJIT_CONFIG_PPC_32 1
+#elif defined(__mips__) && !defined(_LP64)
+#define SLJIT_CONFIG_MIPS_32 1
+#elif defined(__mips64)
+#define SLJIT_CONFIG_MIPS_64 1
+#elif defined (__riscv_xlen) && (__riscv_xlen == 32)
+#define SLJIT_CONFIG_RISCV_32 1
+#elif defined (__riscv_xlen) && (__riscv_xlen == 64)
+#define SLJIT_CONFIG_RISCV_64 1
+#elif defined (__loongarch_lp64)
+#define SLJIT_CONFIG_LOONGARCH_64 1
+#elif defined(__s390x__)
+#define SLJIT_CONFIG_S390X 1
+#else
+/* Unsupported architecture */
+#define SLJIT_CONFIG_UNSUPPORTED 1
+#endif
+
+#else /* _WIN32 */
+
+#if defined(_M_X64) || defined(__x86_64__)
+#define SLJIT_CONFIG_X86_64 1
+#elif (defined(_M_ARM) && _M_ARM >= 7 && defined(_M_ARMT)) || defined(__thumb2__)
+#define SLJIT_CONFIG_ARM_THUMB2 1
+#elif (defined(_M_ARM) && _M_ARM >= 7)
+#define SLJIT_CONFIG_ARM_V7 1
+#elif defined(_ARM_)
+#define SLJIT_CONFIG_ARM_V6 1
+#elif defined(_M_ARM64) || defined(__aarch64__)
+#define SLJIT_CONFIG_ARM_64 1
+#else
+#define SLJIT_CONFIG_X86_32 1
+#endif
+
+#endif /* !_WIN32 */
+#endif /* SLJIT_CONFIG_AUTO */
+
+#if (defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED)
+#undef SLJIT_EXECUTABLE_ALLOCATOR
+#endif /* SLJIT_CONFIG_UNSUPPORTED */
+
+/******************************/
+/* CPU family type detection. */
+/******************************/
+
+#if (defined SLJIT_CONFIG_ARM_V6 && SLJIT_CONFIG_ARM_V6) || (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7) \
+ || (defined SLJIT_CONFIG_ARM_THUMB2 && SLJIT_CONFIG_ARM_THUMB2)
+#define SLJIT_CONFIG_ARM_32 1
+#endif
+
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) || (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+#define SLJIT_CONFIG_X86 1
+#elif (defined SLJIT_CONFIG_ARM_32 && SLJIT_CONFIG_ARM_32) || (defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64)
+#define SLJIT_CONFIG_ARM 1
+#elif (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) || (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
+#define SLJIT_CONFIG_PPC 1
+#elif (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) || (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
+#define SLJIT_CONFIG_MIPS 1
+#elif (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) || (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64)
+#define SLJIT_CONFIG_RISCV 1
+#elif (defined SLJIT_CONFIG_LOONGARCH_64 && SLJIT_CONFIG_LOONGARCH_64)
+#define SLJIT_CONFIG_LOONGARCH 1
+#endif
+
+#endif /* SLJIT_CONFIG_CPU_H_ */
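
A hedged sketch of pinning one architecture instead of relying on SLJIT_CONFIG_AUTO, e.g. from build flags or a wrapper header included before any sljit header (exactly one config may be set, as the sanity check above enforces):

#define SLJIT_CONFIG_RISCV_64 1
#include "sljitLir.h"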
diff --git a/src/3rdparty/pcre2/src/sljit/sljitConfigInternal.h b/src/3rdparty/pcre2/src/sljit/sljitConfigInternal.h
index 7bb9990a59..ce4e7b04ec 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitConfigInternal.h
+++ b/src/3rdparty/pcre2/src/sljit/sljitConfigInternal.h
@@ -59,8 +59,11 @@ extern "C" {
SLJIT_64BIT_ARCHITECTURE : 64 bit architecture
SLJIT_LITTLE_ENDIAN : little endian architecture
SLJIT_BIG_ENDIAN : big endian architecture
- SLJIT_UNALIGNED : allows unaligned memory accesses for non-fpu operations (only!)
- SLJIT_INDIRECT_CALL : see SLJIT_FUNC_OFFSET() for more information
+ SLJIT_UNALIGNED : unaligned memory accesses for non-fpu operations are supported
+ SLJIT_FPU_UNALIGNED : unaligned memory accesses for fpu operations are supported
+ SLJIT_MASKED_SHIFT : all word shifts are always masked
+ SLJIT_MASKED_SHIFT32 : all 32 bit shifts are always masked
+ SLJIT_INDIRECT_CALL : see SLJIT_FUNC_ADDR() for more information
Constants:
SLJIT_NUMBER_OF_REGISTERS : number of available registers
@@ -69,6 +72,8 @@ extern "C" {
SLJIT_NUMBER_OF_FLOAT_REGISTERS : number of available floating point registers
SLJIT_NUMBER_OF_SCRATCH_FLOAT_REGISTERS : number of available floating point scratch registers
SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS : number of available floating point saved registers
+ SLJIT_NUMBER_OF_TEMPORARY_REGISTERS : number of available temporary registers
+ SLJIT_NUMBER_OF_TEMPORARY_FLOAT_REGISTERS : number of available temporary floating point registers
SLJIT_WORD_SHIFT : the shift required to apply when accessing a sljit_sw/sljit_uw array by index
SLJIT_F32_SHIFT : the shift required to apply when accessing
a single precision floating point array by index
@@ -78,137 +83,27 @@ extern "C" {
the scratch register index of ecx is stored in this variable
SLJIT_LOCALS_OFFSET : local space starting offset (SLJIT_SP + SLJIT_LOCALS_OFFSET)
SLJIT_RETURN_ADDRESS_OFFSET : a return instruction always adds this offset to the return address
+ SLJIT_CONV_MAX_FLOAT : result when a floating point value is converted to integer
+ and the floating point value is higher than the maximum integer value
+ (possible values: SLJIT_CONV_RESULT_MAX_INT or SLJIT_CONV_RESULT_MIN_INT)
+ SLJIT_CONV_MIN_FLOAT : result when a floating point value is converted to integer
+ and the floating point value is lower than the minimum integer value
+ (possible values: SLJIT_CONV_RESULT_MAX_INT or SLJIT_CONV_RESULT_MIN_INT)
+ SLJIT_CONV_NAN_FLOAT : result when a NaN floating point value is converted to integer
+ (possible values: SLJIT_CONV_RESULT_MAX_INT, SLJIT_CONV_RESULT_MIN_INT,
+ or SLJIT_CONV_RESULT_ZERO)
Other macros:
+ SLJIT_TMP_R0 .. R9 : accessing temporary registers
+ SLJIT_TMP_R(i) : accessing temporary registers
+ SLJIT_TMP_FR0 .. FR9 : accessing temporary floating point registers
+ SLJIT_TMP_FR(i) : accessing temporary floating point registers
SLJIT_FUNC : calling convention attribute for both calling JIT from C and C calling back from JIT
SLJIT_W(number) : defining 64 bit constants on 64 bit architectures (platform independent helper)
+ SLJIT_F64_SECOND(reg) : provides the register index of the second 32 bit part of a 64 bit
+ floating point register when SLJIT_HAS_F64_AS_F32_PAIR returns non-zero
*/
-/*****************/
-/* Sanity check. */
-/*****************/
-
-#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) \
- + (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) \
- + (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) \
- + (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7) \
- + (defined SLJIT_CONFIG_ARM_THUMB2 && SLJIT_CONFIG_ARM_THUMB2) \
- + (defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) \
- + (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) \
- + (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) \
- + (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) \
- + (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) \
- + (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) \
- + (defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X) \
- + (defined SLJIT_CONFIG_AUTO && SLJIT_CONFIG_AUTO) \
- + (defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED) >= 2
-#error "Multiple architectures are selected"
-#endif
-
-#if !(defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) \
- && !(defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) \
- && !(defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) \
- && !(defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7) \
- && !(defined SLJIT_CONFIG_ARM_THUMB2 && SLJIT_CONFIG_ARM_THUMB2) \
- && !(defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) \
- && !(defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) \
- && !(defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) \
- && !(defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) \
- && !(defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) \
- && !(defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) \
- && !(defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X) \
- && !(defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED) \
- && !(defined SLJIT_CONFIG_AUTO && SLJIT_CONFIG_AUTO)
-#if defined SLJIT_CONFIG_AUTO && !SLJIT_CONFIG_AUTO
-#error "An architecture must be selected"
-#else /* SLJIT_CONFIG_AUTO */
-#define SLJIT_CONFIG_AUTO 1
-#endif /* !SLJIT_CONFIG_AUTO */
-#endif /* !SLJIT_CONFIG */
-
-/********************************************************/
-/* Automatic CPU detection (requires compiler support). */
-/********************************************************/
-
-#if (defined SLJIT_CONFIG_AUTO && SLJIT_CONFIG_AUTO)
-
-#ifndef _WIN32
-
-#if defined(__i386__) || defined(__i386)
-#define SLJIT_CONFIG_X86_32 1
-#elif defined(__x86_64__)
-#define SLJIT_CONFIG_X86_64 1
-#elif defined(__arm__) || defined(__ARM__)
-#ifdef __thumb2__
-#define SLJIT_CONFIG_ARM_THUMB2 1
-#elif defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__)
-#define SLJIT_CONFIG_ARM_V7 1
-#else
-#define SLJIT_CONFIG_ARM_V5 1
-#endif
-#elif defined (__aarch64__)
-#define SLJIT_CONFIG_ARM_64 1
-#elif defined(__ppc64__) || defined(__powerpc64__) || defined(_ARCH_PPC64) || (defined(_POWER) && defined(__64BIT__))
-#define SLJIT_CONFIG_PPC_64 1
-#elif defined(__ppc__) || defined(__powerpc__) || defined(_ARCH_PPC) || defined(_ARCH_PWR) || defined(_ARCH_PWR2) || defined(_POWER)
-#define SLJIT_CONFIG_PPC_32 1
-#elif defined(__mips__) && !defined(_LP64)
-#define SLJIT_CONFIG_MIPS_32 1
-#elif defined(__mips64)
-#define SLJIT_CONFIG_MIPS_64 1
-#elif defined(__sparc__) || defined(__sparc)
-#define SLJIT_CONFIG_SPARC_32 1
-#elif defined(__s390x__)
-#define SLJIT_CONFIG_S390X 1
-#else
-/* Unsupported architecture */
-#define SLJIT_CONFIG_UNSUPPORTED 1
-#endif
-
-#else /* _WIN32 */
-
-#if defined(_M_X64) || defined(__x86_64__)
-#define SLJIT_CONFIG_X86_64 1
-#elif (defined(_M_ARM) && _M_ARM >= 7 && defined(_M_ARMT)) || defined(__thumb2__)
-#define SLJIT_CONFIG_ARM_THUMB2 1
-#elif (defined(_M_ARM) && _M_ARM >= 7)
-#define SLJIT_CONFIG_ARM_V7 1
-#elif defined(_ARM_)
-#define SLJIT_CONFIG_ARM_V5 1
-#elif defined(_M_ARM64) || defined(__aarch64__)
-#define SLJIT_CONFIG_ARM_64 1
-#else
-#define SLJIT_CONFIG_X86_32 1
-#endif
-
-#endif /* !_WIN32 */
-#endif /* SLJIT_CONFIG_AUTO */
-
-#if (defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED)
-#undef SLJIT_EXECUTABLE_ALLOCATOR
-#endif
-
-/******************************/
-/* CPU family type detection. */
-/******************************/
-
-#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) || (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7) \
- || (defined SLJIT_CONFIG_ARM_THUMB2 && SLJIT_CONFIG_ARM_THUMB2)
-#define SLJIT_CONFIG_ARM_32 1
-#endif
-
-#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) || (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
-#define SLJIT_CONFIG_X86 1
-#elif (defined SLJIT_CONFIG_ARM_32 && SLJIT_CONFIG_ARM_32) || (defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64)
-#define SLJIT_CONFIG_ARM 1
-#elif (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) || (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
-#define SLJIT_CONFIG_PPC 1
-#elif (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) || (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
-#define SLJIT_CONFIG_MIPS 1
-#elif (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) || (defined SLJIT_CONFIG_SPARC_64 && SLJIT_CONFIG_SPARC_64)
-#define SLJIT_CONFIG_SPARC 1
-#endif
-
/***********************************************************/
/* Intel Control-flow Enforcement Technology (CET) support. */
/***********************************************************/
@@ -274,9 +169,13 @@ extern "C" {
#ifndef SLJIT_INLINE
/* Inline functions. Some old compilers do not support them. */
-#if defined(__SUNPRO_C) && __SUNPRO_C <= 0x510
+#ifdef __SUNPRO_C
+#if __SUNPRO_C < 0x560
#define SLJIT_INLINE
#else
+#define SLJIT_INLINE inline
+#endif /* __SUNPRO_C < 0x560 */
+#else
#define SLJIT_INLINE __inline
#endif
#endif /* !SLJIT_INLINE */
@@ -319,30 +218,58 @@ extern "C" {
/* Instruction cache flush. */
/****************************/
+#ifdef __APPLE__
+#include <AvailabilityMacros.h>
+#endif
+
+/*
+ * TODO:
+ *
+ * clang >= 15 may be safe to enable below; older versions are
+ * known to abort on some targets:
+ * https://github.com/PhilipHazel/pcre2/issues/92
+ *
+ * Beware that some vendors (e.g. Microsoft, Apple) are known to have
+ * removed the code supporting this builtin even when __has_builtin
+ * reports it as available.
+ *
+ * Before changing this, make sure linking does not fail because
+ * __clear_cache() is missing, or add an exception so that the
+ * system-provided method defined below is used instead.
+ */
#if (!defined SLJIT_CACHE_FLUSH && defined __has_builtin)
-#if __has_builtin(__builtin___clear_cache)
+#if __has_builtin(__builtin___clear_cache) && !defined(__clang__)
+/*
+ * gcc's __builtin___clear_cache() is broken on PowerPC:
+ * https://gcc.gnu.org/bugzilla//show_bug.cgi?id=91248
+ * https://gcc.gnu.org/bugzilla//show_bug.cgi?id=93811
+ */
+#if !defined(SLJIT_CONFIG_PPC)
#define SLJIT_CACHE_FLUSH(from, to) \
__builtin___clear_cache((char*)(from), (char*)(to))
+#endif
-#endif /* __has_builtin(__builtin___clear_cache) */
+#endif /* __has_builtin(__builtin___clear_cache) && !__clang__ */
#endif /* (!defined SLJIT_CACHE_FLUSH && defined __has_builtin) */
#ifndef SLJIT_CACHE_FLUSH
-#if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86)
+#if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86) \
+ || (defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X)
/* Not required to implement on archs with unified caches. */
#define SLJIT_CACHE_FLUSH(from, to)
-#elif defined __APPLE__
+#elif defined(__APPLE__) && MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
/* Supported by all Macs since Mac OS X 10.5.
   However, it does not work on non-jailbroken iOS devices,
   even though compilation succeeds. */
-
+#include <libkern/OSCacheControl.h>
#define SLJIT_CACHE_FLUSH(from, to) \
- sys_icache_invalidate((char*)(from), (char*)(to) - (char*)(from))
+ sys_icache_invalidate((void*)(from), (size_t)((char*)(to) - (char*)(from)))
#elif (defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC)
@@ -351,33 +278,26 @@ extern "C" {
ppc_cache_flush((from), (to))
#define SLJIT_CACHE_FLUSH_OWN_IMPL 1
-#elif (defined(__GNUC__) && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)))
-
-#define SLJIT_CACHE_FLUSH(from, to) \
- __builtin___clear_cache((char*)(from), (char*)(to))
-
-#elif defined __ANDROID__
-
-/* Android lacks __clear_cache; instead, cacheflush should be used. */
+#elif defined(_WIN32)
#define SLJIT_CACHE_FLUSH(from, to) \
- cacheflush((long)(from), (long)(to), 0)
+ FlushInstructionCache(GetCurrentProcess(), (void*)(from), (char*)(to) - (char*)(from))
-#elif (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
+#elif (defined(__GNUC__) && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))) || defined(__clang__)
-/* The __clear_cache() implementation of GCC is a dummy function on Sparc. */
#define SLJIT_CACHE_FLUSH(from, to) \
- sparc_cache_flush((from), (to))
-#define SLJIT_CACHE_FLUSH_OWN_IMPL 1
+ __builtin___clear_cache((char*)(from), (char*)(to))
-#elif defined _WIN32
+#elif defined __ANDROID__
+/* Android ARMv7 with gcc lacks __clear_cache; use cacheflush instead. */
+#include <sys/cachectl.h>
#define SLJIT_CACHE_FLUSH(from, to) \
- FlushInstructionCache(GetCurrentProcess(), (char*)(from), (char*)(to) - (char*)(from))
+ cacheflush((long)(from), (long)(to), 0)
#else
-/* Calls __ARM_NR_cacheflush on ARM-Linux. */
+/* Call __ARM_NR_cacheflush on ARM-Linux or the corresponding MIPS syscall. */
#define SLJIT_CACHE_FLUSH(from, to) \
__clear_cache((char*)(from), (char*)(to))
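As a usage sketch (names hypothetical): once generated instructions have been copied into an executable buffer, the whole range has to be flushed with SLJIT_CACHE_FLUSH before it is jumped to; on x86 and s390x the macro expands to nothing because instruction and data caches are coherent there.

    /* 'code' and 'code_size' are hypothetical; they describe a buffer
       that was just filled with generated machine code. */
    static void finalize_code(void *code, size_t code_size)
    {
        SLJIT_CACHE_FLUSH(code, (char *)code + code_size);
    }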
@@ -407,13 +327,15 @@ typedef signed int sljit_s32;
#if (defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED)
/* Just to have something. */
#define SLJIT_WORD_SHIFT 0
-typedef unsigned long int sljit_uw;
-typedef long int sljit_sw;
+typedef unsigned int sljit_uw;
+typedef int sljit_sw;
#elif !(defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) \
&& !(defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) \
&& !(defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) \
&& !(defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) \
- && !(defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X)
+ && !(defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64) \
+ && !(defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X) \
+ && !(defined SLJIT_CONFIG_LOONGARCH_64 && SLJIT_CONFIG_LOONGARCH_64)
#define SLJIT_32BIT_ARCHITECTURE 1
#define SLJIT_WORD_SHIFT 2
typedef unsigned int sljit_uw;
@@ -449,12 +371,42 @@ typedef double sljit_f64;
#define SLJIT_F32_SHIFT 2
#define SLJIT_F64_SHIFT 3
+#define SLJIT_CONV_RESULT_MAX_INT 0
+#define SLJIT_CONV_RESULT_MIN_INT 1
+#define SLJIT_CONV_RESULT_ZERO 2
+
+#if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86)
+#define SLJIT_CONV_MAX_FLOAT SLJIT_CONV_RESULT_MIN_INT
+#define SLJIT_CONV_MIN_FLOAT SLJIT_CONV_RESULT_MIN_INT
+#define SLJIT_CONV_NAN_FLOAT SLJIT_CONV_RESULT_MIN_INT
+#elif (defined SLJIT_CONFIG_ARM && SLJIT_CONFIG_ARM)
+#define SLJIT_CONV_MAX_FLOAT SLJIT_CONV_RESULT_MAX_INT
+#define SLJIT_CONV_MIN_FLOAT SLJIT_CONV_RESULT_MIN_INT
+#define SLJIT_CONV_NAN_FLOAT SLJIT_CONV_RESULT_ZERO
+#elif (defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS)
+#define SLJIT_CONV_MAX_FLOAT SLJIT_CONV_RESULT_MAX_INT
+#define SLJIT_CONV_MIN_FLOAT SLJIT_CONV_RESULT_MAX_INT
+#define SLJIT_CONV_NAN_FLOAT SLJIT_CONV_RESULT_MAX_INT
+#elif (defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC)
+#define SLJIT_CONV_MAX_FLOAT SLJIT_CONV_RESULT_MAX_INT
+#define SLJIT_CONV_MIN_FLOAT SLJIT_CONV_RESULT_MIN_INT
+#define SLJIT_CONV_NAN_FLOAT SLJIT_CONV_RESULT_MIN_INT
+#elif (defined SLJIT_CONFIG_RISCV && SLJIT_CONFIG_RISCV)
+#define SLJIT_CONV_MAX_FLOAT SLJIT_CONV_RESULT_MAX_INT
+#define SLJIT_CONV_MIN_FLOAT SLJIT_CONV_RESULT_MIN_INT
+#define SLJIT_CONV_NAN_FLOAT SLJIT_CONV_RESULT_MAX_INT
+#elif (defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X)
+#define SLJIT_CONV_MAX_FLOAT SLJIT_CONV_RESULT_MAX_INT
+#define SLJIT_CONV_MIN_FLOAT SLJIT_CONV_RESULT_MIN_INT
+#define SLJIT_CONV_NAN_FLOAT SLJIT_CONV_RESULT_MIN_INT
+#else
+#error "Result for float to integer conversion is not defined"
+#endif
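A hedged sketch of how a consumer might use these constants: the hardware result for a NaN input differs across targets, so a front end that wants one fixed result can test the macro at compile time and decide whether an explicit NaN check is needed before the conversion. The EMIT_EXPLICIT_NAN_CHECK name is hypothetical.

    #if SLJIT_CONV_NAN_FLOAT != SLJIT_CONV_RESULT_ZERO
    /* The conversion instruction yields MIN_INT/MAX_INT for NaN here,
       so emit an explicit comparison first (assumption: the front end
       wants NaN to convert to zero). */
    #define EMIT_EXPLICIT_NAN_CHECK 1
    #else
    #define EMIT_EXPLICIT_NAN_CHECK 0
    #endif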
+
#ifndef SLJIT_W
/* Defining long constants. */
-#if (defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED)
-#define SLJIT_W(w) (w##l)
-#elif (defined SLJIT_64BIT_ARCHITECTURE && SLJIT_64BIT_ARCHITECTURE)
+#if (defined SLJIT_64BIT_ARCHITECTURE && SLJIT_64BIT_ARCHITECTURE)
#ifdef _WIN64
#define SLJIT_W(w) (w##ll)
#else /* !windows */
@@ -473,8 +425,7 @@ typedef double sljit_f64;
#if !defined(SLJIT_BIG_ENDIAN) && !defined(SLJIT_LITTLE_ENDIAN)
/* These macros are mostly useful for the applications. */
-#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) \
- || (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
+#if (defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC)
#ifdef __LITTLE_ENDIAN__
#define SLJIT_LITTLE_ENDIAN 1
@@ -482,8 +433,7 @@ typedef double sljit_f64;
#define SLJIT_BIG_ENDIAN 1
#endif
-#elif (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) \
- || (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
+#elif (defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS)
#ifdef __MIPSEL__
#define SLJIT_LITTLE_ENDIAN 1
@@ -496,9 +446,10 @@ typedef double sljit_f64;
/* Auto detecting mips revision. */
#if (defined __mips_isa_rev) && (__mips_isa_rev >= 6)
#define SLJIT_MIPS_REV 6
-#elif (defined __mips_isa_rev && __mips_isa_rev >= 1) \
- || (defined __clang__ && defined _MIPS_ARCH_OCTEON) \
- || (defined __clang__ && defined _MIPS_ARCH_P5600)
+#elif defined(__mips_isa_rev) && __mips_isa_rev >= 1
+#define SLJIT_MIPS_REV __mips_isa_rev
+#elif defined(__clang__) \
+ && (defined(_MIPS_ARCH_OCTEON) || defined(_MIPS_ARCH_P5600))
/* clang either forgets to define (clang-7) __mips_isa_rev at all
* or sets it to zero (clang-8,-9) for -march=octeon (MIPS64 R2+)
* and -march=p5600 (MIPS32 R5).
@@ -510,8 +461,7 @@ typedef double sljit_f64;
#endif /* !SLJIT_MIPS_REV */
-#elif (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) \
- || (defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X)
+#elif (defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X)
#define SLJIT_BIG_ENDIAN 1
@@ -532,19 +482,32 @@ typedef double sljit_f64;
#ifndef SLJIT_UNALIGNED
-#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) \
- || (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) \
+#if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86) \
|| (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7) \
|| (defined SLJIT_CONFIG_ARM_THUMB2 && SLJIT_CONFIG_ARM_THUMB2) \
|| (defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) \
- || (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) \
- || (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) \
- || (defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X)
+ || (defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC) \
+ || (defined SLJIT_CONFIG_RISCV && SLJIT_CONFIG_RISCV) \
+ || (defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X) \
+ || (defined SLJIT_CONFIG_LOONGARCH && SLJIT_CONFIG_LOONGARCH)
#define SLJIT_UNALIGNED 1
#endif
#endif /* !SLJIT_UNALIGNED */
+#ifndef SLJIT_FPU_UNALIGNED
+
+#if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86) \
+ || (defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) \
+ || (defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC) \
+ || (defined SLJIT_CONFIG_RISCV && SLJIT_CONFIG_RISCV) \
+ || (defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X) \
+ || (defined SLJIT_CONFIG_LOONGARCH && SLJIT_CONFIG_LOONGARCH)
+#define SLJIT_FPU_UNALIGNED 1
+#endif
+
+#endif /* !SLJIT_FPU_UNALIGNED */
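For illustration, a small helper that keys off these macros (assuming the sljit types are in scope): the direct dereference relies on the CPU tolerating misaligned loads, which is exactly what SLJIT_UNALIGNED asserts, while the memcpy branch stays safe on strict-alignment targets.

    #include <string.h>

    static sljit_sw load_word_unaligned(const void *p)
    {
    #if (defined SLJIT_UNALIGNED && SLJIT_UNALIGNED)
        return *(const sljit_sw *)p;      /* CPU handles misaligned access. */
    #else
        sljit_sw value;
        memcpy(&value, p, sizeof(value)); /* byte-wise, alignment-safe */
        return value;
    #endif
    }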
+
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
/* Auto detect SSE2 support using CPUID.
On 64 bit x86 cpus, sse2 must be present. */
@@ -556,39 +519,21 @@ typedef double sljit_f64;
/*****************************************************************************************/
#ifndef SLJIT_FUNC
-
-#if (defined SLJIT_USE_CDECL_CALLING_CONVENTION && SLJIT_USE_CDECL_CALLING_CONVENTION) \
- || !(defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
-
-#define SLJIT_FUNC
-
-#elif defined(__GNUC__) && !defined(__APPLE__)
-
-#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
-#define SLJIT_FUNC __attribute__ ((fastcall))
-#define SLJIT_X86_32_FASTCALL 1
-#else
-#define SLJIT_FUNC
-#endif /* gcc >= 3.4 */
-
-#elif defined(_MSC_VER)
-
-#define SLJIT_FUNC __fastcall
-#define SLJIT_X86_32_FASTCALL 1
-
-#elif defined(__BORLANDC__)
-
-#define SLJIT_FUNC __msfastcall
-#define SLJIT_X86_32_FASTCALL 1
-
-#else /* Unknown compiler. */
-
-/* The cdecl calling convention is usually the x86 default. */
#define SLJIT_FUNC
+#endif /* !SLJIT_FUNC */
-#endif /* SLJIT_USE_CDECL_CALLING_CONVENTION */
+/* Disable instrumentation for these functions as they may not be sound */
+#ifndef SLJIT_FUNC_ATTRIBUTE
+#if defined(__has_feature)
+#if __has_feature(memory_sanitizer)
+#define SLJIT_FUNC_ATTRIBUTE __attribute__((no_sanitize("memory")))
+#endif /* __has_feature(memory_sanitizer) */
+#endif /* defined(__has_feature) */
+#endif
-#endif /* !SLJIT_FUNC */
+#ifndef SLJIT_FUNC_ATTRIBUTE
+#define SLJIT_FUNC_ATTRIBUTE
+#endif
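A hedged sketch of both macros in use: SLJIT_FUNC pins the calling convention shared by C and generated code, and SLJIT_FUNC_ATTRIBUTE keeps MemorySanitizer from flagging values that originate in JIT-ed code. The entry-point type and 'code' pointer are hypothetical.

    typedef sljit_sw (SLJIT_FUNC *jit_entry)(sljit_sw);

    /* 'code' stands for the pointer returned by sljit_generate_code(). */
    static SLJIT_FUNC_ATTRIBUTE sljit_sw run_jit(void *code, sljit_sw arg)
    {
        return ((jit_entry)code)(arg);
    }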
#ifndef SLJIT_INDIRECT_CALL
#if ((defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) && (!defined _CALL_ELF || _CALL_ELF == 1)) \
@@ -599,14 +544,10 @@ typedef double sljit_f64;
#endif
#endif /* SLJIT_INDIRECT_CALL */
-/* The offset which needs to be substracted from the return address to
+/* The offset which needs to be subtracted from the return address to
determine the next executed instruction after return. */
#ifndef SLJIT_RETURN_ADDRESS_OFFSET
-#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
-#define SLJIT_RETURN_ADDRESS_OFFSET 8
-#else
#define SLJIT_RETURN_ADDRESS_OFFSET 0
-#endif
#endif /* SLJIT_RETURN_ADDRESS_OFFSET */
/***************************************************/
@@ -631,12 +572,14 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_free_unused_memory_exec(void);
#if (defined SLJIT_PROT_EXECUTABLE_ALLOCATOR && SLJIT_PROT_EXECUTABLE_ALLOCATOR)
SLJIT_API_FUNC_ATTRIBUTE sljit_sw sljit_exec_offset(void* ptr);
#define SLJIT_EXEC_OFFSET(ptr) sljit_exec_offset(ptr)
-#else
-#define SLJIT_EXEC_OFFSET(ptr) 0
#endif
#endif /* SLJIT_EXECUTABLE_ALLOCATOR */
+#ifndef SLJIT_EXEC_OFFSET
+#define SLJIT_EXEC_OFFSET(ptr) 0
+#endif
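A minimal sketch of the intended use, assuming the dual-mapping "prot" allocator where code is written through one mapping and executed through another: SLJIT_EXEC_OFFSET translates a writable address to its executable alias, and is simply 0 for every other allocator, making this an identity operation.

    static void *to_exec_address(void *writable)
    {
        return (void *)((sljit_u8 *)writable + SLJIT_EXEC_OFFSET(writable));
    }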
+
/**********************************************/
/* Registers and locals offset determination. */
/**********************************************/
@@ -644,51 +587,72 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_sw sljit_exec_offset(void* ptr);
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
#define SLJIT_NUMBER_OF_REGISTERS 12
-#define SLJIT_NUMBER_OF_SAVED_REGISTERS 9
-#define SLJIT_LOCALS_OFFSET_BASE (compiler->locals_offset)
+#define SLJIT_NUMBER_OF_SAVED_REGISTERS 7
+#define SLJIT_NUMBER_OF_TEMPORARY_REGISTERS 1
+#define SLJIT_NUMBER_OF_FLOAT_REGISTERS 7
+#define SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS 0
+#define SLJIT_NUMBER_OF_TEMPORARY_FLOAT_REGISTERS 1
+#define SLJIT_LOCALS_OFFSET_BASE (8 * SSIZE_OF(sw))
#define SLJIT_PREF_SHIFT_REG SLJIT_R2
+#define SLJIT_MASKED_SHIFT 1
+#define SLJIT_MASKED_SHIFT32 1
#elif (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
#define SLJIT_NUMBER_OF_REGISTERS 13
+#define SLJIT_NUMBER_OF_TEMPORARY_REGISTERS 2
+#define SLJIT_NUMBER_OF_FLOAT_REGISTERS 15
+#define SLJIT_NUMBER_OF_TEMPORARY_FLOAT_REGISTERS 1
#ifndef _WIN64
#define SLJIT_NUMBER_OF_SAVED_REGISTERS 6
+#define SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS 0
#define SLJIT_LOCALS_OFFSET_BASE 0
#else /* _WIN64 */
#define SLJIT_NUMBER_OF_SAVED_REGISTERS 8
-#define SLJIT_LOCALS_OFFSET_BASE (compiler->locals_offset)
+#define SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS 10
+#define SLJIT_LOCALS_OFFSET_BASE (4 * SSIZE_OF(sw))
#endif /* !_WIN64 */
#define SLJIT_PREF_SHIFT_REG SLJIT_R3
+#define SLJIT_MASKED_SHIFT 1
+#define SLJIT_MASKED_SHIFT32 1
-#elif (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) || (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)
-
-#define SLJIT_NUMBER_OF_REGISTERS 12
-#define SLJIT_NUMBER_OF_SAVED_REGISTERS 8
-#define SLJIT_LOCALS_OFFSET_BASE 0
-
-#elif (defined SLJIT_CONFIG_ARM_THUMB2 && SLJIT_CONFIG_ARM_THUMB2)
+#elif (defined SLJIT_CONFIG_ARM_32 && SLJIT_CONFIG_ARM_32)
#define SLJIT_NUMBER_OF_REGISTERS 12
#define SLJIT_NUMBER_OF_SAVED_REGISTERS 8
+#define SLJIT_NUMBER_OF_TEMPORARY_REGISTERS 2
+#define SLJIT_NUMBER_OF_FLOAT_REGISTERS 14
+#define SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS 8
+#define SLJIT_NUMBER_OF_TEMPORARY_FLOAT_REGISTERS 2
#define SLJIT_LOCALS_OFFSET_BASE 0
#elif (defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64)
#define SLJIT_NUMBER_OF_REGISTERS 26
#define SLJIT_NUMBER_OF_SAVED_REGISTERS 10
-#define SLJIT_LOCALS_OFFSET_BASE 0
+#define SLJIT_NUMBER_OF_TEMPORARY_REGISTERS 3
+#define SLJIT_NUMBER_OF_FLOAT_REGISTERS 30
+#define SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS 8
+#define SLJIT_NUMBER_OF_TEMPORARY_FLOAT_REGISTERS 2
+#define SLJIT_LOCALS_OFFSET_BASE (2 * (sljit_s32)sizeof(sljit_sw))
+#define SLJIT_MASKED_SHIFT 1
+#define SLJIT_MASKED_SHIFT32 1
#elif (defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC)
#define SLJIT_NUMBER_OF_REGISTERS 23
#define SLJIT_NUMBER_OF_SAVED_REGISTERS 17
+#define SLJIT_NUMBER_OF_TEMPORARY_REGISTERS 3
+#define SLJIT_NUMBER_OF_FLOAT_REGISTERS 30
+#define SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS 18
+#define SLJIT_NUMBER_OF_TEMPORARY_FLOAT_REGISTERS 2
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) || (defined _AIX)
-#define SLJIT_LOCALS_OFFSET_BASE ((6 + 8) * sizeof(sljit_sw))
+#define SLJIT_LOCALS_OFFSET_BASE ((6 + 8) * (sljit_s32)sizeof(sljit_sw))
#elif (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32)
/* Add +1 for double alignment. */
-#define SLJIT_LOCALS_OFFSET_BASE ((3 + 1) * sizeof(sljit_sw))
+#define SLJIT_LOCALS_OFFSET_BASE ((3 + 1) * (sljit_s32)sizeof(sljit_sw))
#else
-#define SLJIT_LOCALS_OFFSET_BASE (3 * sizeof(sljit_sw))
+#define SLJIT_LOCALS_OFFSET_BASE (3 * (sljit_s32)sizeof(sljit_sw))
#endif /* SLJIT_CONFIG_PPC_64 || _AIX */
#elif (defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS)
@@ -696,20 +660,30 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_sw sljit_exec_offset(void* ptr);
#define SLJIT_NUMBER_OF_REGISTERS 21
#define SLJIT_NUMBER_OF_SAVED_REGISTERS 8
#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
-#define SLJIT_LOCALS_OFFSET_BASE (4 * sizeof(sljit_sw))
+#define SLJIT_LOCALS_OFFSET_BASE (4 * (sljit_s32)sizeof(sljit_sw))
+#define SLJIT_NUMBER_OF_FLOAT_REGISTERS 13
+#define SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS 6
#else
#define SLJIT_LOCALS_OFFSET_BASE 0
+#define SLJIT_NUMBER_OF_FLOAT_REGISTERS 29
+#define SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS 8
#endif
+#define SLJIT_NUMBER_OF_TEMPORARY_REGISTERS 5
+#define SLJIT_NUMBER_OF_TEMPORARY_FLOAT_REGISTERS 3
+#define SLJIT_MASKED_SHIFT 1
+#define SLJIT_MASKED_SHIFT32 1
-#elif (defined SLJIT_CONFIG_SPARC && SLJIT_CONFIG_SPARC)
+#elif (defined SLJIT_CONFIG_RISCV && SLJIT_CONFIG_RISCV)
-#define SLJIT_NUMBER_OF_REGISTERS 18
-#define SLJIT_NUMBER_OF_SAVED_REGISTERS 14
-#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
-/* saved registers (16), return struct pointer (1), space for 6 argument words (1),
- 4th double arg (2), double alignment (1). */
-#define SLJIT_LOCALS_OFFSET_BASE ((16 + 1 + 6 + 2 + 1) * sizeof(sljit_sw))
-#endif
+#define SLJIT_NUMBER_OF_REGISTERS 23
+#define SLJIT_NUMBER_OF_SAVED_REGISTERS 12
+#define SLJIT_NUMBER_OF_TEMPORARY_REGISTERS 5
+#define SLJIT_NUMBER_OF_FLOAT_REGISTERS 30
+#define SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS 12
+#define SLJIT_NUMBER_OF_TEMPORARY_FLOAT_REGISTERS 2
+#define SLJIT_LOCALS_OFFSET_BASE 0
+#define SLJIT_MASKED_SHIFT 1
+#define SLJIT_MASKED_SHIFT32 1
#elif (defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X)
@@ -736,12 +710,34 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_sw sljit_exec_offset(void* ptr);
#define SLJIT_NUMBER_OF_REGISTERS 12
#define SLJIT_NUMBER_OF_SAVED_REGISTERS 8
+#define SLJIT_NUMBER_OF_TEMPORARY_REGISTERS 3
+#define SLJIT_NUMBER_OF_FLOAT_REGISTERS 15
+#define SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS 8
+#define SLJIT_NUMBER_OF_TEMPORARY_FLOAT_REGISTERS 1
#define SLJIT_LOCALS_OFFSET_BASE SLJIT_S390X_DEFAULT_STACK_FRAME_SIZE
+#define SLJIT_MASKED_SHIFT 1
+
+#elif (defined SLJIT_CONFIG_LOONGARCH && SLJIT_CONFIG_LOONGARCH)
+
+#define SLJIT_NUMBER_OF_REGISTERS 23
+#define SLJIT_NUMBER_OF_SAVED_REGISTERS 10
+#define SLJIT_NUMBER_OF_TEMPORARY_REGISTERS 5
+#define SLJIT_NUMBER_OF_FLOAT_REGISTERS 30
+#define SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS 12
+#define SLJIT_NUMBER_OF_TEMPORARY_FLOAT_REGISTERS 2
+#define SLJIT_LOCALS_OFFSET_BASE 0
+#define SLJIT_MASKED_SHIFT 1
+#define SLJIT_MASKED_SHIFT32 1
#elif (defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED)
+/* Just to have something. */
#define SLJIT_NUMBER_OF_REGISTERS 0
#define SLJIT_NUMBER_OF_SAVED_REGISTERS 0
+#define SLJIT_NUMBER_OF_TEMPORARY_REGISTERS 0
+#define SLJIT_NUMBER_OF_FLOAT_REGISTERS 0
+#define SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS 0
+#define SLJIT_NUMBER_OF_TEMPORARY_FLOAT_REGISTERS 0
#define SLJIT_LOCALS_OFFSET_BASE 0
#endif
@@ -751,28 +747,74 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_sw sljit_exec_offset(void* ptr);
#define SLJIT_NUMBER_OF_SCRATCH_REGISTERS \
(SLJIT_NUMBER_OF_REGISTERS - SLJIT_NUMBER_OF_SAVED_REGISTERS)
-#define SLJIT_NUMBER_OF_FLOAT_REGISTERS 6
-#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) && (defined _WIN64)
-#define SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS 1
-#else
-#define SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS 0
-#endif
-
#define SLJIT_NUMBER_OF_SCRATCH_FLOAT_REGISTERS \
(SLJIT_NUMBER_OF_FLOAT_REGISTERS - SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS)
+/**********************************/
+/* Temporary register management. */
+/**********************************/
+
+#define SLJIT_TMP_REGISTER_BASE (SLJIT_NUMBER_OF_REGISTERS + 2)
+#define SLJIT_TMP_FREGISTER_BASE (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1)
+
+/* WARNING: Accessing temporary registers is not recommended, because they
+   are also used by the JIT compiler for various computations. Using them
+   may have unpredictable side effects, including incorrect operations and
+   crashes, so use them at your own risk. The machine registers themselves
+   may have limitations, e.g. the r0 register on s390x / ppc cannot be used
+   as a base address for memory operations. */
+
+/* Temporary registers */
+#define SLJIT_TMP_R0 (SLJIT_TMP_REGISTER_BASE + 0)
+#define SLJIT_TMP_R1 (SLJIT_TMP_REGISTER_BASE + 1)
+#define SLJIT_TMP_R2 (SLJIT_TMP_REGISTER_BASE + 2)
+#define SLJIT_TMP_R3 (SLJIT_TMP_REGISTER_BASE + 3)
+#define SLJIT_TMP_R4 (SLJIT_TMP_REGISTER_BASE + 4)
+#define SLJIT_TMP_R5 (SLJIT_TMP_REGISTER_BASE + 5)
+#define SLJIT_TMP_R6 (SLJIT_TMP_REGISTER_BASE + 6)
+#define SLJIT_TMP_R7 (SLJIT_TMP_REGISTER_BASE + 7)
+#define SLJIT_TMP_R8 (SLJIT_TMP_REGISTER_BASE + 8)
+#define SLJIT_TMP_R9 (SLJIT_TMP_REGISTER_BASE + 9)
+#define SLJIT_TMP_R(i) (SLJIT_TMP_REGISTER_BASE + (i))
+
+#define SLJIT_TMP_FR0 (SLJIT_TMP_FREGISTER_BASE + 0)
+#define SLJIT_TMP_FR1 (SLJIT_TMP_FREGISTER_BASE + 1)
+#define SLJIT_TMP_FR2 (SLJIT_TMP_FREGISTER_BASE + 2)
+#define SLJIT_TMP_FR3 (SLJIT_TMP_FREGISTER_BASE + 3)
+#define SLJIT_TMP_FR4 (SLJIT_TMP_FREGISTER_BASE + 4)
+#define SLJIT_TMP_FR5 (SLJIT_TMP_FREGISTER_BASE + 5)
+#define SLJIT_TMP_FR6 (SLJIT_TMP_FREGISTER_BASE + 6)
+#define SLJIT_TMP_FR7 (SLJIT_TMP_FREGISTER_BASE + 7)
+#define SLJIT_TMP_FR8 (SLJIT_TMP_FREGISTER_BASE + 8)
+#define SLJIT_TMP_FR9 (SLJIT_TMP_FREGISTER_BASE + 9)
+#define SLJIT_TMP_FR(i) (SLJIT_TMP_FREGISTER_BASE + (i))
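These are plain indices stacked directly after the ordinary register range, so range checks stay trivial; a hedged sketch:

    /* Valid for ports that define SLJIT_NUMBER_OF_TEMPORARY_REGISTERS
       (all supported ones do, per the tables above). */
    static int is_tmp_reg(sljit_s32 reg)
    {
        return reg >= SLJIT_TMP_REGISTER_BASE
            && reg < SLJIT_TMP_REGISTER_BASE + SLJIT_NUMBER_OF_TEMPORARY_REGISTERS;
    }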
+
/********************************/
/* CPU status flags management. */
/********************************/
-#if (defined SLJIT_CONFIG_ARM_32 && SLJIT_CONFIG_ARM_32) \
- || (defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) \
+#if (defined SLJIT_CONFIG_ARM && SLJIT_CONFIG_ARM) \
+ || (defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC) \
|| (defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS) \
- || (defined SLJIT_CONFIG_SPARC && SLJIT_CONFIG_SPARC) \
- || (defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X)
+ || (defined SLJIT_CONFIG_RISCV && SLJIT_CONFIG_RISCV) \
+ || (defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X) \
+ || (defined SLJIT_CONFIG_LOONGARCH && SLJIT_CONFIG_LOONGARCH)
#define SLJIT_HAS_STATUS_FLAGS_STATE 1
#endif
+/***************************************/
+/* Floating point register management. */
+/***************************************/
+
+#if (defined SLJIT_CONFIG_ARM_32 && SLJIT_CONFIG_ARM_32) \
+ || (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
+#define SLJIT_F64_SECOND(reg) \
+ ((reg) + SLJIT_FS0 + SLJIT_NUMBER_OF_TEMPORARY_FLOAT_REGISTERS)
+#else /* !SLJIT_CONFIG_ARM_32 && !SLJIT_CONFIG_MIPS_32 */
+#define SLJIT_F64_SECOND(reg) \
+ (reg)
+#endif /* SLJIT_CONFIG_ARM_32 || SLJIT_CONFIG_MIPS_32 */
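A short sketch of what SLJIT_F64_SECOND provides: on 32 bit ARM and MIPS, where a 64 bit float register is a pair of 32 bit halves, it maps a register to the index of its upper half; everywhere else it is the identity.

    static void f64_pair_example(void)
    {
        sljit_s32 lo = SLJIT_FR0;
        sljit_s32 hi = SLJIT_F64_SECOND(SLJIT_FR0); /* == lo when f64 is not a pair */
        (void)lo;
        (void)hi;
    }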
+
/*************************************/
/* Debug and verbose related macros. */
/*************************************/
diff --git a/src/3rdparty/pcre2/src/sljit/sljitExecAllocator.c b/src/3rdparty/pcre2/src/sljit/sljitExecAllocator.c
index 6e5bf78e45..92d940ddc2 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitExecAllocator.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitExecAllocator.c
@@ -66,7 +66,7 @@
/* --------------------------------------------------------------------- */
/* 64 KByte. */
-#define CHUNK_SIZE 0x10000
+#define CHUNK_SIZE (sljit_uw)0x10000u
/*
alloc_chunk / free_chunk :
@@ -112,7 +112,7 @@ static SLJIT_INLINE void free_chunk(void *chunk, sljit_uw size)
static SLJIT_INLINE int get_map_jit_flag()
{
- sljit_sw page_size;
+ size_t page_size;
void *ptr;
struct utsname name;
static int map_jit_flag = -1;
@@ -139,8 +139,9 @@ static SLJIT_INLINE int get_map_jit_flag()
#endif /* MAP_ANON */
#else /* !SLJIT_CONFIG_X86 */
#if !(defined SLJIT_CONFIG_ARM && SLJIT_CONFIG_ARM)
-#error Unsupported architecture
+#error "Unsupported architecture"
#endif /* SLJIT_CONFIG_ARM */
+#include <AvailabilityMacros.h>
#include <pthread.h>
#define SLJIT_MAP_JIT (MAP_JIT)
@@ -149,7 +150,11 @@ static SLJIT_INLINE int get_map_jit_flag()
static SLJIT_INLINE void apple_update_wx_flags(sljit_s32 enable_exec)
{
+#if MAC_OS_X_VERSION_MIN_REQUIRED >= 110000
pthread_jit_write_protect_np(enable_exec);
+#else
+#error "Must target Big Sur or newer"
+#endif /* BigSur */
}
#endif /* SLJIT_CONFIG_X86 */
#else /* !TARGET_OS_OSX */
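For context, a hedged sketch of the W^X dance this helper supports on Apple Silicon: the MAP_JIT region is flipped writable while instructions are emitted, then flipped back before execution. The 'emit' callback is hypothetical, and apple_update_wx_flags is internal to the allocator.

    static void emit_into_jit_region(void (*emit)(void))
    {
        apple_update_wx_flags(0); /* region becomes writable */
        emit();                   /* copy generated instructions */
        apple_update_wx_flags(1); /* region becomes executable again */
    }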
@@ -187,10 +192,13 @@ static SLJIT_INLINE void* alloc_chunk(sljit_uw size)
if (retval == MAP_FAILED)
return NULL;
+#ifdef __FreeBSD__
+ /* HardenedBSD's mmap lies, so check permissions again */
if (mprotect(retval, size, PROT_READ | PROT_WRITE | PROT_EXEC) < 0) {
munmap(retval, size);
return NULL;
}
+#endif /* FreeBSD */
SLJIT_UPDATE_WX_FLAGS(retval, (uint8_t *)retval + size, 0);
@@ -227,7 +235,7 @@ struct free_block {
#define AS_FREE_BLOCK(base, offset) \
((struct free_block*)(((sljit_u8*)base) + offset))
#define MEM_START(base) ((void*)(((sljit_u8*)base) + sizeof(struct block_header)))
-#define ALIGN_SIZE(size) (((size) + sizeof(struct block_header) + 7) & ~7)
+#define ALIGN_SIZE(size) (((size) + sizeof(struct block_header) + 7u) & ~(sljit_uw)7)
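For a concrete instance of the rounding above: assuming a 16-byte struct block_header on a 64-bit target, a 10-byte request expands to (10 + 16 + 7) & ~7 = 32 bytes, i.e. the header is prepended and the total is rounded up to the next multiple of 8.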
static struct free_block* free_blocks;
static sljit_uw allocated_size;
diff --git a/src/3rdparty/pcre2/src/sljit/sljitLir.c b/src/3rdparty/pcre2/src/sljit/sljitLir.c
index a24a99ab87..6f19300081 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitLir.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitLir.c
@@ -90,26 +90,29 @@
#if !(defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED)
+#define SSIZE_OF(type) ((sljit_s32)sizeof(sljit_ ## type))
+
#define VARIABLE_FLAG_SHIFT (10)
-#define VARIABLE_FLAG_MASK (0x3f << VARIABLE_FLAG_SHIFT)
+/* All variable flags are even. */
+#define VARIABLE_FLAG_MASK (0x3e << VARIABLE_FLAG_SHIFT)
#define GET_FLAG_TYPE(op) ((op) >> VARIABLE_FLAG_SHIFT)
#define GET_OPCODE(op) \
- ((op) & ~(SLJIT_I32_OP | SLJIT_SET_Z | VARIABLE_FLAG_MASK))
+ ((op) & ~(SLJIT_32 | SLJIT_SET_Z | VARIABLE_FLAG_MASK))
#define HAS_FLAGS(op) \
((op) & (SLJIT_SET_Z | VARIABLE_FLAG_MASK))
#define GET_ALL_FLAGS(op) \
- ((op) & (SLJIT_I32_OP | SLJIT_SET_Z | VARIABLE_FLAG_MASK))
+ ((op) & (SLJIT_32 | SLJIT_SET_Z | VARIABLE_FLAG_MASK))
#if (defined SLJIT_64BIT_ARCHITECTURE && SLJIT_64BIT_ARCHITECTURE)
#define TYPE_CAST_NEEDED(op) \
((op) >= SLJIT_MOV_U8 && (op) <= SLJIT_MOV_S32)
-#else
+#else /* !SLJIT_64BIT_ARCHITECTURE */
#define TYPE_CAST_NEEDED(op) \
((op) >= SLJIT_MOV_U8 && (op) <= SLJIT_MOV_S16)
-#endif
+#endif /* SLJIT_64BIT_ARCHITECTURE */
#define BUF_SIZE 4096
@@ -120,17 +123,33 @@
#endif
/* Parameter parsing. */
-#define REG_MASK 0x3f
+#define REG_MASK 0x7f
#define OFFS_REG(reg) (((reg) >> 8) & REG_MASK)
#define OFFS_REG_MASK (REG_MASK << 8)
#define TO_OFFS_REG(reg) ((reg) << 8)
-/* When reg cannot be unused. */
-#define FAST_IS_REG(reg) ((reg) <= REG_MASK)
-/* When reg can be unused. */
-#define SLOW_IS_REG(reg) ((reg) > 0 && (reg) <= REG_MASK)
+#define FAST_IS_REG(reg) ((reg) < REG_MASK)
/* Mask for argument types. */
-#define SLJIT_DEF_MASK ((1 << SLJIT_DEF_SHIFT) - 1)
+#define SLJIT_ARG_MASK 0x7
+#define SLJIT_ARG_FULL_MASK (SLJIT_ARG_MASK | SLJIT_ARG_TYPE_SCRATCH_REG)
+
+/* Mask for register pairs. */
+#define REG_PAIR_MASK 0x7f00
+#define REG_PAIR_FIRST(reg) ((reg) & 0x7f)
+#define REG_PAIR_SECOND(reg) ((reg) >> 8)
+
+/* Mask for sljit_emit_enter. */
+#define SLJIT_KEPT_SAVEDS_COUNT(options) ((options) & 0x3)
+
+/* Getters for SIMD operations; sizes are returned as log2(size). */
+#define SLJIT_SIMD_GET_OPCODE(type) ((type) & 0xff)
+#define SLJIT_SIMD_GET_REG_SIZE(type) (((type) >> 12) & 0x3f)
+#define SLJIT_SIMD_GET_ELEM_SIZE(type) (((type) >> 18) & 0x3f)
+#define SLJIT_SIMD_GET_ELEM2_SIZE(type) (((type) >> 24) & 0x3f)
+
+#define SLJIT_SIMD_CHECK_REG(type) (((type) & 0x3f000) >= SLJIT_SIMD_REG_64 && ((type) & 0x3f000) <= SLJIT_SIMD_REG_512)
+#define SLJIT_SIMD_TYPE_MASK(m) ((sljit_s32)0xff000fff & ~(SLJIT_SIMD_FLOAT | SLJIT_SIMD_TEST | (m)))
+#define SLJIT_SIMD_TYPE_MASK2(m) ((sljit_s32)0xc0000fff & ~(SLJIT_SIMD_FLOAT | SLJIT_SIMD_TEST | (m)))
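Since the getters return log2 values, byte sizes fall out with a shift; a hedged sketch:

    /* Register width in bytes, e.g. 16 for a 128 bit SIMD register
       (assumption: SLJIT_SIMD_REG_128 encodes log2(16) = 4). */
    static sljit_s32 simd_reg_bytes(sljit_s32 type)
    {
        return (sljit_s32)1 << SLJIT_SIMD_GET_REG_SIZE(type);
    }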
/* Jump flags. */
#define JUMP_LABEL 0x1
@@ -144,16 +163,16 @@
# define PATCH_MD 0x10
#endif
# define TYPE_SHIFT 13
-#endif
+#endif /* SLJIT_CONFIG_X86 */
-#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) || (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)
+#if (defined SLJIT_CONFIG_ARM_V6 && SLJIT_CONFIG_ARM_V6) || (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)
# define IS_BL 0x4
# define PATCH_B 0x8
-#endif
+#endif /* SLJIT_CONFIG_ARM_V6 || SLJIT_CONFIG_ARM_V7 */
-#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+#if (defined SLJIT_CONFIG_ARM_V6 && SLJIT_CONFIG_ARM_V6)
# define CPOOL_SIZE 512
-#endif
+#endif /* SLJIT_CONFIG_ARM_V6 */
#if (defined SLJIT_CONFIG_ARM_THUMB2 && SLJIT_CONFIG_ARM_THUMB2)
# define IS_COND 0x04
@@ -171,7 +190,7 @@
/* BL + imm24 */
# define PATCH_BL 0x60
/* 0xf00 cc code for branches */
-#endif
+#endif /* SLJIT_CONFIG_ARM_THUMB2 */
#if (defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64)
# define IS_COND 0x004
@@ -181,7 +200,7 @@
# define PATCH_COND 0x040
# define PATCH_ABS48 0x080
# define PATCH_ABS64 0x100
-#endif
+#endif /* SLJIT_CONFIG_ARM_64 */
#if (defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC)
# define IS_COND 0x004
@@ -191,9 +210,9 @@
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
# define PATCH_ABS32 0x040
# define PATCH_ABS48 0x080
-#endif
+#endif /* SLJIT_CONFIG_PPC_64 */
# define REMOVE_COND 0x100
-#endif
+#endif /* SLJIT_CONFIG_PPC */
#if (defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS)
# define IS_MOVABLE 0x004
@@ -211,7 +230,7 @@
#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
# define PATCH_ABS32 0x400
# define PATCH_ABS48 0x800
-#endif
+#endif /* SLJIT_CONFIG_MIPS_64 */
/* instruction types */
# define MOVABLE_INS 0
@@ -220,35 +239,46 @@
# define UNMOVABLE_INS 32
/* FPU status register */
# define FCSR_FCC 33
-#endif
+#endif /* SLJIT_CONFIG_MIPS */
-#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
-# define IS_MOVABLE 0x04
-# define IS_COND 0x08
-# define IS_CALL 0x10
-
-# define PATCH_B 0x20
-# define PATCH_CALL 0x40
+#if (defined SLJIT_CONFIG_RISCV && SLJIT_CONFIG_RISCV)
+# define IS_COND 0x004
+# define IS_CALL 0x008
- /* instruction types */
-# define MOVABLE_INS 0
- /* 1 - 31 last destination register */
- /* no destination (i.e: store) */
-# define UNMOVABLE_INS 32
+# define PATCH_B 0x010
+# define PATCH_J 0x020
+
+#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64)
+# define PATCH_REL32 0x040
+# define PATCH_ABS32 0x080
+# define PATCH_ABS44 0x100
+# define PATCH_ABS52 0x200
+#else /* !SLJIT_CONFIG_RISCV_64 */
+# define PATCH_REL32 0x0
+#endif /* SLJIT_CONFIG_RISCV_64 */
+#endif /* SLJIT_CONFIG_RISCV */
+
+#if (defined SLJIT_CONFIG_LOONGARCH && SLJIT_CONFIG_LOONGARCH)
+# define IS_COND 0x004
+# define IS_CALL 0x008
-# define DST_INS_MASK 0xff
+# define PATCH_B 0x010
+# define PATCH_J 0x020
- /* ICC_SET is the same as SET_FLAGS. */
-# define ICC_IS_SET (1 << 23)
-# define FCC_IS_SET (1 << 24)
-#endif
+# define PATCH_REL32 0x040
+# define PATCH_ABS32 0x080
+# define PATCH_ABS52 0x100
+#endif /* SLJIT_CONFIG_LOONGARCH */
/* Stack management. */
#define GET_SAVED_REGISTERS_SIZE(scratches, saveds, extra) \
(((scratches < SLJIT_NUMBER_OF_SCRATCH_REGISTERS ? 0 : (scratches - SLJIT_NUMBER_OF_SCRATCH_REGISTERS)) + \
- (saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? saveds : SLJIT_NUMBER_OF_SAVED_REGISTERS) + \
- extra) * sizeof(sljit_sw))
+ (saveds) + (sljit_s32)(extra)) * (sljit_s32)sizeof(sljit_sw))
+
+#define GET_SAVED_FLOAT_REGISTERS_SIZE(fscratches, fsaveds, type) \
+ (((fscratches < SLJIT_NUMBER_OF_SCRATCH_FLOAT_REGISTERS ? 0 : (fscratches - SLJIT_NUMBER_OF_SCRATCH_FLOAT_REGISTERS)) + \
+ (fsaveds)) * SSIZE_OF(type))
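A worked instance of the size computation, under the assumption that the port defines at least two scratch registers (so the scratches term contributes nothing) and that SLJIT_COMPILE_ASSERT may be used inside a function, as it is elsewhere in this file:

    static void saved_size_example(void)
    {
        /* 3 saved registers plus 1 extra slot -> 4 machine words. */
        SLJIT_COMPILE_ASSERT(GET_SAVED_REGISTERS_SIZE(2, 3, 1)
            == 4 * SSIZE_OF(sw), saved_regs_size_example);
    }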
#define ADJUST_LOCAL_OFFSET(p, i) \
if ((p) == (SLJIT_MEM1(SLJIT_SP))) \
@@ -264,25 +294,49 @@
#if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR)
#if (defined SLJIT_PROT_EXECUTABLE_ALLOCATOR && SLJIT_PROT_EXECUTABLE_ALLOCATOR)
-#include "sljitProtExecAllocator.c"
-#elif (defined SLJIT_WX_EXECUTABLE_ALLOCATOR && SLJIT_WX_EXECUTABLE_ALLOCATOR)
-#include "sljitWXExecAllocator.c"
+
+#if defined(__NetBSD__)
+#include "allocator_src/sljitProtExecAllocatorNetBSD.c"
#else
-#include "sljitExecAllocator.c"
+#include "allocator_src/sljitProtExecAllocatorPosix.c"
#endif
+#elif (defined SLJIT_WX_EXECUTABLE_ALLOCATOR && SLJIT_WX_EXECUTABLE_ALLOCATOR)
+
+#if defined(_WIN32)
+#include "allocator_src/sljitWXExecAllocatorWindows.c"
+#else
+#include "allocator_src/sljitWXExecAllocatorPosix.c"
#endif
-#if (defined SLJIT_PROT_EXECUTABLE_ALLOCATOR && SLJIT_PROT_EXECUTABLE_ALLOCATOR)
-#define SLJIT_ADD_EXEC_OFFSET(ptr, exec_offset) ((sljit_u8 *)(ptr) + (exec_offset))
#else
-#define SLJIT_ADD_EXEC_OFFSET(ptr, exec_offset) ((sljit_u8 *)(ptr))
+
+#if defined(_WIN32)
+#include "allocator_src/sljitExecAllocatorWindows.c"
+#elif defined(__APPLE__)
+#include "allocator_src/sljitExecAllocatorApple.c"
+#elif defined(__FreeBSD__)
+#include "allocator_src/sljitExecAllocatorFreeBSD.c"
+#else
+#include "allocator_src/sljitExecAllocatorPosix.c"
#endif
+#endif
+
+#else /* !SLJIT_EXECUTABLE_ALLOCATOR */
+
#ifndef SLJIT_UPDATE_WX_FLAGS
#define SLJIT_UPDATE_WX_FLAGS(from, to, enable_exec)
#endif
+#endif /* SLJIT_EXECUTABLE_ALLOCATOR */
+
+#if (defined SLJIT_PROT_EXECUTABLE_ALLOCATOR && SLJIT_PROT_EXECUTABLE_ALLOCATOR)
+#define SLJIT_ADD_EXEC_OFFSET(ptr, exec_offset) ((sljit_u8 *)(ptr) + (exec_offset))
+#else
+#define SLJIT_ADD_EXEC_OFFSET(ptr, exec_offset) ((sljit_u8 *)(ptr))
+#endif
+
/* Argument checking features. */
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
@@ -379,11 +433,9 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_compiler* sljit_create_compiler(void *allo
&& (sizeof(sljit_sw) == 4 || sizeof(sljit_sw) == 8)
&& (sizeof(sljit_uw) == 4 || sizeof(sljit_uw) == 8),
invalid_integer_types);
- SLJIT_COMPILE_ASSERT(SLJIT_I32_OP == SLJIT_F32_OP,
- int_op_and_single_op_must_be_the_same);
- SLJIT_COMPILE_ASSERT(SLJIT_REWRITABLE_JUMP != SLJIT_F32_OP,
+ SLJIT_COMPILE_ASSERT(SLJIT_REWRITABLE_JUMP != SLJIT_32,
rewritable_jump_and_single_op_must_not_be_the_same);
- SLJIT_COMPILE_ASSERT(!(SLJIT_EQUAL & 0x1) && !(SLJIT_LESS & 0x1) && !(SLJIT_EQUAL_F64 & 0x1) && !(SLJIT_JUMP & 0x1),
+ SLJIT_COMPILE_ASSERT(!(SLJIT_EQUAL & 0x1) && !(SLJIT_LESS & 0x1) && !(SLJIT_F_EQUAL & 0x1) && !(SLJIT_JUMP & 0x1),
conditional_flags_must_be_even_numbers);
/* Only the non-zero members must be set. */
@@ -415,10 +467,10 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_compiler* sljit_create_compiler(void *allo
compiler->local_size = -1;
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
- compiler->args = -1;
-#endif
+ compiler->args_size = -1;
+#endif /* SLJIT_CONFIG_X86_32 */
-#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+#if (defined SLJIT_CONFIG_ARM_V6 && SLJIT_CONFIG_ARM_V6)
compiler->cpool = (sljit_uw*)SLJIT_MALLOC(CPOOL_SIZE * sizeof(sljit_uw)
+ CPOOL_SIZE * sizeof(sljit_u8), allocator_data);
if (!compiler->cpool) {
@@ -429,15 +481,18 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_compiler* sljit_create_compiler(void *allo
}
compiler->cpool_unique = (sljit_u8*)(compiler->cpool + CPOOL_SIZE);
compiler->cpool_diff = 0xffffffff;
-#endif
+#endif /* SLJIT_CONFIG_ARM_V6 */
#if (defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS)
compiler->delay_slot = UNMOVABLE_INS;
-#endif
+#endif /* SLJIT_CONFIG_MIPS */
-#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
- compiler->delay_slot = UNMOVABLE_INS;
-#endif
+#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) \
+ || (defined SLJIT_DEBUG && SLJIT_DEBUG)
+ compiler->last_flags = 0;
+ compiler->last_return = -1;
+ compiler->logical_local_size = 0;
+#endif /* SLJIT_ARGUMENT_CHECKS || SLJIT_DEBUG */
#if (defined SLJIT_NEEDS_COMPILER_INIT && SLJIT_NEEDS_COMPILER_INIT)
if (!compiler_initialized) {
@@ -470,7 +525,7 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_free_compiler(struct sljit_compiler *compile
SLJIT_FREE(curr, allocator_data);
}
-#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+#if (defined SLJIT_CONFIG_ARM_V6 && SLJIT_CONFIG_ARM_V6)
SLJIT_FREE(compiler->cpool, allocator_data);
#endif
SLJIT_FREE(compiler, allocator_data);
@@ -488,7 +543,7 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_free_code(void* code, void *exec_allocator_d
SLJIT_UNUSED_ARG(exec_allocator_data);
/* Remove thumb mode flag. */
- SLJIT_FREE_EXEC((void*)((sljit_uw)code & ~0x1), exec_allocator_data);
+ SLJIT_FREE_EXEC((void*)((sljit_uw)code & ~(sljit_uw)0x1), exec_allocator_data);
}
#elif (defined SLJIT_INDIRECT_CALL && SLJIT_INDIRECT_CALL)
SLJIT_API_FUNC_ATTRIBUTE void sljit_free_code(void* code, void *exec_allocator_data)
@@ -511,7 +566,7 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_free_code(void* code, void *exec_allocator_d
SLJIT_API_FUNC_ATTRIBUTE void sljit_set_label(struct sljit_jump *jump, struct sljit_label* label)
{
if (SLJIT_LIKELY(!!jump) && SLJIT_LIKELY(!!label)) {
- jump->flags &= ~JUMP_ADDR;
+ jump->flags &= (sljit_uw)~JUMP_ADDR;
jump->flags |= JUMP_LABEL;
jump->u.label = label;
}
@@ -520,7 +575,7 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_set_label(struct sljit_jump *jump, struct sl
SLJIT_API_FUNC_ATTRIBUTE void sljit_set_target(struct sljit_jump *jump, sljit_uw target)
{
if (SLJIT_LIKELY(!!jump)) {
- jump->flags &= ~JUMP_LABEL;
+ jump->flags &= (sljit_uw)~JUMP_LABEL;
jump->flags |= JUMP_ADDR;
jump->u.target = target;
}
@@ -533,7 +588,7 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_set_put_label(struct sljit_put_label *put_la
}
#define SLJIT_CURRENT_FLAGS_ALL \
- (SLJIT_CURRENT_FLAGS_I32_OP | SLJIT_CURRENT_FLAGS_ADD_SUB | SLJIT_CURRENT_FLAGS_COMPARE)
+ (SLJIT_CURRENT_FLAGS_32 | SLJIT_CURRENT_FLAGS_ADD | SLJIT_CURRENT_FLAGS_SUB | SLJIT_CURRENT_FLAGS_COMPARE)
SLJIT_API_FUNC_ATTRIBUTE void sljit_set_current_flags(struct sljit_compiler *compiler, sljit_s32 current_flags)
{
@@ -547,7 +602,7 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_set_current_flags(struct sljit_compiler *com
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
compiler->last_flags = 0;
if ((current_flags & ~(VARIABLE_FLAG_MASK | SLJIT_SET_Z | SLJIT_CURRENT_FLAGS_ALL)) == 0) {
- compiler->last_flags = GET_FLAG_TYPE(current_flags) | (current_flags & (SLJIT_I32_OP | SLJIT_SET_Z));
+ compiler->last_flags = GET_FLAG_TYPE(current_flags) | (current_flags & (SLJIT_32 | SLJIT_SET_Z));
}
#endif
}
@@ -607,7 +662,7 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_alloc_memory(struct sljit_compiler *compile
return NULL;
size = (size + 3) & ~3;
#endif
- return ensure_abuf(compiler, size);
+ return ensure_abuf(compiler, (sljit_uw)size);
}
static SLJIT_INLINE void reverse_buf(struct sljit_compiler *compiler)
@@ -626,20 +681,6 @@ static SLJIT_INLINE void reverse_buf(struct sljit_compiler *compiler)
compiler->buf = prev;
}
-static SLJIT_INLINE sljit_s32 get_arg_count(sljit_s32 arg_types)
-{
- sljit_s32 arg_count = 0;
-
- arg_types >>= SLJIT_DEF_SHIFT;
- while (arg_types) {
- arg_count++;
- arg_types >>= SLJIT_DEF_SHIFT;
- }
-
- return arg_count;
-}
-
-
/* Only used in RISC architectures where the instruction size is constant */
#if !(defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86) \
&& !(defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X)
@@ -679,6 +720,7 @@ static SLJIT_INLINE void set_emit_enter(struct sljit_compiler *compiler,
compiler->fscratches = fscratches;
compiler->fsaveds = fsaveds;
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ compiler->last_return = args & SLJIT_ARG_MASK;
compiler->logical_local_size = local_size;
#endif
}
@@ -696,6 +738,7 @@ static SLJIT_INLINE void set_set_context(struct sljit_compiler *compiler,
compiler->fscratches = fscratches;
compiler->fsaveds = fsaveds;
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ compiler->last_return = args & SLJIT_ARG_MASK;
compiler->logical_local_size = local_size;
#endif
}
@@ -711,7 +754,7 @@ static SLJIT_INLINE void set_label(struct sljit_label *label, struct sljit_compi
compiler->last_label = label;
}
-static SLJIT_INLINE void set_jump(struct sljit_jump *jump, struct sljit_compiler *compiler, sljit_s32 flags)
+static SLJIT_INLINE void set_jump(struct sljit_jump *jump, struct sljit_compiler *compiler, sljit_u32 flags)
{
jump->next = NULL;
jump->flags = flags;
@@ -751,13 +794,62 @@ static SLJIT_INLINE void set_put_label(struct sljit_put_label *put_label, struct
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+static sljit_s32 function_check_arguments(sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds, sljit_s32 fscratches)
+{
+ sljit_s32 word_arg_count, scratch_arg_end, saved_arg_count, float_arg_count, curr_type;
+
+ curr_type = (arg_types & SLJIT_ARG_FULL_MASK);
+
+ if (curr_type >= SLJIT_ARG_TYPE_F64) {
+ if (curr_type > SLJIT_ARG_TYPE_F32 || fscratches == 0)
+ return 0;
+ } else if (curr_type >= SLJIT_ARG_TYPE_W) {
+ if (scratches == 0)
+ return 0;
+ }
+
+ arg_types >>= SLJIT_ARG_SHIFT;
+
+ word_arg_count = 0;
+ scratch_arg_end = 0;
+ saved_arg_count = 0;
+ float_arg_count = 0;
+ while (arg_types != 0) {
+ if (word_arg_count + float_arg_count >= 4)
+ return 0;
+
+ curr_type = (arg_types & SLJIT_ARG_MASK);
+
+ if (arg_types & SLJIT_ARG_TYPE_SCRATCH_REG) {
+ if (saveds == -1 || curr_type < SLJIT_ARG_TYPE_W || curr_type > SLJIT_ARG_TYPE_P)
+ return 0;
+
+ word_arg_count++;
+ scratch_arg_end = word_arg_count;
+ } else {
+ if (curr_type < SLJIT_ARG_TYPE_W || curr_type > SLJIT_ARG_TYPE_F32)
+ return 0;
+
+ if (curr_type < SLJIT_ARG_TYPE_F64) {
+ word_arg_count++;
+ saved_arg_count++;
+ } else
+ float_arg_count++;
+ }
+
+ arg_types >>= SLJIT_ARG_SHIFT;
+ }
+
+ if (saveds == -1)
+ return (word_arg_count <= scratches && float_arg_count <= fscratches);
+
+ return (saved_arg_count <= saveds && scratch_arg_end <= scratches && float_arg_count <= fscratches);
+}
+
#define FUNCTION_CHECK_IS_REG(r) \
(((r) >= SLJIT_R0 && (r) < (SLJIT_R0 + compiler->scratches)) \
- || ((r) > (SLJIT_S0 - compiler->saveds) && (r) <= SLJIT_S0))
-
-#define FUNCTION_CHECK_IS_FREG(fr) \
- (((fr) >= SLJIT_FR0 && (fr) < (SLJIT_FR0 + compiler->fscratches)) \
- || ((fr) > (SLJIT_FS0 - compiler->fsaveds) && (fr) <= SLJIT_FS0))
+ || ((r) > (SLJIT_S0 - compiler->saveds) && (r) <= SLJIT_S0) \
+ || ((r) >= SLJIT_TMP_REGISTER_BASE && (r) < (SLJIT_TMP_REGISTER_BASE + SLJIT_NUMBER_OF_TEMPORARY_REGISTERS)))
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
#define CHECK_IF_VIRTUAL_REGISTER(p) ((p) <= SLJIT_S3 && (p) >= SLJIT_S8)
@@ -767,20 +859,23 @@ static SLJIT_INLINE void set_put_label(struct sljit_put_label *put_label, struct
static sljit_s32 function_check_src_mem(struct sljit_compiler *compiler, sljit_s32 p, sljit_sw i)
{
- if (compiler->scratches == -1 || compiler->saveds == -1)
+ if (compiler->scratches == -1)
return 0;
if (!(p & SLJIT_MEM))
return 0;
- if (!((p & REG_MASK) == SLJIT_UNUSED || FUNCTION_CHECK_IS_REG(p & REG_MASK)))
+ if (p == SLJIT_MEM1(SLJIT_SP))
+ return (i >= 0 && i < compiler->logical_local_size);
+
+ if (!(!(p & REG_MASK) || FUNCTION_CHECK_IS_REG(p & REG_MASK)))
return 0;
if (CHECK_IF_VIRTUAL_REGISTER(p & REG_MASK))
return 0;
if (p & OFFS_REG_MASK) {
- if ((p & REG_MASK) == SLJIT_UNUSED)
+ if (!(p & REG_MASK))
return 0;
if (!(FUNCTION_CHECK_IS_REG(OFFS_REG(p))))
@@ -801,7 +896,7 @@ static sljit_s32 function_check_src_mem(struct sljit_compiler *compiler, sljit_s
static sljit_s32 function_check_src(struct sljit_compiler *compiler, sljit_s32 p, sljit_sw i)
{
- if (compiler->scratches == -1 || compiler->saveds == -1)
+ if (compiler->scratches == -1)
return 0;
if (FUNCTION_CHECK_IS_REG(p))
@@ -810,48 +905,79 @@ static sljit_s32 function_check_src(struct sljit_compiler *compiler, sljit_s32 p
if (p == SLJIT_IMM)
return 1;
- if (p == SLJIT_MEM1(SLJIT_SP))
- return (i >= 0 && i < compiler->logical_local_size);
-
return function_check_src_mem(compiler, p, i);
}
#define FUNCTION_CHECK_SRC(p, i) \
CHECK_ARGUMENT(function_check_src(compiler, p, i));
-static sljit_s32 function_check_dst(struct sljit_compiler *compiler, sljit_s32 p, sljit_sw i, sljit_s32 unused)
+static sljit_s32 function_check_dst(struct sljit_compiler *compiler, sljit_s32 p, sljit_sw i)
{
- if (compiler->scratches == -1 || compiler->saveds == -1)
+ if (compiler->scratches == -1)
return 0;
- if (FUNCTION_CHECK_IS_REG(p) || ((unused) && (p) == SLJIT_UNUSED))
+ if (FUNCTION_CHECK_IS_REG(p))
return (i == 0);
- if (p == SLJIT_MEM1(SLJIT_SP))
- return (i >= 0 && i < compiler->logical_local_size);
+ return function_check_src_mem(compiler, p, i);
+}
+
+#define FUNCTION_CHECK_DST(p, i) \
+ CHECK_ARGUMENT(function_check_dst(compiler, p, i));
+
+#if (defined SLJIT_CONFIG_ARM_32 && SLJIT_CONFIG_ARM_32) \
+ || (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
+
+#define FUNCTION_CHECK_IS_FREG(fr, is_32) \
+ function_check_is_freg(compiler, (fr), (is_32))
+
+static sljit_s32 function_check_is_freg(struct sljit_compiler *compiler, sljit_s32 fr, sljit_s32 is_32);
+
+#define FUNCTION_FCHECK(p, i, is_32) \
+ CHECK_ARGUMENT(function_fcheck(compiler, (p), (i), (is_32)));
+
+static sljit_s32 function_fcheck(struct sljit_compiler *compiler, sljit_s32 p, sljit_sw i, sljit_s32 is_32)
+{
+ if (compiler->scratches == -1)
+ return 0;
+
+ if (FUNCTION_CHECK_IS_FREG(p, is_32))
+ return (i == 0);
return function_check_src_mem(compiler, p, i);
}
-#define FUNCTION_CHECK_DST(p, i, unused) \
- CHECK_ARGUMENT(function_check_dst(compiler, p, i, unused));
+#else /* !SLJIT_CONFIG_ARM_32 && !SLJIT_CONFIG_MIPS_32 */
+#define FUNCTION_CHECK_IS_FREG(fr, is_32) \
+ function_check_is_freg(compiler, (fr))
+
+static sljit_s32 function_check_is_freg(struct sljit_compiler *compiler, sljit_s32 fr)
+{
+ if (compiler->scratches == -1)
+ return 0;
+
+ return (fr >= SLJIT_FR0 && fr < (SLJIT_FR0 + compiler->fscratches))
+ || (fr > (SLJIT_FS0 - compiler->fsaveds) && fr <= SLJIT_FS0)
+ || (fr >= SLJIT_TMP_FREGISTER_BASE && fr < (SLJIT_TMP_FREGISTER_BASE + SLJIT_NUMBER_OF_TEMPORARY_FLOAT_REGISTERS));
+}
+
+#define FUNCTION_FCHECK(p, i, is_32) \
+ CHECK_ARGUMENT(function_fcheck(compiler, (p), (i)));
static sljit_s32 function_fcheck(struct sljit_compiler *compiler, sljit_s32 p, sljit_sw i)
{
- if (compiler->scratches == -1 || compiler->saveds == -1)
+ if (compiler->scratches == -1)
return 0;
- if (FUNCTION_CHECK_IS_FREG(p))
+ if ((p >= SLJIT_FR0 && p < (SLJIT_FR0 + compiler->fscratches))
+ || (p > (SLJIT_FS0 - compiler->fsaveds) && p <= SLJIT_FS0)
+ || (p >= SLJIT_TMP_FREGISTER_BASE && p < (SLJIT_TMP_FREGISTER_BASE + SLJIT_NUMBER_OF_TEMPORARY_FLOAT_REGISTERS)))
return (i == 0);
- if (p == SLJIT_MEM1(SLJIT_SP))
- return (i >= 0 && i < compiler->logical_local_size);
-
return function_check_src_mem(compiler, p, i);
}
-#define FUNCTION_FCHECK(p, i) \
- CHECK_ARGUMENT(function_fcheck(compiler, p, i));
+#endif /* SLJIT_CONFIG_ARM_32 || SLJIT_CONFIG_MIPS_32 */
#endif /* SLJIT_ARGUMENT_CHECKS */
@@ -864,7 +990,11 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_compiler_verbose(struct sljit_compiler *comp
#if (defined SLJIT_64BIT_ARCHITECTURE && SLJIT_64BIT_ARCHITECTURE)
#ifdef _WIN64
+#ifdef __GNUC__
+# define SLJIT_PRINT_D "ll"
+#else
# define SLJIT_PRINT_D "I64"
+#endif
#else
# define SLJIT_PRINT_D "l"
#endif
@@ -876,23 +1006,35 @@ static void sljit_verbose_reg(struct sljit_compiler *compiler, sljit_s32 r)
{
if (r < (SLJIT_R0 + compiler->scratches))
fprintf(compiler->verbose, "r%d", r - SLJIT_R0);
- else if (r != SLJIT_SP)
+ else if (r < SLJIT_SP)
fprintf(compiler->verbose, "s%d", SLJIT_NUMBER_OF_REGISTERS - r);
- else
+ else if (r == SLJIT_SP)
fprintf(compiler->verbose, "sp");
+ else
+ fprintf(compiler->verbose, "t%d", r - SLJIT_TMP_REGISTER_BASE);
}
static void sljit_verbose_freg(struct sljit_compiler *compiler, sljit_s32 r)
{
+#if (defined SLJIT_CONFIG_ARM_32 && SLJIT_CONFIG_ARM_32) \
+ || (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
+ if (r >= SLJIT_F64_SECOND(SLJIT_FR0)) {
+ fprintf(compiler->verbose, "^");
+ r -= SLJIT_F64_SECOND(0);
+ }
+#endif /* SLJIT_CONFIG_ARM_32 || SLJIT_CONFIG_MIPS_32 */
+
if (r < (SLJIT_FR0 + compiler->fscratches))
fprintf(compiler->verbose, "fr%d", r - SLJIT_FR0);
- else
+ else if (r < SLJIT_TMP_FREGISTER_BASE)
fprintf(compiler->verbose, "fs%d", SLJIT_NUMBER_OF_FLOAT_REGISTERS - r);
+ else
+ fprintf(compiler->verbose, "ft%d", r - SLJIT_TMP_FREGISTER_BASE);
}
static void sljit_verbose_param(struct sljit_compiler *compiler, sljit_s32 p, sljit_sw i)
{
- if ((p) & SLJIT_IMM)
+ if ((p) == SLJIT_IMM)
fprintf(compiler->verbose, "#%" SLJIT_PRINT_D "d", (i));
else if ((p) & SLJIT_MEM) {
if ((p) & REG_MASK) {
@@ -910,10 +1052,8 @@ static void sljit_verbose_param(struct sljit_compiler *compiler, sljit_s32 p, sl
}
else
fprintf(compiler->verbose, "[#%" SLJIT_PRINT_D "d]", (i));
- } else if (p)
+ } else
sljit_verbose_reg(compiler, p);
- else
- fprintf(compiler->verbose, "unused");
}
static void sljit_verbose_fparam(struct sljit_compiler *compiler, sljit_s32 p, sljit_sw i)
@@ -940,63 +1080,87 @@ static void sljit_verbose_fparam(struct sljit_compiler *compiler, sljit_s32 p, s
}
static const char* op0_names[] = {
- (char*)"breakpoint", (char*)"nop", (char*)"lmul.uw", (char*)"lmul.sw",
- (char*)"divmod.u", (char*)"divmod.s", (char*)"div.u", (char*)"div.s",
- (char*)"endbr", (char*)"skip_frames_before_return"
+ "breakpoint", "nop", "lmul.uw", "lmul.sw",
+ "divmod.u", "divmod.s", "div.u", "div.s",
+ "endbr", "skip_frames_before_return"
};
static const char* op1_names[] = {
- (char*)"", (char*)".u8", (char*)".s8", (char*)".u16",
- (char*)".s16", (char*)".u32", (char*)".s32", (char*)".p",
- (char*)"", (char*)".u8", (char*)".s8", (char*)".u16",
- (char*)".s16", (char*)".u32", (char*)".s32", (char*)".p",
- (char*)"not", (char*)"neg", (char*)"clz",
+ "mov", "mov", "mov", "mov",
+ "mov", "mov", "mov", "mov",
+ "mov", "clz", "ctz", "rev",
+ "rev", "rev", "rev", "rev"
+};
+
+static const char* op1_types[] = {
+ "", ".u8", ".s8", ".u16",
+ ".s16", ".u32", ".s32", "32",
+ ".p", "", "", "",
+ ".u16", ".s16", ".u32", ".s32"
};
static const char* op2_names[] = {
- (char*)"add", (char*)"addc", (char*)"sub", (char*)"subc",
- (char*)"mul", (char*)"and", (char*)"or", (char*)"xor",
- (char*)"shl", (char*)"lshr", (char*)"ashr",
+ "add", "addc", "sub", "subc",
+ "mul", "and", "or", "xor",
+ "shl", "mshl", "lshr", "mlshr",
+ "ashr", "mashr", "rotl", "rotr"
};
-static const char* op_src_names[] = {
- (char*)"fast_return", (char*)"skip_frames_before_fast_return",
- (char*)"prefetch_l1", (char*)"prefetch_l2",
- (char*)"prefetch_l3", (char*)"prefetch_once",
+static const char* op_src_dst_names[] = {
+ "fast_return", "skip_frames_before_fast_return",
+ "prefetch_l1", "prefetch_l2",
+ "prefetch_l3", "prefetch_once",
+ "fast_enter", "get_return_address"
};
static const char* fop1_names[] = {
- (char*)"mov", (char*)"conv", (char*)"conv", (char*)"conv",
- (char*)"conv", (char*)"conv", (char*)"cmp", (char*)"neg",
- (char*)"abs",
+ "mov", "conv", "conv", "conv",
+ "conv", "conv", "conv", "conv",
+ "cmp", "neg", "abs",
+};
+
+static const char* fop1_conv_types[] = {
+ "sw", "s32", "sw", "s32",
+ "uw", "u32"
};
static const char* fop2_names[] = {
- (char*)"add", (char*)"sub", (char*)"mul", (char*)"div"
+ "add", "sub", "mul", "div"
+};
+
+static const char* fop2r_names[] = {
+ "copysign"
};
-#define JUMP_POSTFIX(type) \
- ((type & 0xff) <= SLJIT_NOT_OVERFLOW ? ((type & SLJIT_I32_OP) ? "32" : "") \
- : ((type & 0xff) <= SLJIT_ORDERED_F64 ? ((type & SLJIT_F32_OP) ? ".f32" : ".f64") : ""))
-
-static char* jump_names[] = {
- (char*)"equal", (char*)"not_equal",
- (char*)"less", (char*)"greater_equal",
- (char*)"greater", (char*)"less_equal",
- (char*)"sig_less", (char*)"sig_greater_equal",
- (char*)"sig_greater", (char*)"sig_less_equal",
- (char*)"overflow", (char*)"not_overflow",
- (char*)"carry", (char*)"",
- (char*)"equal", (char*)"not_equal",
- (char*)"less", (char*)"greater_equal",
- (char*)"greater", (char*)"less_equal",
- (char*)"unordered", (char*)"ordered",
- (char*)"jump", (char*)"fast_call",
- (char*)"call", (char*)"call.cdecl"
+static const char* simd_op2_names[] = {
+ "and", "or", "xor"
};
-static char* call_arg_names[] = {
- (char*)"void", (char*)"sw", (char*)"uw", (char*)"s32", (char*)"u32", (char*)"f32", (char*)"f64"
+static const char* jump_names[] = {
+ "equal", "not_equal",
+ "less", "greater_equal",
+ "greater", "less_equal",
+ "sig_less", "sig_greater_equal",
+ "sig_greater", "sig_less_equal",
+ "overflow", "not_overflow",
+ "carry", "not_carry",
+ "atomic_stored", "atomic_not_stored",
+ "f_equal", "f_not_equal",
+ "f_less", "f_greater_equal",
+ "f_greater", "f_less_equal",
+ "unordered", "ordered",
+ "ordered_equal", "unordered_or_not_equal",
+ "ordered_less", "unordered_or_greater_equal",
+ "ordered_greater", "unordered_or_less_equal",
+ "unordered_or_equal", "ordered_not_equal",
+ "unordered_or_less", "ordered_greater_equal",
+ "unordered_or_greater", "ordered_less_equal",
+ "jump", "fast_call",
+ "call", "call_reg_arg"
+};
+
+static const char* call_arg_names[] = {
+ "void", "w", "32", "p", "f64", "f32"
};
#endif /* SLJIT_VERBOSE */
@@ -1008,6 +1172,8 @@ static char* call_arg_names[] = {
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) \
|| (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
+#define SLJIT_SKIP_CHECKS(compiler) (compiler)->skip_checks = 1
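+/* Lets a compound emitter delegate to another emit function without re-validating arguments; the check routines that honour it clear the flag and return early. */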
+
static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_generate_code(struct sljit_compiler *compiler)
{
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
@@ -1032,48 +1198,53 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_enter(struct sljit_compil
sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
-#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- sljit_s32 types, arg_count, curr_type;
-#endif
-
SLJIT_UNUSED_ARG(compiler);
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- CHECK_ARGUMENT(!(options & ~SLJIT_F64_ALIGNMENT));
+ if (options & SLJIT_ENTER_REG_ARG) {
+ CHECK_ARGUMENT(!(options & ~(0x3 | SLJIT_ENTER_REG_ARG)));
+ } else {
+ CHECK_ARGUMENT(options == 0);
+ }
+ CHECK_ARGUMENT(SLJIT_KEPT_SAVEDS_COUNT(options) <= 3 && SLJIT_KEPT_SAVEDS_COUNT(options) <= saveds);
CHECK_ARGUMENT(scratches >= 0 && scratches <= SLJIT_NUMBER_OF_REGISTERS);
- CHECK_ARGUMENT(saveds >= 0 && saveds <= SLJIT_NUMBER_OF_REGISTERS);
+ CHECK_ARGUMENT(saveds >= 0 && saveds <= SLJIT_NUMBER_OF_SAVED_REGISTERS);
CHECK_ARGUMENT(scratches + saveds <= SLJIT_NUMBER_OF_REGISTERS);
CHECK_ARGUMENT(fscratches >= 0 && fscratches <= SLJIT_NUMBER_OF_FLOAT_REGISTERS);
- CHECK_ARGUMENT(fsaveds >= 0 && fsaveds <= SLJIT_NUMBER_OF_FLOAT_REGISTERS);
+ CHECK_ARGUMENT(fsaveds >= 0 && fsaveds <= SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS);
CHECK_ARGUMENT(fscratches + fsaveds <= SLJIT_NUMBER_OF_FLOAT_REGISTERS);
CHECK_ARGUMENT(local_size >= 0 && local_size <= SLJIT_MAX_LOCAL_SIZE);
- CHECK_ARGUMENT((arg_types & SLJIT_DEF_MASK) == 0);
-
- types = (arg_types >> SLJIT_DEF_SHIFT);
- arg_count = 0;
- while (types != 0 && arg_count < 3) {
- curr_type = (types & SLJIT_DEF_MASK);
- CHECK_ARGUMENT(curr_type == SLJIT_ARG_TYPE_SW || curr_type == SLJIT_ARG_TYPE_UW);
- arg_count++;
- types >>= SLJIT_DEF_SHIFT;
- }
- CHECK_ARGUMENT(arg_count <= saveds && types == 0);
+ CHECK_ARGUMENT((arg_types & SLJIT_ARG_FULL_MASK) <= SLJIT_ARG_TYPE_F32);
+ CHECK_ARGUMENT(function_check_arguments(arg_types, scratches, (options & SLJIT_ENTER_REG_ARG) ? 0 : saveds, fscratches));
compiler->last_flags = 0;
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
if (SLJIT_UNLIKELY(!!compiler->verbose)) {
- fprintf(compiler->verbose, " enter options:%s args[", (options & SLJIT_F64_ALIGNMENT) ? "f64_align" : "");
-
- arg_types >>= SLJIT_DEF_SHIFT;
- while (arg_types) {
- fprintf(compiler->verbose, "%s", call_arg_names[arg_types & SLJIT_DEF_MASK]);
- arg_types >>= SLJIT_DEF_SHIFT;
- if (arg_types)
- fprintf(compiler->verbose, ",");
+ fprintf(compiler->verbose, " enter ret[%s", call_arg_names[arg_types & SLJIT_ARG_MASK]);
+
+ arg_types >>= SLJIT_ARG_SHIFT;
+ if (arg_types) {
+ fprintf(compiler->verbose, "], args[");
+ do {
+ fprintf(compiler->verbose, "%s%s", call_arg_names[arg_types & SLJIT_ARG_MASK],
+ (arg_types & SLJIT_ARG_TYPE_SCRATCH_REG) ? "_r" : "");
+ arg_types >>= SLJIT_ARG_SHIFT;
+ if (arg_types)
+ fprintf(compiler->verbose, ",");
+ } while (arg_types);
+ }
+
+ fprintf(compiler->verbose, "],");
+
+ if (options & SLJIT_ENTER_REG_ARG) {
+ fprintf(compiler->verbose, " enter:reg_arg,");
+
+ if (SLJIT_KEPT_SAVEDS_COUNT(options) > 0)
+ fprintf(compiler->verbose, " keep:%d,", SLJIT_KEPT_SAVEDS_COUNT(options));
}
- fprintf(compiler->verbose, "] scratches:%d saveds:%d fscratches:%d fsaveds:%d local_size:%d\n",
+ fprintf(compiler->verbose, " scratches:%d, saveds:%d, fscratches:%d, fsaveds:%d, local_size:%d\n",
scratches, saveds, fscratches, fsaveds, local_size);
}
#endif
@@ -1084,89 +1255,140 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_set_context(struct sljit_compi
sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
-#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- sljit_s32 types, arg_count, curr_type;
-#endif
-
SLJIT_UNUSED_ARG(compiler);
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- CHECK_ARGUMENT(!(options & ~SLJIT_F64_ALIGNMENT));
+ if (options & SLJIT_ENTER_REG_ARG) {
+ CHECK_ARGUMENT(!(options & ~(0x3 | SLJIT_ENTER_REG_ARG)));
+ } else {
+ CHECK_ARGUMENT(options == 0);
+ }
+ CHECK_ARGUMENT(SLJIT_KEPT_SAVEDS_COUNT(options) <= 3 && SLJIT_KEPT_SAVEDS_COUNT(options) <= saveds);
CHECK_ARGUMENT(scratches >= 0 && scratches <= SLJIT_NUMBER_OF_REGISTERS);
- CHECK_ARGUMENT(saveds >= 0 && saveds <= SLJIT_NUMBER_OF_REGISTERS);
+ CHECK_ARGUMENT(saveds >= 0 && saveds <= SLJIT_NUMBER_OF_SAVED_REGISTERS);
CHECK_ARGUMENT(scratches + saveds <= SLJIT_NUMBER_OF_REGISTERS);
CHECK_ARGUMENT(fscratches >= 0 && fscratches <= SLJIT_NUMBER_OF_FLOAT_REGISTERS);
- CHECK_ARGUMENT(fsaveds >= 0 && fsaveds <= SLJIT_NUMBER_OF_FLOAT_REGISTERS);
+ CHECK_ARGUMENT(fsaveds >= 0 && fsaveds <= SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS);
CHECK_ARGUMENT(fscratches + fsaveds <= SLJIT_NUMBER_OF_FLOAT_REGISTERS);
CHECK_ARGUMENT(local_size >= 0 && local_size <= SLJIT_MAX_LOCAL_SIZE);
-
- types = (arg_types >> SLJIT_DEF_SHIFT);
- arg_count = 0;
- while (types != 0 && arg_count < 3) {
- curr_type = (types & SLJIT_DEF_MASK);
- CHECK_ARGUMENT(curr_type == SLJIT_ARG_TYPE_SW || curr_type == SLJIT_ARG_TYPE_UW);
- arg_count++;
- types >>= SLJIT_DEF_SHIFT;
- }
- CHECK_ARGUMENT(arg_count <= saveds && types == 0);
+ CHECK_ARGUMENT((arg_types & SLJIT_ARG_FULL_MASK) < SLJIT_ARG_TYPE_F64);
+ CHECK_ARGUMENT(function_check_arguments(arg_types, scratches, (options & SLJIT_ENTER_REG_ARG) ? 0 : saveds, fscratches));
compiler->last_flags = 0;
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
if (SLJIT_UNLIKELY(!!compiler->verbose)) {
- fprintf(compiler->verbose, " set_context options:%s args[", (options & SLJIT_F64_ALIGNMENT) ? "f64_align" : "");
-
- arg_types >>= SLJIT_DEF_SHIFT;
- while (arg_types) {
- fprintf(compiler->verbose, "%s", call_arg_names[arg_types & SLJIT_DEF_MASK]);
- arg_types >>= SLJIT_DEF_SHIFT;
- if (arg_types)
- fprintf(compiler->verbose, ",");
+ fprintf(compiler->verbose, " set_context ret[%s", call_arg_names[arg_types & SLJIT_ARG_MASK]);
+
+ arg_types >>= SLJIT_ARG_SHIFT;
+ if (arg_types) {
+ fprintf(compiler->verbose, "], args[");
+ do {
+ fprintf(compiler->verbose, "%s%s", call_arg_names[arg_types & SLJIT_ARG_MASK],
+ (arg_types & SLJIT_ARG_TYPE_SCRATCH_REG) ? "_r" : "");
+ arg_types >>= SLJIT_ARG_SHIFT;
+ if (arg_types)
+ fprintf(compiler->verbose, ",");
+ } while (arg_types);
+ }
+
+ fprintf(compiler->verbose, "],");
+
+ if (options & SLJIT_ENTER_REG_ARG) {
+ fprintf(compiler->verbose, " enter:reg_arg,");
+
+ if (SLJIT_KEPT_SAVEDS_COUNT(options) > 0)
+ fprintf(compiler->verbose, " keep:%d,", SLJIT_KEPT_SAVEDS_COUNT(options));
}
- fprintf(compiler->verbose, "] scratches:%d saveds:%d fscratches:%d fsaveds:%d local_size:%d\n",
+ fprintf(compiler->verbose, " scratches:%d, saveds:%d, fscratches:%d, fsaveds:%d, local_size:%d\n",
scratches, saveds, fscratches, fsaveds, local_size);
}
#endif
CHECK_RETURN_OK;
}
+static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_return_void(struct sljit_compiler *compiler)
+{
+ if (SLJIT_UNLIKELY(compiler->skip_checks)) {
+ compiler->skip_checks = 0;
+ CHECK_RETURN_OK;
+ }
+
+#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ CHECK_ARGUMENT(compiler->last_return == SLJIT_ARG_TYPE_RET_VOID);
+#endif
+
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
+ if (SLJIT_UNLIKELY(!!compiler->verbose)) {
+ fprintf(compiler->verbose, " return_void\n");
+ }
+#endif
+ CHECK_RETURN_OK;
+}
+
static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw)
{
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
CHECK_ARGUMENT(compiler->scratches >= 0);
- if (op != SLJIT_UNUSED) {
- CHECK_ARGUMENT(op >= SLJIT_MOV && op <= SLJIT_MOV_P);
+
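+ /* The move opcode must match the return type recorded by the last enter/set_context. */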
+ switch (compiler->last_return) {
+ case SLJIT_ARG_TYPE_W:
+ CHECK_ARGUMENT(op >= SLJIT_MOV && op <= SLJIT_MOV_S32);
+ break;
+ case SLJIT_ARG_TYPE_32:
+ CHECK_ARGUMENT(op == SLJIT_MOV32 || (op >= SLJIT_MOV32_U8 && op <= SLJIT_MOV32_S16));
+ break;
+ case SLJIT_ARG_TYPE_P:
+ CHECK_ARGUMENT(op == SLJIT_MOV_P);
+ break;
+ case SLJIT_ARG_TYPE_F64:
+ CHECK_ARGUMENT(sljit_has_cpu_feature(SLJIT_HAS_FPU));
+ CHECK_ARGUMENT(op == SLJIT_MOV_F64);
+ break;
+ case SLJIT_ARG_TYPE_F32:
+ CHECK_ARGUMENT(sljit_has_cpu_feature(SLJIT_HAS_FPU));
+ CHECK_ARGUMENT(op == SLJIT_MOV_F32);
+ break;
+ default:
+ /* Context not initialized, void, etc. */
+ CHECK_ARGUMENT(0);
+ break;
+ }
+
+ if (GET_OPCODE(op) < SLJIT_MOV_F64) {
FUNCTION_CHECK_SRC(src, srcw);
+ } else {
+ FUNCTION_FCHECK(src, srcw, op & SLJIT_32);
}
- else
- CHECK_ARGUMENT(src == 0 && srcw == 0);
compiler->last_flags = 0;
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
if (SLJIT_UNLIKELY(!!compiler->verbose)) {
- if (op == SLJIT_UNUSED)
- fprintf(compiler->verbose, " return\n");
- else {
- fprintf(compiler->verbose, " return%s ", op1_names[op - SLJIT_OP1_BASE]);
+ if (GET_OPCODE(op) < SLJIT_MOV_F64) {
+ fprintf(compiler->verbose, " return%s%s ", !(op & SLJIT_32) ? "" : "32",
+ op1_types[GET_OPCODE(op) - SLJIT_OP1_BASE]);
sljit_verbose_param(compiler, src, srcw);
- fprintf(compiler->verbose, "\n");
+ } else {
+ fprintf(compiler->verbose, " return%s ", !(op & SLJIT_32) ? ".f64" : ".f32");
+ sljit_verbose_fparam(compiler, src, srcw);
}
+ fprintf(compiler->verbose, "\n");
}
#endif
CHECK_RETURN_OK;
}
-static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
+static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_return_to(struct sljit_compiler *compiler,
+ sljit_s32 src, sljit_sw srcw)
{
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- FUNCTION_CHECK_DST(dst, dstw, 0);
- compiler->last_flags = 0;
+ FUNCTION_CHECK_SRC(src, srcw);
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
if (SLJIT_UNLIKELY(!!compiler->verbose)) {
- fprintf(compiler->verbose, " fast_enter ");
- sljit_verbose_param(compiler, dst, dstw);
+ fprintf(compiler->verbose, " return_to ");
+ sljit_verbose_param(compiler, src, srcw);
fprintf(compiler->verbose, "\n");
}
#endif
@@ -1177,7 +1399,7 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_op0(struct sljit_compiler
{
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
CHECK_ARGUMENT((op >= SLJIT_BREAKPOINT && op <= SLJIT_LMUL_SW)
- || ((op & ~SLJIT_I32_OP) >= SLJIT_DIVMOD_UW && (op & ~SLJIT_I32_OP) <= SLJIT_DIV_SW)
+ || ((op & ~SLJIT_32) >= SLJIT_DIVMOD_UW && (op & ~SLJIT_32) <= SLJIT_DIV_SW)
|| (op >= SLJIT_ENDBR && op <= SLJIT_SKIP_FRAMES_BEFORE_RETURN));
CHECK_ARGUMENT(GET_OPCODE(op) < SLJIT_LMUL_UW || GET_OPCODE(op) >= SLJIT_ENDBR || compiler->scratches >= 2);
if ((GET_OPCODE(op) >= SLJIT_LMUL_UW && GET_OPCODE(op) <= SLJIT_DIV_SW) || op == SLJIT_SKIP_FRAMES_BEFORE_RETURN)
@@ -1188,7 +1410,7 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_op0(struct sljit_compiler
{
fprintf(compiler->verbose, " %s", op0_names[GET_OPCODE(op) - SLJIT_OP0_BASE]);
if (GET_OPCODE(op) >= SLJIT_DIVMOD_UW && GET_OPCODE(op) <= SLJIT_DIV_SW) {
- fprintf(compiler->verbose, (op & SLJIT_I32_OP) ? "32" : "w");
+ fprintf(compiler->verbose, (op & SLJIT_32) ? "32" : "w");
}
fprintf(compiler->verbose, "\n");
}
@@ -1206,50 +1428,32 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_op1(struct sljit_compiler
}
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_MOV && GET_OPCODE(op) <= SLJIT_CLZ);
+ CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_MOV && GET_OPCODE(op) <= SLJIT_REV_S32);
switch (GET_OPCODE(op)) {
- case SLJIT_NOT:
- /* Only SLJIT_I32_OP and SLJIT_SET_Z are allowed. */
- CHECK_ARGUMENT(!(op & VARIABLE_FLAG_MASK));
- break;
- case SLJIT_NEG:
- CHECK_ARGUMENT(!(op & VARIABLE_FLAG_MASK)
- || GET_FLAG_TYPE(op) == SLJIT_OVERFLOW);
- break;
case SLJIT_MOV:
case SLJIT_MOV_U32:
+ case SLJIT_MOV_S32:
+ case SLJIT_MOV32:
case SLJIT_MOV_P:
+ case SLJIT_REV_U32:
+ case SLJIT_REV_S32:
/* Nothing allowed */
- CHECK_ARGUMENT(!(op & (SLJIT_I32_OP | SLJIT_SET_Z | VARIABLE_FLAG_MASK)));
+ CHECK_ARGUMENT(!(op & (SLJIT_32 | SLJIT_SET_Z | VARIABLE_FLAG_MASK)));
break;
default:
- /* Only SLJIT_I32_OP is allowed. */
+ /* Only SLJIT_32 is allowed. */
CHECK_ARGUMENT(!(op & (SLJIT_SET_Z | VARIABLE_FLAG_MASK)));
break;
}
- FUNCTION_CHECK_DST(dst, dstw, HAS_FLAGS(op));
+ FUNCTION_CHECK_DST(dst, dstw);
FUNCTION_CHECK_SRC(src, srcw);
-
- if (GET_OPCODE(op) >= SLJIT_NOT) {
- CHECK_ARGUMENT(src != SLJIT_IMM);
- compiler->last_flags = GET_FLAG_TYPE(op) | (op & (SLJIT_I32_OP | SLJIT_SET_Z));
- }
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
if (SLJIT_UNLIKELY(!!compiler->verbose)) {
- if (GET_OPCODE(op) <= SLJIT_MOV_P)
- {
- fprintf(compiler->verbose, " mov%s%s ", !(op & SLJIT_I32_OP) ? "" : "32",
- (op != SLJIT_MOV32) ? op1_names[GET_OPCODE(op) - SLJIT_OP1_BASE] : "");
- }
- else
- {
- fprintf(compiler->verbose, " %s%s%s%s%s ", op1_names[GET_OPCODE(op) - SLJIT_OP1_BASE], !(op & SLJIT_I32_OP) ? "" : "32",
- !(op & SLJIT_SET_Z) ? "" : ".z", !(op & VARIABLE_FLAG_MASK) ? "" : ".",
- !(op & VARIABLE_FLAG_MASK) ? "" : jump_names[GET_FLAG_TYPE(op)]);
- }
+ fprintf(compiler->verbose, " %s%s%s ", op1_names[GET_OPCODE(op) - SLJIT_OP1_BASE],
+ !(op & SLJIT_32) ? "" : "32", op1_types[GET_OPCODE(op) - SLJIT_OP1_BASE]);
sljit_verbose_param(compiler, dst, dstw);
fprintf(compiler->verbose, ", ");
@@ -1260,7 +1464,95 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_op1(struct sljit_compiler
CHECK_RETURN_OK;
}
-static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_op2(struct sljit_compiler *compiler, sljit_s32 op,
+static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_atomic_load(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst_reg,
+ sljit_s32 mem_reg)
+{
+ if (SLJIT_UNLIKELY(compiler->skip_checks)) {
+ compiler->skip_checks = 0;
+ CHECK_RETURN_OK;
+ }
+
+#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ CHECK_ARGUMENT(sljit_has_cpu_feature(SLJIT_HAS_ATOMIC));
+ CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_MOV && GET_OPCODE(op) <= SLJIT_MOV_P);
+ CHECK_ARGUMENT(GET_OPCODE(op) != SLJIT_MOV_S8 && GET_OPCODE(op) != SLJIT_MOV_S16 && GET_OPCODE(op) != SLJIT_MOV_S32);
+
+ /* All arguments must be valid registers. */
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(dst_reg));
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(mem_reg) && !CHECK_IF_VIRTUAL_REGISTER(mem_reg));
+
+ if (op == SLJIT_MOV32_U8 || op == SLJIT_MOV32_U16) {
+ /* Only SLJIT_32 is allowed. */
+ CHECK_ARGUMENT(!(op & (VARIABLE_FLAG_MASK | SLJIT_SET_Z)));
+ } else {
+ /* Nothing allowed. */
+ CHECK_ARGUMENT(!(op & (SLJIT_32 | SLJIT_SET_Z | VARIABLE_FLAG_MASK)));
+ }
+
+ compiler->last_flags = 0;
+#endif /* SLJIT_ARGUMENT_CHECKS */
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
+ if (SLJIT_UNLIKELY(!!compiler->verbose)) {
+ fprintf(compiler->verbose, " atomic_load%s%s ", !(op & SLJIT_32) ? "" : "32",
+ op1_types[GET_OPCODE(op) - SLJIT_OP1_BASE]);
+ sljit_verbose_reg(compiler, dst_reg);
+ fprintf(compiler->verbose, ", [");
+ sljit_verbose_reg(compiler, mem_reg);
+ fprintf(compiler->verbose, "]\n");
+ }
+#endif /* SLJIT_VERBOSE */
+ CHECK_RETURN_OK;
+}
+
+static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_atomic_store(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 src_reg,
+ sljit_s32 mem_reg,
+ sljit_s32 temp_reg)
+{
+ if (SLJIT_UNLIKELY(compiler->skip_checks)) {
+ compiler->skip_checks = 0;
+ CHECK_RETURN_OK;
+ }
+
+#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ CHECK_ARGUMENT(sljit_has_cpu_feature(SLJIT_HAS_ATOMIC));
+ CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_MOV && GET_OPCODE(op) <= SLJIT_MOV_P);
+ CHECK_ARGUMENT(GET_OPCODE(op) != SLJIT_MOV_S8 && GET_OPCODE(op) != SLJIT_MOV_S16 && GET_OPCODE(op) != SLJIT_MOV_S32);
+
+ /* All arguments must be valid registers. */
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(src_reg));
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(mem_reg) && !CHECK_IF_VIRTUAL_REGISTER(mem_reg));
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(temp_reg) && src_reg != temp_reg);
+
+ CHECK_ARGUMENT(!(op & VARIABLE_FLAG_MASK) || GET_FLAG_TYPE(op) == SLJIT_ATOMIC_STORED);
+
+ if (GET_OPCODE(op) == SLJIT_MOV_U8 || GET_OPCODE(op) == SLJIT_MOV_U16) {
+ /* Only SLJIT_32, SLJIT_ATOMIC_STORED are allowed. */
+ CHECK_ARGUMENT(!(op & SLJIT_SET_Z));
+ } else {
+ /* Only SLJIT_ATOMIC_STORED is allowed. */
+ CHECK_ARGUMENT(!(op & (SLJIT_32 | SLJIT_SET_Z)));
+ }
+
+ compiler->last_flags = GET_FLAG_TYPE(op) | (op & SLJIT_32);
+#endif /* SLJIT_ARGUMENT_CHECKS */
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
+ if (SLJIT_UNLIKELY(!!compiler->verbose)) {
+ fprintf(compiler->verbose, " atomic_store%s%s%s ", !(op & SLJIT_32) ? "" : "32",
+ op1_types[GET_OPCODE(op) - SLJIT_OP1_BASE], !(op & VARIABLE_FLAG_MASK) ? "" : ".stored");
+ sljit_verbose_reg(compiler, src_reg);
+ fprintf(compiler->verbose, ", [");
+ sljit_verbose_reg(compiler, mem_reg);
+ fprintf(compiler->verbose, "], ");
+ sljit_verbose_reg(compiler, temp_reg);
+ fprintf(compiler->verbose, "\n");
+ }
+#endif /* SLJIT_VERBOSE */
+ CHECK_RETURN_OK;
+}
+
+static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_op2(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 unset,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w)
@@ -1271,15 +1563,18 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_op2(struct sljit_compiler
}
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_ADD && GET_OPCODE(op) <= SLJIT_ASHR);
+ CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_ADD && GET_OPCODE(op) <= SLJIT_ROTR);
switch (GET_OPCODE(op)) {
case SLJIT_AND:
case SLJIT_OR:
case SLJIT_XOR:
case SLJIT_SHL:
+ case SLJIT_MSHL:
case SLJIT_LSHR:
+ case SLJIT_MLSHR:
case SLJIT_ASHR:
+ case SLJIT_MASHR:
CHECK_ARGUMENT(!(op & VARIABLE_FLAG_MASK));
break;
case SLJIT_MUL:
@@ -1302,24 +1597,35 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_op2(struct sljit_compiler
CHECK_ARGUMENT(!(op & VARIABLE_FLAG_MASK)
|| GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY));
CHECK_ARGUMENT((compiler->last_flags & 0xff) == GET_FLAG_TYPE(SLJIT_SET_CARRY));
- CHECK_ARGUMENT((op & SLJIT_I32_OP) == (compiler->last_flags & SLJIT_I32_OP));
+ CHECK_ARGUMENT((op & SLJIT_32) == (compiler->last_flags & SLJIT_32));
+ break;
+ case SLJIT_ROTL:
+ case SLJIT_ROTR:
+ CHECK_ARGUMENT(!(op & (SLJIT_SET_Z | VARIABLE_FLAG_MASK)));
break;
default:
SLJIT_UNREACHABLE();
break;
}
- FUNCTION_CHECK_DST(dst, dstw, HAS_FLAGS(op));
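+ /* When unset, no destination is written (the flag-only variant), so the operation must at least set a flag. */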
+ if (unset) {
+ CHECK_ARGUMENT(HAS_FLAGS(op));
+ } else {
+ FUNCTION_CHECK_DST(dst, dstw);
+ }
FUNCTION_CHECK_SRC(src1, src1w);
FUNCTION_CHECK_SRC(src2, src2w);
- compiler->last_flags = GET_FLAG_TYPE(op) | (op & (SLJIT_I32_OP | SLJIT_SET_Z));
+ compiler->last_flags = GET_FLAG_TYPE(op) | (op & (SLJIT_32 | SLJIT_SET_Z));
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
if (SLJIT_UNLIKELY(!!compiler->verbose)) {
- fprintf(compiler->verbose, " %s%s%s%s%s ", op2_names[GET_OPCODE(op) - SLJIT_OP2_BASE], !(op & SLJIT_I32_OP) ? "" : "32",
+ fprintf(compiler->verbose, " %s%s%s%s%s ", op2_names[GET_OPCODE(op) - SLJIT_OP2_BASE], !(op & SLJIT_32) ? "" : "32",
!(op & SLJIT_SET_Z) ? "" : ".z", !(op & VARIABLE_FLAG_MASK) ? "" : ".",
!(op & VARIABLE_FLAG_MASK) ? "" : jump_names[GET_FLAG_TYPE(op)]);
- sljit_verbose_param(compiler, dst, dstw);
+ if (unset)
+ fprintf(compiler->verbose, "unset");
+ else
+ sljit_verbose_param(compiler, dst, dstw);
fprintf(compiler->verbose, ", ");
sljit_verbose_param(compiler, src1, src1w);
fprintf(compiler->verbose, ", ");
@@ -1330,6 +1636,40 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_op2(struct sljit_compiler
CHECK_RETURN_OK;
}
+static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_shift_into(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst_reg,
+ sljit_s32 src1_reg,
+ sljit_s32 src2_reg,
+ sljit_s32 src3, sljit_sw src3w)
+{
+#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ CHECK_ARGUMENT(GET_OPCODE(op) == SLJIT_SHL || GET_OPCODE(op) == SLJIT_LSHR
+ || GET_OPCODE(op) == SLJIT_MSHL || GET_OPCODE(op) == SLJIT_MLSHR);
+ CHECK_ARGUMENT((op & ~(0xff | SLJIT_32 | SLJIT_SHIFT_INTO_NON_ZERO)) == 0);
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(dst_reg));
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(src1_reg));
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(src2_reg));
+ FUNCTION_CHECK_SRC(src3, src3w);
+ CHECK_ARGUMENT(dst_reg != src2_reg);
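+ /* dst may alias src1 but not src2: the shifted-in bits come from src2, which presumably must stay readable after dst is written. */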
+#endif
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
+ if (SLJIT_UNLIKELY(!!compiler->verbose)) {
+ fprintf(compiler->verbose, " %s%s.into%s ", op2_names[GET_OPCODE(op) - SLJIT_OP2_BASE], !(op & SLJIT_32) ? "" : "32",
+ (op & SLJIT_SHIFT_INTO_NON_ZERO) ? ".nz" : "");
+
+ sljit_verbose_reg(compiler, dst_reg);
+ fprintf(compiler->verbose, ", ");
+ sljit_verbose_reg(compiler, src1_reg);
+ fprintf(compiler->verbose, ", ");
+ sljit_verbose_reg(compiler, src2_reg);
+ fprintf(compiler->verbose, ", ");
+ sljit_verbose_param(compiler, src3, src3w);
+ fprintf(compiler->verbose, "\n");
+ }
+#endif
+ CHECK_RETURN_OK;
+}
+
static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_op_src(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 src, sljit_sw srcw)
{
@@ -1337,19 +1677,16 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_op_src(struct sljit_compi
CHECK_ARGUMENT(op >= SLJIT_FAST_RETURN && op <= SLJIT_PREFETCH_ONCE);
FUNCTION_CHECK_SRC(src, srcw);
- if (op == SLJIT_FAST_RETURN || op == SLJIT_SKIP_FRAMES_BEFORE_FAST_RETURN)
- {
+ if (op == SLJIT_FAST_RETURN || op == SLJIT_SKIP_FRAMES_BEFORE_FAST_RETURN) {
CHECK_ARGUMENT(src != SLJIT_IMM);
compiler->last_flags = 0;
- }
- else if (op >= SLJIT_PREFETCH_L1 && op <= SLJIT_PREFETCH_ONCE)
- {
+ } else if (op >= SLJIT_PREFETCH_L1 && op <= SLJIT_PREFETCH_ONCE) {
CHECK_ARGUMENT(src & SLJIT_MEM);
}
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
if (SLJIT_UNLIKELY(!!compiler->verbose)) {
- fprintf(compiler->verbose, " %s ", op_src_names[op - SLJIT_OP_SRC_BASE]);
+ fprintf(compiler->verbose, " %s ", op_src_dst_names[op - SLJIT_OP_SRC_DST_BASE]);
sljit_verbose_param(compiler, src, srcw);
fprintf(compiler->verbose, "\n");
}
@@ -1357,29 +1694,48 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_op_src(struct sljit_compi
CHECK_RETURN_OK;
}
-static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_get_register_index(sljit_s32 reg)
+static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_op_dst(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw)
{
- SLJIT_UNUSED_ARG(reg);
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- CHECK_ARGUMENT(reg > 0 && reg <= SLJIT_NUMBER_OF_REGISTERS);
+ CHECK_ARGUMENT(op >= SLJIT_FAST_ENTER && op <= SLJIT_GET_RETURN_ADDRESS);
+ FUNCTION_CHECK_DST(dst, dstw);
+
+ if (op == SLJIT_FAST_ENTER)
+ compiler->last_flags = 0;
+#endif
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
+ if (SLJIT_UNLIKELY(!!compiler->verbose)) {
+ fprintf(compiler->verbose, " %s ", op_src_dst_names[op - SLJIT_OP_SRC_DST_BASE]);
+ sljit_verbose_param(compiler, dst, dstw);
+ fprintf(compiler->verbose, "\n");
+ }
#endif
CHECK_RETURN_OK;
}
-static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_get_float_register_index(sljit_s32 reg)
+static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_get_register_index(sljit_s32 type, sljit_s32 reg)
{
+ SLJIT_UNUSED_ARG(type);
SLJIT_UNUSED_ARG(reg);
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- CHECK_ARGUMENT(reg > 0 && reg <= SLJIT_NUMBER_OF_FLOAT_REGISTERS);
+ if (type == SLJIT_GP_REGISTER) {
+ CHECK_ARGUMENT((reg > 0 && reg <= SLJIT_NUMBER_OF_REGISTERS)
+ || (reg >= SLJIT_TMP_REGISTER_BASE && reg <= (SLJIT_TMP_REGISTER_BASE + SLJIT_NUMBER_OF_TEMPORARY_REGISTERS)));
+ } else {
+ CHECK_ARGUMENT(type == SLJIT_FLOAT_REGISTER || ((type >> 12) == 0 || ((type >> 12) >= 3 && (type >> 12) <= 6)));
+ CHECK_ARGUMENT((reg > 0 && reg <= SLJIT_NUMBER_OF_FLOAT_REGISTERS)
+ || (reg >= SLJIT_TMP_FREGISTER_BASE && reg <= (SLJIT_TMP_FREGISTER_BASE + SLJIT_NUMBER_OF_TEMPORARY_FLOAT_REGISTERS)));
+ }
#endif
CHECK_RETURN_OK;
}
static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_op_custom(struct sljit_compiler *compiler,
- void *instruction, sljit_s32 size)
+ void *instruction, sljit_u32 size)
{
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
- int i;
+ sljit_u32 i;
#endif
SLJIT_UNUSED_ARG(compiler);
@@ -1424,17 +1780,17 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop1(struct sljit_compile
CHECK_ARGUMENT(sljit_has_cpu_feature(SLJIT_HAS_FPU));
CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_MOV_F64 && GET_OPCODE(op) <= SLJIT_ABS_F64);
CHECK_ARGUMENT(!(op & (SLJIT_SET_Z | VARIABLE_FLAG_MASK)));
- FUNCTION_FCHECK(src, srcw);
- FUNCTION_FCHECK(dst, dstw);
+ FUNCTION_FCHECK(src, srcw, op & SLJIT_32);
+ FUNCTION_FCHECK(dst, dstw, op & SLJIT_32);
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
if (SLJIT_UNLIKELY(!!compiler->verbose)) {
if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32)
fprintf(compiler->verbose, " %s%s ", fop1_names[SLJIT_CONV_F64_FROM_F32 - SLJIT_FOP1_BASE],
- (op & SLJIT_F32_OP) ? ".f32.from.f64" : ".f64.from.f32");
+ (op & SLJIT_32) ? ".f32.from.f64" : ".f64.from.f32");
else
fprintf(compiler->verbose, " %s%s ", fop1_names[GET_OPCODE(op) - SLJIT_FOP1_BASE],
- (op & SLJIT_F32_OP) ? ".f32" : ".f64");
+ (op & SLJIT_32) ? ".f32" : ".f64");
sljit_verbose_fparam(compiler, dst, dstw);
fprintf(compiler->verbose, ", ");
@@ -1450,7 +1806,7 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop1_cmp(struct sljit_com
sljit_s32 src2, sljit_sw src2w)
{
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->last_flags = GET_FLAG_TYPE(op) | (op & (SLJIT_I32_OP | SLJIT_SET_Z));
+ compiler->last_flags = GET_FLAG_TYPE(op) | (op & SLJIT_32);
#endif
if (SLJIT_UNLIKELY(compiler->skip_checks)) {
@@ -1463,15 +1819,15 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop1_cmp(struct sljit_com
CHECK_ARGUMENT(GET_OPCODE(op) == SLJIT_CMP_F64);
CHECK_ARGUMENT(!(op & SLJIT_SET_Z));
CHECK_ARGUMENT((op & VARIABLE_FLAG_MASK)
- || (GET_FLAG_TYPE(op) >= SLJIT_EQUAL_F64 && GET_FLAG_TYPE(op) <= SLJIT_ORDERED_F64));
- FUNCTION_FCHECK(src1, src1w);
- FUNCTION_FCHECK(src2, src2w);
+ || (GET_FLAG_TYPE(op) >= SLJIT_F_EQUAL && GET_FLAG_TYPE(op) <= SLJIT_ORDERED_LESS_EQUAL));
+ FUNCTION_FCHECK(src1, src1w, op & SLJIT_32);
+ FUNCTION_FCHECK(src2, src2w, op & SLJIT_32);
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
if (SLJIT_UNLIKELY(!!compiler->verbose)) {
- fprintf(compiler->verbose, " %s%s", fop1_names[SLJIT_CMP_F64 - SLJIT_FOP1_BASE], (op & SLJIT_F32_OP) ? ".f32" : ".f64");
+ fprintf(compiler->verbose, " %s%s", fop1_names[SLJIT_CMP_F64 - SLJIT_FOP1_BASE], (op & SLJIT_32) ? ".f32" : ".f64");
if (op & VARIABLE_FLAG_MASK) {
- fprintf(compiler->verbose, ".%s_f", jump_names[GET_FLAG_TYPE(op)]);
+ fprintf(compiler->verbose, ".%s", jump_names[GET_FLAG_TYPE(op)]);
}
fprintf(compiler->verbose, " ");
sljit_verbose_fparam(compiler, src1, src1w);
@@ -1494,16 +1850,15 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop1_conv_sw_from_f64(str
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
CHECK_ARGUMENT(sljit_has_cpu_feature(SLJIT_HAS_FPU));
- CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_CONV_SW_FROM_F64 && GET_OPCODE(op) <= SLJIT_CONV_S32_FROM_F64);
CHECK_ARGUMENT(!(op & (SLJIT_SET_Z | VARIABLE_FLAG_MASK)));
- FUNCTION_FCHECK(src, srcw);
- FUNCTION_CHECK_DST(dst, dstw, 0);
+ FUNCTION_FCHECK(src, srcw, op & SLJIT_32);
+ FUNCTION_CHECK_DST(dst, dstw);
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
if (SLJIT_UNLIKELY(!!compiler->verbose)) {
fprintf(compiler->verbose, " %s%s.from%s ", fop1_names[GET_OPCODE(op) - SLJIT_FOP1_BASE],
- (GET_OPCODE(op) == SLJIT_CONV_S32_FROM_F64) ? ".s32" : ".sw",
- (op & SLJIT_F32_OP) ? ".f32" : ".f64");
+ fop1_conv_types[GET_OPCODE(op) - SLJIT_CONV_SW_FROM_F64],
+ (op & SLJIT_32) ? ".f32" : ".f64");
sljit_verbose_param(compiler, dst, dstw);
fprintf(compiler->verbose, ", ");
sljit_verbose_fparam(compiler, src, srcw);
@@ -1513,7 +1868,7 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop1_conv_sw_from_f64(str
CHECK_RETURN_OK;
}
-static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
+static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop1_conv_f64_from_w(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
{
@@ -1524,16 +1879,15 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop1_conv_f64_from_sw(str
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
CHECK_ARGUMENT(sljit_has_cpu_feature(SLJIT_HAS_FPU));
- CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_CONV_F64_FROM_SW && GET_OPCODE(op) <= SLJIT_CONV_F64_FROM_S32);
CHECK_ARGUMENT(!(op & (SLJIT_SET_Z | VARIABLE_FLAG_MASK)));
FUNCTION_CHECK_SRC(src, srcw);
- FUNCTION_FCHECK(dst, dstw);
+ FUNCTION_FCHECK(dst, dstw, op & SLJIT_32);
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
if (SLJIT_UNLIKELY(!!compiler->verbose)) {
- fprintf(compiler->verbose, " %s%s.from%s ", fop1_names[GET_OPCODE(op) - SLJIT_FOP1_BASE],
- (op & SLJIT_F32_OP) ? ".f32" : ".f64",
- (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32) ? ".s32" : ".sw");
+ fprintf(compiler->verbose, " %s%s.from.%s ", fop1_names[GET_OPCODE(op) - SLJIT_FOP1_BASE],
+ (op & SLJIT_32) ? ".f32" : ".f64",
+ fop1_conv_types[GET_OPCODE(op) - SLJIT_CONV_SW_FROM_F64]);
sljit_verbose_fparam(compiler, dst, dstw);
fprintf(compiler->verbose, ", ");
sljit_verbose_param(compiler, src, srcw);
@@ -1548,17 +1902,22 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop2(struct sljit_compile
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w)
{
+ if (SLJIT_UNLIKELY(compiler->skip_checks)) {
+ compiler->skip_checks = 0;
+ CHECK_RETURN_OK;
+ }
+
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
CHECK_ARGUMENT(sljit_has_cpu_feature(SLJIT_HAS_FPU));
CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_ADD_F64 && GET_OPCODE(op) <= SLJIT_DIV_F64);
CHECK_ARGUMENT(!(op & (SLJIT_SET_Z | VARIABLE_FLAG_MASK)));
- FUNCTION_FCHECK(src1, src1w);
- FUNCTION_FCHECK(src2, src2w);
- FUNCTION_FCHECK(dst, dstw);
+ FUNCTION_FCHECK(src1, src1w, op & SLJIT_32);
+ FUNCTION_FCHECK(src2, src2w, op & SLJIT_32);
+ FUNCTION_FCHECK(dst, dstw, op & SLJIT_32);
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
if (SLJIT_UNLIKELY(!!compiler->verbose)) {
- fprintf(compiler->verbose, " %s%s ", fop2_names[GET_OPCODE(op) - SLJIT_FOP2_BASE], (op & SLJIT_F32_OP) ? ".f32" : ".f64");
+ fprintf(compiler->verbose, " %s%s ", fop2_names[GET_OPCODE(op) - SLJIT_FOP2_BASE], (op & SLJIT_32) ? ".f32" : ".f64");
sljit_verbose_fparam(compiler, dst, dstw);
fprintf(compiler->verbose, ", ");
sljit_verbose_fparam(compiler, src1, src1w);
@@ -1570,6 +1929,138 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop2(struct sljit_compile
CHECK_RETURN_OK;
}
+static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop2r(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst_freg,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2, sljit_sw src2w)
+{
+#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ CHECK_ARGUMENT(sljit_has_cpu_feature(SLJIT_HAS_FPU));
+ CHECK_ARGUMENT(GET_OPCODE(op) == SLJIT_COPYSIGN_F64);
+ FUNCTION_FCHECK(src1, src1w, op & SLJIT_32);
+ FUNCTION_FCHECK(src2, src2w, op & SLJIT_32);
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_FREG(dst_freg, op & SLJIT_32));
+#endif
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
+ if (SLJIT_UNLIKELY(!!compiler->verbose)) {
+ fprintf(compiler->verbose, " %s%s ", fop2r_names[GET_OPCODE(op) - SLJIT_FOP2R_BASE], (op & SLJIT_32) ? ".f32" : ".f64");
+ sljit_verbose_freg(compiler, dst_freg);
+ fprintf(compiler->verbose, ", ");
+ sljit_verbose_fparam(compiler, src1, src1w);
+ fprintf(compiler->verbose, ", ");
+ sljit_verbose_fparam(compiler, src2, src2w);
+ fprintf(compiler->verbose, "\n");
+ }
+#endif
+ CHECK_RETURN_OK;
+}
+
+static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fset32(struct sljit_compiler *compiler,
+ sljit_s32 freg, sljit_f32 value)
+{
+ SLJIT_UNUSED_ARG(value);
+
+ if (SLJIT_UNLIKELY(compiler->skip_checks)) {
+ compiler->skip_checks = 0;
+ CHECK_RETURN_OK;
+ }
+
+#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ CHECK_ARGUMENT(sljit_has_cpu_feature(SLJIT_HAS_FPU));
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_FREG(freg, 1));
+#endif
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
+ if (SLJIT_UNLIKELY(!!compiler->verbose)) {
+ fprintf(compiler->verbose, " fset32 ");
+ sljit_verbose_freg(compiler, freg);
+ fprintf(compiler->verbose, ", %f\n", value);
+ }
+#endif
+ CHECK_RETURN_OK;
+}
+
+static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fset64(struct sljit_compiler *compiler,
+ sljit_s32 freg, sljit_f64 value)
+{
+ SLJIT_UNUSED_ARG(value);
+
+ if (SLJIT_UNLIKELY(compiler->skip_checks)) {
+ compiler->skip_checks = 0;
+ CHECK_RETURN_OK;
+ }
+
+#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ CHECK_ARGUMENT(sljit_has_cpu_feature(SLJIT_HAS_FPU));
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_FREG(freg, 0));
+#endif
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
+ if (SLJIT_UNLIKELY(!!compiler->verbose)) {
+ fprintf(compiler->verbose, " fset64 ");
+ sljit_verbose_freg(compiler, freg);
+ fprintf(compiler->verbose, ", %f\n", value);
+ }
+#endif
+ CHECK_RETURN_OK;
+}
+
+static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fcopy(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 freg, sljit_s32 reg)
+{
+#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ CHECK_ARGUMENT(sljit_has_cpu_feature(SLJIT_HAS_FPU));
+ CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_COPY_TO_F64 && GET_OPCODE(op) <= SLJIT_COPY_FROM_F64);
+ CHECK_ARGUMENT(!(op & (SLJIT_SET_Z | VARIABLE_FLAG_MASK)));
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_FREG(freg, op & SLJIT_32));
+
+#if (defined SLJIT_64BIT_ARCHITECTURE && SLJIT_64BIT_ARCHITECTURE)
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(reg));
+#else /* !SLJIT_64BIT_ARCHITECTURE */
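+ /* On 32-bit targets a 64-bit float may be transferred through a register pair; the halves need to differ only for copy_from_f64, where both are written. */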
+ switch (op) {
+ case SLJIT_COPY32_TO_F32:
+ case SLJIT_COPY32_FROM_F32:
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(reg));
+ break;
+ case SLJIT_COPY_TO_F64:
+ case SLJIT_COPY_FROM_F64:
+ if (reg & REG_PAIR_MASK) {
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(REG_PAIR_FIRST(reg)));
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(REG_PAIR_SECOND(reg)));
+
+ if (op == SLJIT_COPY_TO_F64)
+ break;
+
+ CHECK_ARGUMENT(REG_PAIR_FIRST(reg) != REG_PAIR_SECOND(reg));
+ break;
+ }
+
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(reg));
+ break;
+ }
+#endif /* SLJIT_64BIT_ARCHITECTURE */
+#endif
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
+ if (SLJIT_UNLIKELY(!!compiler->verbose)) {
+ fprintf(compiler->verbose, " copy%s_%s_f%s ", (op & SLJIT_32) ? "32" : "",
+ GET_OPCODE(op) == SLJIT_COPY_TO_F64 ? "to" : "from", (op & SLJIT_32) ? "32" : "64");
+
+ sljit_verbose_freg(compiler, freg);
+
+ if (reg & REG_PAIR_MASK) {
+ fprintf(compiler->verbose, ", {");
+ sljit_verbose_reg(compiler, REG_PAIR_FIRST(reg));
+ fprintf(compiler->verbose, ", ");
+ sljit_verbose_reg(compiler, REG_PAIR_SECOND(reg));
+ fprintf(compiler->verbose, "}\n");
+ } else {
+ fprintf(compiler->verbose, ", ");
+ sljit_verbose_reg(compiler, reg);
+ fprintf(compiler->verbose, "\n");
+ }
+ }
+#endif
+ CHECK_RETURN_OK;
+}
+
static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_label(struct sljit_compiler *compiler)
{
SLJIT_UNUSED_ARG(compiler);
@@ -1590,6 +2081,17 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_label(struct sljit_compil
CHECK_RETURN_OK;
}
+#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+#if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86) \
+ || (defined SLJIT_CONFIG_ARM && SLJIT_CONFIG_ARM)
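+/* On x86 and ARM an (un)ordered test is also accepted after any unordered-aware float comparison; elsewhere the tested flag must match the recorded one exactly. */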
+#define CHECK_UNORDERED(type, last_flags) \
+ ((((type) & 0xfe) == SLJIT_ORDERED) && \
+ ((last_flags) & 0xff) >= SLJIT_UNORDERED && ((last_flags) & 0xff) <= SLJIT_ORDERED_LESS_EQUAL)
+#else
+#define CHECK_UNORDERED(type, last_flags) 0
+#endif
+#endif /* SLJIT_ARGUMENT_CHECKS */
+
static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_jump(struct sljit_compiler *compiler, sljit_s32 type)
{
if (SLJIT_UNLIKELY(compiler->skip_checks)) {
@@ -1598,23 +2100,24 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_jump(struct sljit_compile
}
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_REWRITABLE_JUMP | SLJIT_I32_OP)));
- CHECK_ARGUMENT((type & 0xff) != GET_FLAG_TYPE(SLJIT_SET_CARRY) && (type & 0xff) != (GET_FLAG_TYPE(SLJIT_SET_CARRY) + 1));
+ CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_REWRITABLE_JUMP)));
CHECK_ARGUMENT((type & 0xff) >= SLJIT_EQUAL && (type & 0xff) <= SLJIT_FAST_CALL);
- CHECK_ARGUMENT((type & 0xff) < SLJIT_JUMP || !(type & SLJIT_I32_OP));
if ((type & 0xff) < SLJIT_JUMP) {
if ((type & 0xff) <= SLJIT_NOT_ZERO)
CHECK_ARGUMENT(compiler->last_flags & SLJIT_SET_Z);
- else
- CHECK_ARGUMENT((type & 0xff) == (compiler->last_flags & 0xff)
- || ((type & 0xff) == SLJIT_NOT_OVERFLOW && (compiler->last_flags & 0xff) == SLJIT_OVERFLOW));
+ else if ((compiler->last_flags & 0xff) == SLJIT_CARRY) {
+ CHECK_ARGUMENT((type & 0xfe) == SLJIT_CARRY);
+ compiler->last_flags = 0;
+ } else
+ CHECK_ARGUMENT((type & 0xfe) == (compiler->last_flags & 0xff)
+ || CHECK_UNORDERED(type, compiler->last_flags));
}
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
if (SLJIT_UNLIKELY(!!compiler->verbose))
- fprintf(compiler->verbose, " jump%s %s%s\n", !(type & SLJIT_REWRITABLE_JUMP) ? "" : ".r",
- jump_names[type & 0xff], JUMP_POSTFIX(type));
+ fprintf(compiler->verbose, " jump%s %s\n", !(type & SLJIT_REWRITABLE_JUMP) ? "" : ".r",
+ jump_names[type & 0xff]);
#endif
CHECK_RETURN_OK;
}
@@ -1623,49 +2126,33 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_call(struct sljit_compile
sljit_s32 arg_types)
{
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- sljit_s32 i, types, curr_type, scratches, fscratches;
+ CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_REWRITABLE_JUMP | SLJIT_CALL_RETURN)));
+ CHECK_ARGUMENT((type & 0xff) >= SLJIT_CALL && (type & 0xff) <= SLJIT_CALL_REG_ARG);
+ CHECK_ARGUMENT(function_check_arguments(arg_types, compiler->scratches, -1, compiler->fscratches));
- CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_REWRITABLE_JUMP)));
- CHECK_ARGUMENT((type & 0xff) == SLJIT_CALL || (type & 0xff) == SLJIT_CALL_CDECL);
-
- types = arg_types;
- scratches = 0;
- fscratches = 0;
- for (i = 0; i < 5; i++) {
- curr_type = (types & SLJIT_DEF_MASK);
- CHECK_ARGUMENT(curr_type <= SLJIT_ARG_TYPE_F64);
- if (i > 0) {
- if (curr_type == 0) {
- break;
- }
- if (curr_type >= SLJIT_ARG_TYPE_F32)
- fscratches++;
- else
- scratches++;
+ if (type & SLJIT_CALL_RETURN) {
+ CHECK_ARGUMENT((arg_types & SLJIT_ARG_MASK) == compiler->last_return);
+
+ if (compiler->options & SLJIT_ENTER_REG_ARG) {
+ CHECK_ARGUMENT((type & 0xff) == SLJIT_CALL_REG_ARG);
} else {
- if (curr_type >= SLJIT_ARG_TYPE_F32) {
- CHECK_ARGUMENT(compiler->fscratches > 0);
- } else if (curr_type >= SLJIT_ARG_TYPE_SW) {
- CHECK_ARGUMENT(compiler->scratches > 0);
- }
+ CHECK_ARGUMENT((type & 0xff) != SLJIT_CALL_REG_ARG);
}
- types >>= SLJIT_DEF_SHIFT;
}
- CHECK_ARGUMENT(compiler->scratches >= scratches);
- CHECK_ARGUMENT(compiler->fscratches >= fscratches);
- CHECK_ARGUMENT(types == 0);
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
if (SLJIT_UNLIKELY(!!compiler->verbose)) {
- fprintf(compiler->verbose, " %s%s ret[%s", jump_names[type & 0xff],
- !(type & SLJIT_REWRITABLE_JUMP) ? "" : ".r", call_arg_names[arg_types & SLJIT_DEF_MASK]);
+ fprintf(compiler->verbose, " %s%s%s ret[%s", jump_names[type & 0xff],
+ !(type & SLJIT_REWRITABLE_JUMP) ? "" : ".r",
+ !(type & SLJIT_CALL_RETURN) ? "" : ".ret",
+ call_arg_names[arg_types & SLJIT_ARG_MASK]);
- arg_types >>= SLJIT_DEF_SHIFT;
+ arg_types >>= SLJIT_ARG_SHIFT;
if (arg_types) {
fprintf(compiler->verbose, "], args[");
do {
- fprintf(compiler->verbose, "%s", call_arg_names[arg_types & SLJIT_DEF_MASK]);
- arg_types >>= SLJIT_DEF_SHIFT;
+ fprintf(compiler->verbose, "%s", call_arg_names[arg_types & SLJIT_ARG_MASK]);
+ arg_types >>= SLJIT_ARG_SHIFT;
if (arg_types)
fprintf(compiler->verbose, ",");
} while (arg_types);
@@ -1681,7 +2168,7 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_cmp(struct sljit_compiler
sljit_s32 src2, sljit_sw src2w)
{
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_REWRITABLE_JUMP | SLJIT_I32_OP)));
+ CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_REWRITABLE_JUMP | SLJIT_32)));
CHECK_ARGUMENT((type & 0xff) >= SLJIT_EQUAL && (type & 0xff) <= SLJIT_SIG_LESS_EQUAL);
FUNCTION_CHECK_SRC(src1, src1w);
FUNCTION_CHECK_SRC(src2, src2w);
@@ -1689,8 +2176,8 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_cmp(struct sljit_compiler
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
if (SLJIT_UNLIKELY(!!compiler->verbose)) {
- fprintf(compiler->verbose, " cmp%s %s%s, ", !(type & SLJIT_REWRITABLE_JUMP) ? "" : ".r",
- jump_names[type & 0xff], (type & SLJIT_I32_OP) ? "32" : "");
+ fprintf(compiler->verbose, " cmp%s%s %s, ", (type & SLJIT_32) ? "32" : "",
+ !(type & SLJIT_REWRITABLE_JUMP) ? "" : ".r", jump_names[type & 0xff]);
sljit_verbose_param(compiler, src1, src1w);
fprintf(compiler->verbose, ", ");
sljit_verbose_param(compiler, src2, src2w);
@@ -1706,16 +2193,16 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fcmp(struct sljit_compile
{
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
CHECK_ARGUMENT(sljit_has_cpu_feature(SLJIT_HAS_FPU));
- CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_REWRITABLE_JUMP | SLJIT_F32_OP)));
- CHECK_ARGUMENT((type & 0xff) >= SLJIT_EQUAL_F64 && (type & 0xff) <= SLJIT_ORDERED_F64);
- FUNCTION_FCHECK(src1, src1w);
- FUNCTION_FCHECK(src2, src2w);
+ CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_REWRITABLE_JUMP | SLJIT_32)));
+ CHECK_ARGUMENT((type & 0xff) >= SLJIT_F_EQUAL && (type & 0xff) <= SLJIT_ORDERED_LESS_EQUAL);
+ FUNCTION_FCHECK(src1, src1w, type & SLJIT_32);
+ FUNCTION_FCHECK(src2, src2w, type & SLJIT_32);
compiler->last_flags = 0;
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
if (SLJIT_UNLIKELY(!!compiler->verbose)) {
- fprintf(compiler->verbose, " fcmp%s %s%s, ", !(type & SLJIT_REWRITABLE_JUMP) ? "" : ".r",
- jump_names[type & 0xff], (type & SLJIT_F32_OP) ? ".f32" : ".f64");
+ fprintf(compiler->verbose, " fcmp%s%s %s, ", (type & SLJIT_32) ? ".f32" : ".f64",
+ !(type & SLJIT_REWRITABLE_JUMP) ? "" : ".r", jump_names[type & 0xff]);
sljit_verbose_fparam(compiler, src1, src1w);
fprintf(compiler->verbose, ", ");
sljit_verbose_fparam(compiler, src2, src2w);
@@ -1752,49 +2239,33 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_icall(struct sljit_compil
sljit_s32 src, sljit_sw srcw)
{
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- sljit_s32 i, types, curr_type, scratches, fscratches;
-
- CHECK_ARGUMENT(type == SLJIT_CALL || type == SLJIT_CALL_CDECL);
+ CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_CALL_RETURN)));
+ CHECK_ARGUMENT((type & 0xff) >= SLJIT_CALL && (type & 0xff) <= SLJIT_CALL_REG_ARG);
+ CHECK_ARGUMENT(function_check_arguments(arg_types, compiler->scratches, -1, compiler->fscratches));
FUNCTION_CHECK_SRC(src, srcw);
- types = arg_types;
- scratches = 0;
- fscratches = 0;
- for (i = 0; i < 5; i++) {
- curr_type = (types & SLJIT_DEF_MASK);
- CHECK_ARGUMENT(curr_type <= SLJIT_ARG_TYPE_F64);
- if (i > 0) {
- if (curr_type == 0) {
- break;
- }
- if (curr_type >= SLJIT_ARG_TYPE_F32)
- fscratches++;
- else
- scratches++;
+ if (type & SLJIT_CALL_RETURN) {
+ CHECK_ARGUMENT((arg_types & SLJIT_ARG_MASK) == compiler->last_return);
+
+ if (compiler->options & SLJIT_ENTER_REG_ARG) {
+ CHECK_ARGUMENT((type & 0xff) == SLJIT_CALL_REG_ARG);
} else {
- if (curr_type >= SLJIT_ARG_TYPE_F32) {
- CHECK_ARGUMENT(compiler->fscratches > 0);
- } else if (curr_type >= SLJIT_ARG_TYPE_SW) {
- CHECK_ARGUMENT(compiler->scratches > 0);
- }
+ CHECK_ARGUMENT((type & 0xff) != SLJIT_CALL_REG_ARG);
}
- types >>= SLJIT_DEF_SHIFT;
}
- CHECK_ARGUMENT(compiler->scratches >= scratches);
- CHECK_ARGUMENT(compiler->fscratches >= fscratches);
- CHECK_ARGUMENT(types == 0);
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
if (SLJIT_UNLIKELY(!!compiler->verbose)) {
fprintf(compiler->verbose, " i%s%s ret[%s", jump_names[type & 0xff],
- !(type & SLJIT_REWRITABLE_JUMP) ? "" : ".r", call_arg_names[arg_types & SLJIT_DEF_MASK]);
+ !(type & SLJIT_CALL_RETURN) ? "" : ".ret",
+ call_arg_names[arg_types & SLJIT_ARG_MASK]);
- arg_types >>= SLJIT_DEF_SHIFT;
+ arg_types >>= SLJIT_ARG_SHIFT;
if (arg_types) {
fprintf(compiler->verbose, "], args[");
do {
- fprintf(compiler->verbose, "%s", call_arg_names[arg_types & SLJIT_DEF_MASK]);
- arg_types >>= SLJIT_DEF_SHIFT;
+ fprintf(compiler->verbose, "%s", call_arg_names[arg_types & SLJIT_ARG_MASK]);
+ arg_types >>= SLJIT_ARG_SHIFT;
if (arg_types)
fprintf(compiler->verbose, ",");
} while (arg_types);
@@ -1812,66 +2283,109 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_op_flags(struct sljit_com
sljit_s32 type)
{
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_I32_OP)));
- CHECK_ARGUMENT((type & 0xff) >= SLJIT_EQUAL && (type & 0xff) <= SLJIT_ORDERED_F64);
- CHECK_ARGUMENT((type & 0xff) != GET_FLAG_TYPE(SLJIT_SET_CARRY) && (type & 0xff) != (GET_FLAG_TYPE(SLJIT_SET_CARRY) + 1));
+ CHECK_ARGUMENT(type >= SLJIT_EQUAL && type <= SLJIT_ORDERED_LESS_EQUAL);
CHECK_ARGUMENT(op == SLJIT_MOV || op == SLJIT_MOV32
|| (GET_OPCODE(op) >= SLJIT_AND && GET_OPCODE(op) <= SLJIT_XOR));
CHECK_ARGUMENT(!(op & VARIABLE_FLAG_MASK));
- if ((type & 0xff) <= SLJIT_NOT_ZERO)
+ if (type <= SLJIT_NOT_ZERO)
CHECK_ARGUMENT(compiler->last_flags & SLJIT_SET_Z);
else
- CHECK_ARGUMENT((type & 0xff) == (compiler->last_flags & 0xff)
- || ((type & 0xff) == SLJIT_NOT_OVERFLOW && (compiler->last_flags & 0xff) == SLJIT_OVERFLOW));
+ CHECK_ARGUMENT((type & 0xfe) == (compiler->last_flags & 0xff)
+ || CHECK_UNORDERED(type, compiler->last_flags));
- FUNCTION_CHECK_DST(dst, dstw, 0);
+ FUNCTION_CHECK_DST(dst, dstw);
if (GET_OPCODE(op) >= SLJIT_ADD)
- compiler->last_flags = GET_FLAG_TYPE(op) | (op & (SLJIT_I32_OP | SLJIT_SET_Z));
+ compiler->last_flags = GET_FLAG_TYPE(op) | (op & (SLJIT_32 | SLJIT_SET_Z));
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
if (SLJIT_UNLIKELY(!!compiler->verbose)) {
- fprintf(compiler->verbose, " flags%s %s%s, ",
- !(op & SLJIT_SET_Z) ? "" : ".z",
+ fprintf(compiler->verbose, " flags.%s%s%s ",
GET_OPCODE(op) < SLJIT_OP2_BASE ? "mov" : op2_names[GET_OPCODE(op) - SLJIT_OP2_BASE],
- GET_OPCODE(op) < SLJIT_OP2_BASE ? op1_names[GET_OPCODE(op) - SLJIT_OP1_BASE] : ((op & SLJIT_I32_OP) ? "32" : ""));
+ GET_OPCODE(op) < SLJIT_OP2_BASE ? op1_types[GET_OPCODE(op) - SLJIT_OP1_BASE] : ((op & SLJIT_32) ? "32" : ""),
+ !(op & SLJIT_SET_Z) ? "" : ".z");
sljit_verbose_param(compiler, dst, dstw);
- fprintf(compiler->verbose, ", %s%s\n", jump_names[type & 0xff], JUMP_POSTFIX(type));
+ fprintf(compiler->verbose, ", %s\n", jump_names[type]);
}
#endif
CHECK_RETURN_OK;
}
-static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_cmov(struct sljit_compiler *compiler, sljit_s32 type,
+static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_select(struct sljit_compiler *compiler, sljit_s32 type,
sljit_s32 dst_reg,
- sljit_s32 src, sljit_sw srcw)
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2_reg)
{
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_I32_OP)));
- CHECK_ARGUMENT((type & 0xff) >= SLJIT_EQUAL && (type & 0xff) <= SLJIT_ORDERED_F64);
+ sljit_s32 cond = type & ~SLJIT_32;
+
+ CHECK_ARGUMENT(cond >= SLJIT_EQUAL && cond <= SLJIT_ORDERED_LESS_EQUAL);
CHECK_ARGUMENT(compiler->scratches != -1 && compiler->saveds != -1);
- CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(dst_reg & ~SLJIT_I32_OP));
- if (src != SLJIT_IMM) {
- CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(src));
- CHECK_ARGUMENT(srcw == 0);
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(dst_reg));
+ FUNCTION_CHECK_SRC(src1, src1w);
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(src2_reg));
+
+ if (cond <= SLJIT_NOT_ZERO)
+ CHECK_ARGUMENT(compiler->last_flags & SLJIT_SET_Z);
+ else if ((compiler->last_flags & 0xff) == SLJIT_CARRY) {
+ CHECK_ARGUMENT((type & 0xfe) == SLJIT_CARRY);
+ compiler->last_flags = 0;
+ } else
+ CHECK_ARGUMENT((cond & 0xfe) == (compiler->last_flags & 0xff)
+ || CHECK_UNORDERED(cond, compiler->last_flags));
+#endif
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
+ if (SLJIT_UNLIKELY(!!compiler->verbose)) {
+ fprintf(compiler->verbose, " select%s %s, ",
+ !(type & SLJIT_32) ? "" : "32",
+ jump_names[type & ~SLJIT_32]);
+ sljit_verbose_reg(compiler, dst_reg);
+ fprintf(compiler->verbose, ", ");
+ sljit_verbose_param(compiler, src1, src1w);
+ fprintf(compiler->verbose, ", ");
+ sljit_verbose_reg(compiler, src2_reg);
+ fprintf(compiler->verbose, "\n");
}
+#endif
+ CHECK_RETURN_OK;
+}
+
+static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fselect(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 dst_freg,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2_freg)
+{
+#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ sljit_s32 cond = type & ~SLJIT_32;
+
+ CHECK_ARGUMENT(cond >= SLJIT_EQUAL && cond <= SLJIT_ORDERED_LESS_EQUAL);
+
+ CHECK_ARGUMENT(compiler->fscratches != -1 && compiler->fsaveds != -1);
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_FREG(dst_freg, type & SLJIT_32));
+ FUNCTION_FCHECK(src1, src1w, type & SLJIT_32);
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_FREG(src2_freg, type & SLJIT_32));
- if ((type & 0xff) <= SLJIT_NOT_ZERO)
+ if (cond <= SLJIT_NOT_ZERO)
CHECK_ARGUMENT(compiler->last_flags & SLJIT_SET_Z);
- else
- CHECK_ARGUMENT((type & 0xff) == (compiler->last_flags & 0xff)
- || ((type & 0xff) == SLJIT_NOT_OVERFLOW && (compiler->last_flags & 0xff) == SLJIT_OVERFLOW));
+ else if ((compiler->last_flags & 0xff) == SLJIT_CARRY) {
+ CHECK_ARGUMENT((type & 0xfe) == SLJIT_CARRY);
+ compiler->last_flags = 0;
+ } else
+ CHECK_ARGUMENT((cond & 0xfe) == (compiler->last_flags & 0xff)
+ || CHECK_UNORDERED(cond, compiler->last_flags));
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
if (SLJIT_UNLIKELY(!!compiler->verbose)) {
- fprintf(compiler->verbose, " cmov%s %s%s, ",
- !(dst_reg & SLJIT_I32_OP) ? "" : "32",
- jump_names[type & 0xff], JUMP_POSTFIX(type));
- sljit_verbose_reg(compiler, dst_reg & ~SLJIT_I32_OP);
+ fprintf(compiler->verbose, " fselect%s %s, ",
+ !(type & SLJIT_32) ? "" : "32",
+ jump_names[type & ~SLJIT_32]);
+ sljit_verbose_freg(compiler, dst_freg);
fprintf(compiler->verbose, ", ");
- sljit_verbose_param(compiler, src, srcw);
+ sljit_verbose_fparam(compiler, src1, src1w);
+ fprintf(compiler->verbose, ", ");
+ sljit_verbose_freg(compiler, src2_freg);
fprintf(compiler->verbose, "\n");
}
#endif
@@ -1883,27 +2397,123 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_mem(struct sljit_compiler
sljit_s32 mem, sljit_sw memw)
{
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- CHECK_ARGUMENT((type & 0xff) >= SLJIT_MOV && (type & 0xff) <= SLJIT_MOV_P);
- CHECK_ARGUMENT(!(type & SLJIT_I32_OP) || ((type & 0xff) != SLJIT_MOV && (type & 0xff) != SLJIT_MOV_U32 && (type & 0xff) != SLJIT_MOV_P));
- CHECK_ARGUMENT((type & SLJIT_MEM_PRE) || (type & SLJIT_MEM_POST));
- CHECK_ARGUMENT((type & (SLJIT_MEM_PRE | SLJIT_MEM_POST)) != (SLJIT_MEM_PRE | SLJIT_MEM_POST));
- CHECK_ARGUMENT((type & ~(0xff | SLJIT_I32_OP | SLJIT_MEM_STORE | SLJIT_MEM_SUPP | SLJIT_MEM_PRE | SLJIT_MEM_POST)) == 0);
+ sljit_s32 allowed_flags;
+#endif /* SLJIT_ARGUMENT_CHECKS */
+
+ if (SLJIT_UNLIKELY(compiler->skip_checks)) {
+ compiler->skip_checks = 0;
+ CHECK_RETURN_OK;
+ }
+
+#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
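+ /* The three alignment hints are mutually exclusive; when none is given, only register-pair transfers are accepted. */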
+ if (type & SLJIT_MEM_UNALIGNED) {
+ CHECK_ARGUMENT(!(type & (SLJIT_MEM_ALIGNED_16 | SLJIT_MEM_ALIGNED_32)));
+ } else if (type & SLJIT_MEM_ALIGNED_16) {
+ CHECK_ARGUMENT(!(type & SLJIT_MEM_ALIGNED_32));
+ } else {
+ CHECK_ARGUMENT((reg & REG_PAIR_MASK) || (type & SLJIT_MEM_ALIGNED_32));
+ }
+
+ allowed_flags = SLJIT_MEM_UNALIGNED;
+
+ switch (type & 0xff) {
+ case SLJIT_MOV_P:
+ case SLJIT_MOV:
+ allowed_flags |= SLJIT_MEM_ALIGNED_32;
+ /* fallthrough */
+ case SLJIT_MOV_U32:
+ case SLJIT_MOV_S32:
+ case SLJIT_MOV32:
+ allowed_flags |= SLJIT_MEM_ALIGNED_16;
+ break;
+ }
+
+ CHECK_ARGUMENT((type & ~(0xff | SLJIT_32 | SLJIT_MEM_STORE | allowed_flags)) == 0);
+
+ if (reg & REG_PAIR_MASK) {
+ CHECK_ARGUMENT((type & 0xff) == SLJIT_MOV);
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(REG_PAIR_FIRST(reg)));
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(REG_PAIR_SECOND(reg)));
+ CHECK_ARGUMENT(REG_PAIR_FIRST(reg) != REG_PAIR_SECOND(reg));
+ } else {
+ CHECK_ARGUMENT((type & 0xff) >= SLJIT_MOV && (type & 0xff) <= SLJIT_MOV_P);
+ CHECK_ARGUMENT(!(type & SLJIT_32) || ((type & 0xff) >= SLJIT_MOV_U8 && (type & 0xff) <= SLJIT_MOV_S16));
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(reg));
+ }
FUNCTION_CHECK_SRC_MEM(mem, memw);
- CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(reg));
+#endif
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
+ if (SLJIT_UNLIKELY(!!compiler->verbose)) {
+ if ((type & 0xff) == SLJIT_MOV32)
+ fprintf(compiler->verbose, " %s32",
+ (type & SLJIT_MEM_STORE) ? "store" : "load");
+ else
+ fprintf(compiler->verbose, " %s%s%s",
+ (type & SLJIT_MEM_STORE) ? "store" : "load",
+ !(type & SLJIT_32) ? "" : "32", op1_types[(type & 0xff) - SLJIT_OP1_BASE]);
+
+ if (type & SLJIT_MEM_UNALIGNED)
+ printf(".unal");
+ else if (type & SLJIT_MEM_ALIGNED_16)
+ printf(".al16");
+ else if (type & SLJIT_MEM_ALIGNED_32)
+ printf(".al32");
+
+ if (reg & REG_PAIR_MASK) {
+ fprintf(compiler->verbose, " {");
+ sljit_verbose_reg(compiler, REG_PAIR_FIRST(reg));
+ fprintf(compiler->verbose, ", ");
+ sljit_verbose_reg(compiler, REG_PAIR_SECOND(reg));
+ fprintf(compiler->verbose, "}, ");
+ } else {
+ fprintf(compiler->verbose, " ");
+ sljit_verbose_reg(compiler, reg);
+ fprintf(compiler->verbose, ", ");
+ }
+ sljit_verbose_param(compiler, mem, memw);
+ fprintf(compiler->verbose, "\n");
+ }
+#endif
+ CHECK_RETURN_OK;
+}
+
+static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_mem_update(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 reg,
+ sljit_s32 mem, sljit_sw memw)
+{
+ if (SLJIT_UNLIKELY(compiler->skip_checks)) {
+ compiler->skip_checks = 0;
+ CHECK_RETURN_OK;
+ }
+
+#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ CHECK_ARGUMENT((type & 0xff) >= SLJIT_MOV && (type & 0xff) <= SLJIT_MOV_P);
+ CHECK_ARGUMENT((type & ~(0xff | SLJIT_32 | SLJIT_MEM_STORE | SLJIT_MEM_SUPP | SLJIT_MEM_POST)) == 0);
+ CHECK_ARGUMENT((mem & REG_MASK) != 0 && (mem & REG_MASK) != reg);
- CHECK_ARGUMENT((mem & REG_MASK) != SLJIT_UNUSED && (mem & REG_MASK) != reg);
+ FUNCTION_CHECK_SRC_MEM(mem, memw);
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
- if (!(type & SLJIT_MEM_SUPP) && SLJIT_UNLIKELY(!!compiler->verbose)) {
- if (sljit_emit_mem(compiler, type | SLJIT_MEM_SUPP, reg, mem, memw) == SLJIT_ERR_UNSUPPORTED)
- fprintf(compiler->verbose, " //");
-
- fprintf(compiler->verbose, " mem%s.%s%s%s ",
- !(type & SLJIT_I32_OP) ? "" : "32",
- (type & SLJIT_MEM_STORE) ? "st" : "ld",
- op1_names[(type & 0xff) - SLJIT_OP1_BASE],
- (type & SLJIT_MEM_PRE) ? ".pre" : ".post");
+ if (SLJIT_UNLIKELY(!!compiler->verbose)) {
+ if (type & SLJIT_MEM_SUPP)
+ CHECK_RETURN_OK;
+ if (sljit_emit_mem_update(compiler, type | SLJIT_MEM_SUPP, reg, mem, memw) == SLJIT_ERR_UNSUPPORTED) {
+ fprintf(compiler->verbose, " # mem: unsupported form, no instructions are emitted\n");
+ CHECK_RETURN_OK;
+ }
+
+ if ((type & 0xff) == SLJIT_MOV32)
+ fprintf(compiler->verbose, " %s32.%s ",
+ (type & SLJIT_MEM_STORE) ? "store" : "load",
+ (type & SLJIT_MEM_POST) ? "post" : "pre");
+ else
+ fprintf(compiler->verbose, " %s%s%s.%s ",
+ (type & SLJIT_MEM_STORE) ? "store" : "load",
+ !(type & SLJIT_32) ? "" : "32",
+ op1_types[(type & 0xff) - SLJIT_OP1_BASE],
+ (type & SLJIT_MEM_POST) ? "post" : "pre");
+
sljit_verbose_reg(compiler, reg);
fprintf(compiler->verbose, ", ");
sljit_verbose_param(compiler, mem, memw);
@@ -1918,23 +2528,70 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fmem(struct sljit_compile
sljit_s32 mem, sljit_sw memw)
{
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ CHECK_ARGUMENT(sljit_has_cpu_feature(SLJIT_HAS_FPU));
CHECK_ARGUMENT((type & 0xff) == SLJIT_MOV_F64);
- CHECK_ARGUMENT((type & SLJIT_MEM_PRE) || (type & SLJIT_MEM_POST));
- CHECK_ARGUMENT((type & (SLJIT_MEM_PRE | SLJIT_MEM_POST)) != (SLJIT_MEM_PRE | SLJIT_MEM_POST));
- CHECK_ARGUMENT((type & ~(0xff | SLJIT_I32_OP | SLJIT_MEM_STORE | SLJIT_MEM_SUPP | SLJIT_MEM_PRE | SLJIT_MEM_POST)) == 0);
+ if (type & SLJIT_MEM_UNALIGNED) {
+ CHECK_ARGUMENT(!(type & (SLJIT_MEM_ALIGNED_16 | SLJIT_MEM_ALIGNED_32)));
+ } else if (type & SLJIT_MEM_ALIGNED_16) {
+ CHECK_ARGUMENT(!(type & SLJIT_MEM_ALIGNED_32));
+ } else {
+ CHECK_ARGUMENT(type & SLJIT_MEM_ALIGNED_32);
+ CHECK_ARGUMENT(!(type & SLJIT_32));
+ }
+
+ CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_32 | SLJIT_MEM_STORE | SLJIT_MEM_UNALIGNED | SLJIT_MEM_ALIGNED_16 | SLJIT_MEM_ALIGNED_32)));
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_FREG(freg, type & SLJIT_32));
FUNCTION_CHECK_SRC_MEM(mem, memw);
- CHECK_ARGUMENT(FUNCTION_CHECK_IS_FREG(freg));
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
- if (!(type & SLJIT_MEM_SUPP) && SLJIT_UNLIKELY(!!compiler->verbose)) {
- if (sljit_emit_fmem(compiler, type | SLJIT_MEM_SUPP, freg, mem, memw) == SLJIT_ERR_UNSUPPORTED)
- fprintf(compiler->verbose, " //");
-
- fprintf(compiler->verbose, " fmem.%s%s%s ",
- (type & SLJIT_MEM_STORE) ? "st" : "ld",
- !(type & SLJIT_I32_OP) ? ".f64" : ".f32",
- (type & SLJIT_MEM_PRE) ? ".pre" : ".post");
+ if (SLJIT_UNLIKELY(!!compiler->verbose)) {
+ fprintf(compiler->verbose, " %s.%s",
+ (type & SLJIT_MEM_STORE) ? "store" : "load",
+ !(type & SLJIT_32) ? "f64" : "f32");
+
+ if (type & SLJIT_MEM_UNALIGNED)
+ fprintf(compiler->verbose, ".unal");
+ else if (type & SLJIT_MEM_ALIGNED_16)
+ fprintf(compiler->verbose, ".al16");
+ else if (type & SLJIT_MEM_ALIGNED_32)
+ fprintf(compiler->verbose, ".al32");
+
+ fprintf(compiler->verbose, " ");
+ sljit_verbose_freg(compiler, freg);
+ fprintf(compiler->verbose, ", ");
+ sljit_verbose_param(compiler, mem, memw);
+ fprintf(compiler->verbose, "\n");
+ }
+#endif
+ CHECK_RETURN_OK;
+}
+
+static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fmem_update(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 mem, sljit_sw memw)
+{
+#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ CHECK_ARGUMENT(sljit_has_cpu_feature(SLJIT_HAS_FPU));
+ CHECK_ARGUMENT((type & 0xff) == SLJIT_MOV_F64);
+ CHECK_ARGUMENT((type & ~(0xff | SLJIT_32 | SLJIT_MEM_STORE | SLJIT_MEM_SUPP | SLJIT_MEM_POST)) == 0);
+ FUNCTION_CHECK_SRC_MEM(mem, memw);
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_FREG(freg, type & SLJIT_32));
+#endif
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
+ if (SLJIT_UNLIKELY(!!compiler->verbose)) {
+ if (type & SLJIT_MEM_SUPP)
+ CHECK_RETURN_OK;
+ if (sljit_emit_fmem_update(compiler, type | SLJIT_MEM_SUPP, freg, mem, memw) == SLJIT_ERR_UNSUPPORTED) {
+ fprintf(compiler->verbose, " # fmem: unsupported form, no instructions are emitted\n");
+ CHECK_RETURN_OK;
+ }
+
+ fprintf(compiler->verbose, " %s.%s.%s ",
+ (type & SLJIT_MEM_STORE) ? "store" : "load",
+ !(type & SLJIT_32) ? "f64" : "f32",
+ (type & SLJIT_MEM_POST) ? "post" : "pre");
+
sljit_verbose_freg(compiler, freg);
fprintf(compiler->verbose, ", ");
sljit_verbose_param(compiler, mem, memw);
@@ -1944,13 +2601,304 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fmem(struct sljit_compile
CHECK_RETURN_OK;
}
+static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_simd_mov(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 srcdst, sljit_sw srcdstw)
+{
+#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ CHECK_ARGUMENT(sljit_has_cpu_feature(SLJIT_HAS_SIMD));
+ CHECK_ARGUMENT((type & SLJIT_SIMD_TYPE_MASK2(SLJIT_SIMD_STORE)) == 0);
+ CHECK_ARGUMENT(SLJIT_SIMD_CHECK_REG(type));
+ CHECK_ARGUMENT(SLJIT_SIMD_GET_ELEM_SIZE(type) <= SLJIT_SIMD_GET_REG_SIZE(type));
+ CHECK_ARGUMENT(SLJIT_SIMD_GET_ELEM2_SIZE(type) <= ((srcdst & SLJIT_MEM) ? SLJIT_SIMD_GET_REG_SIZE(type) : 0));
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_FREG(freg, 0));
+ FUNCTION_FCHECK(srcdst, srcdstw, 0);
+#endif
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
+ if (SLJIT_UNLIKELY(!!compiler->verbose)) {
+ if (type & SLJIT_SIMD_TEST)
+ CHECK_RETURN_OK;
+ if (sljit_emit_simd_mov(compiler, type | SLJIT_SIMD_TEST, freg, srcdst, srcdstw) == SLJIT_ERR_UNSUPPORTED) {
+ fprintf(compiler->verbose, " # simd_mem: unsupported form, no instructions are emitted\n");
+ CHECK_RETURN_OK;
+ }
+
+ fprintf(compiler->verbose, " simd_%s.%d.%s%d",
+ (type & SLJIT_SIMD_STORE) ? "store" : "load",
+ (8 << SLJIT_SIMD_GET_REG_SIZE(type)),
+ (type & SLJIT_SIMD_FLOAT) ? "f" : "",
+ (8 << SLJIT_SIMD_GET_ELEM_SIZE(type)));
+
+ if ((type & 0x3f000000) == SLJIT_SIMD_MEM_UNALIGNED)
+ fprintf(compiler->verbose, ".unal ");
+ else
+ fprintf(compiler->verbose, ".al%d ", (8 << SLJIT_SIMD_GET_ELEM2_SIZE(type)));
+
+ sljit_verbose_freg(compiler, freg);
+ fprintf(compiler->verbose, ", ");
+ sljit_verbose_fparam(compiler, srcdst, srcdstw);
+ fprintf(compiler->verbose, "\n");
+ }
+#endif
+ CHECK_RETURN_OK;
+}
+
+static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_simd_replicate(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 src, sljit_sw srcw)
+{
+#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ CHECK_ARGUMENT(sljit_has_cpu_feature(SLJIT_HAS_SIMD));
+ CHECK_ARGUMENT((type & SLJIT_SIMD_TYPE_MASK(0)) == 0);
+ CHECK_ARGUMENT(SLJIT_SIMD_CHECK_REG(type));
+ CHECK_ARGUMENT(SLJIT_SIMD_GET_ELEM_SIZE(type) < SLJIT_SIMD_GET_REG_SIZE(type));
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_FREG(freg, 0));
+
+ if (type & SLJIT_SIMD_FLOAT) {
+ if (src == SLJIT_IMM) {
+ CHECK_ARGUMENT(srcw == 0);
+ } else {
+ FUNCTION_FCHECK(src, srcw, SLJIT_SIMD_GET_ELEM_SIZE(type) == 2);
+ }
+ } else if (src != SLJIT_IMM) {
+ FUNCTION_CHECK_DST(src, srcw);
+ }
+#endif
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
+ if (SLJIT_UNLIKELY(!!compiler->verbose)) {
+ if (type & SLJIT_SIMD_TEST)
+ CHECK_RETURN_OK;
+ if (sljit_emit_simd_replicate(compiler, type | SLJIT_SIMD_TEST, freg, src, srcw) == SLJIT_ERR_UNSUPPORTED) {
+ fprintf(compiler->verbose, " # simd_dup: unsupported form, no instructions are emitted\n");
+ CHECK_RETURN_OK;
+ }
+
+ fprintf(compiler->verbose, " simd_replicate.%d.%s%d ",
+ (8 << SLJIT_SIMD_GET_REG_SIZE(type)),
+ (type & SLJIT_SIMD_FLOAT) ? "f" : "",
+ (8 << SLJIT_SIMD_GET_ELEM_SIZE(type)));
+
+ sljit_verbose_freg(compiler, freg);
+ fprintf(compiler->verbose, ", ");
+ if (type & SLJIT_SIMD_FLOAT)
+ sljit_verbose_fparam(compiler, src, srcw);
+ else
+ sljit_verbose_param(compiler, src, srcw);
+ fprintf(compiler->verbose, "\n");
+ }
+#endif
+ CHECK_RETURN_OK;
+}
+
+static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_simd_lane_mov(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg, sljit_s32 lane_index,
+ sljit_s32 srcdst, sljit_sw srcdstw)
+{
+#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ CHECK_ARGUMENT(sljit_has_cpu_feature(SLJIT_HAS_SIMD));
+ CHECK_ARGUMENT((type & SLJIT_SIMD_TYPE_MASK(SLJIT_SIMD_STORE | SLJIT_SIMD_LANE_ZERO | SLJIT_SIMD_LANE_SIGNED | SLJIT_32)) == 0);
+ CHECK_ARGUMENT((type & (SLJIT_SIMD_STORE | SLJIT_SIMD_LANE_ZERO)) != (SLJIT_SIMD_STORE | SLJIT_SIMD_LANE_ZERO));
+ CHECK_ARGUMENT((type & (SLJIT_SIMD_STORE | SLJIT_SIMD_LANE_SIGNED)) != SLJIT_SIMD_LANE_SIGNED);
+ CHECK_ARGUMENT(!(type & SLJIT_SIMD_FLOAT) || !(type & (SLJIT_SIMD_LANE_SIGNED | SLJIT_32)));
+ CHECK_ARGUMENT(SLJIT_SIMD_CHECK_REG(type));
+ CHECK_ARGUMENT(SLJIT_SIMD_GET_ELEM_SIZE(type) < SLJIT_SIMD_GET_REG_SIZE(type));
+ CHECK_ARGUMENT(!(type & SLJIT_32) || SLJIT_SIMD_GET_ELEM_SIZE(type) <= 2);
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_FREG(freg, 0));
+ CHECK_ARGUMENT(lane_index >= 0 && lane_index < (1 << (SLJIT_SIMD_GET_REG_SIZE(type) - SLJIT_SIMD_GET_ELEM_SIZE(type))));
+
+ if (type & SLJIT_SIMD_FLOAT) {
+ FUNCTION_FCHECK(srcdst, srcdstw, SLJIT_SIMD_GET_ELEM_SIZE(type) == 2);
+ } else if ((type & SLJIT_SIMD_STORE) || srcdst != SLJIT_IMM) {
+ FUNCTION_CHECK_DST(srcdst, srcdstw);
+ }
+#endif
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
+ if (SLJIT_UNLIKELY(!!compiler->verbose)) {
+ if (type & SLJIT_SIMD_TEST)
+ CHECK_RETURN_OK;
+ if (sljit_emit_simd_lane_mov(compiler, type | SLJIT_SIMD_TEST, freg, lane_index, srcdst, srcdstw) == SLJIT_ERR_UNSUPPORTED) {
+ fprintf(compiler->verbose, " # simd_move_lane: unsupported form, no instructions are emitted\n");
+ CHECK_RETURN_OK;
+ }
+
+ fprintf(compiler->verbose, " simd_%s_lane%s%s%s.%d.%s%d ",
+ (type & SLJIT_SIMD_STORE) ? "store" : "load",
+ (type & SLJIT_32) ? "32" : "",
+ (type & SLJIT_SIMD_LANE_ZERO) ? "_z" : "",
+ (type & SLJIT_SIMD_LANE_SIGNED) ? "_s" : "",
+ (8 << SLJIT_SIMD_GET_REG_SIZE(type)),
+ (type & SLJIT_SIMD_FLOAT) ? "f" : "",
+ (8 << SLJIT_SIMD_GET_ELEM_SIZE(type)));
+
+ sljit_verbose_freg(compiler, freg);
+ fprintf(compiler->verbose, "[%d], ", lane_index);
+ if (type & SLJIT_SIMD_FLOAT)
+ sljit_verbose_fparam(compiler, srcdst, srcdstw);
+ else
+ sljit_verbose_param(compiler, srcdst, srcdstw);
+ fprintf(compiler->verbose, "\n");
+ }
+#endif
+ CHECK_RETURN_OK;
+}
+
+static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_simd_lane_replicate(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 src, sljit_s32 src_lane_index)
+{
+#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ CHECK_ARGUMENT(sljit_has_cpu_feature(SLJIT_HAS_SIMD));
+ CHECK_ARGUMENT((type & SLJIT_SIMD_TYPE_MASK(0)) == 0);
+ CHECK_ARGUMENT(SLJIT_SIMD_CHECK_REG(type));
+ CHECK_ARGUMENT(SLJIT_SIMD_GET_ELEM_SIZE(type) < SLJIT_SIMD_GET_REG_SIZE(type));
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_FREG(freg, 0));
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_FREG(src, 0));
+ CHECK_ARGUMENT(src_lane_index >= 0 && src_lane_index < (1 << (SLJIT_SIMD_GET_REG_SIZE(type) - SLJIT_SIMD_GET_ELEM_SIZE(type))));
+#endif
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
+ if (SLJIT_UNLIKELY(!!compiler->verbose)) {
+ if (type & SLJIT_SIMD_TEST)
+ CHECK_RETURN_OK;
+ if (sljit_emit_simd_lane_replicate(compiler, type | SLJIT_SIMD_TEST, freg, src, src_lane_index) == SLJIT_ERR_UNSUPPORTED) {
+ fprintf(compiler->verbose, " # simd_lane_replicate: unsupported form, no instructions are emitted\n");
+ CHECK_RETURN_OK;
+ }
+
+ fprintf(compiler->verbose, " simd_lane_replicate.%d.%s%d ",
+ (8 << SLJIT_SIMD_GET_REG_SIZE(type)),
+ (type & SLJIT_SIMD_FLOAT) ? "f" : "",
+ (8 << SLJIT_SIMD_GET_ELEM_SIZE(type)));
+
+ sljit_verbose_freg(compiler, freg);
+ fprintf(compiler->verbose, ", ");
+ sljit_verbose_freg(compiler, src);
+ fprintf(compiler->verbose, "[%d]\n", src_lane_index);
+ }
+#endif
+ CHECK_RETURN_OK;
+}
+
+static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_simd_extend(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 src, sljit_sw srcw)
+{
+#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ CHECK_ARGUMENT(sljit_has_cpu_feature(SLJIT_HAS_SIMD));
+ CHECK_ARGUMENT((type & SLJIT_SIMD_TYPE_MASK2(SLJIT_SIMD_EXTEND_SIGNED)) == 0);
+ CHECK_ARGUMENT((type & (SLJIT_SIMD_EXTEND_SIGNED | SLJIT_SIMD_FLOAT)) != (SLJIT_SIMD_EXTEND_SIGNED | SLJIT_SIMD_FLOAT));
+ CHECK_ARGUMENT(SLJIT_SIMD_CHECK_REG(type));
+ CHECK_ARGUMENT(SLJIT_SIMD_GET_ELEM2_SIZE(type) < SLJIT_SIMD_GET_REG_SIZE(type));
+ CHECK_ARGUMENT(SLJIT_SIMD_GET_ELEM_SIZE(type) < SLJIT_SIMD_GET_ELEM2_SIZE(type));
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_FREG(freg, 0));
+ FUNCTION_FCHECK(src, srcw, SLJIT_SIMD_GET_ELEM_SIZE(type) == 2);
+#endif
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
+ if (SLJIT_UNLIKELY(!!compiler->verbose)) {
+ if (type & SLJIT_SIMD_TEST)
+ CHECK_RETURN_OK;
+ if (sljit_emit_simd_extend(compiler, type | SLJIT_SIMD_TEST, freg, src, srcw) == SLJIT_ERR_UNSUPPORTED) {
+ fprintf(compiler->verbose, " # simd_extend: unsupported form, no instructions are emitted\n");
+ CHECK_RETURN_OK;
+ }
+
+ fprintf(compiler->verbose, " simd_load_extend%s.%d.%s%d.%s%d ",
+ (type & SLJIT_SIMD_EXTEND_SIGNED) ? "_s" : "",
+ (8 << SLJIT_SIMD_GET_REG_SIZE(type)),
+ (type & SLJIT_SIMD_FLOAT) ? "f" : "",
+ (8 << SLJIT_SIMD_GET_ELEM2_SIZE(type)),
+ (type & SLJIT_SIMD_FLOAT) ? "f" : "",
+ (8 << SLJIT_SIMD_GET_ELEM_SIZE(type)));
+
+ sljit_verbose_freg(compiler, freg);
+ fprintf(compiler->verbose, ", ");
+ sljit_verbose_fparam(compiler, src, srcw);
+ fprintf(compiler->verbose, "\n");
+ }
+#endif
+ CHECK_RETURN_OK;
+}
+
+static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_simd_sign(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 dst, sljit_sw dstw)
+{
+#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ CHECK_ARGUMENT(sljit_has_cpu_feature(SLJIT_HAS_SIMD));
+ CHECK_ARGUMENT((type & SLJIT_SIMD_TYPE_MASK(SLJIT_32)) == SLJIT_SIMD_STORE);
+ CHECK_ARGUMENT(SLJIT_SIMD_CHECK_REG(type));
+ CHECK_ARGUMENT(SLJIT_SIMD_GET_ELEM_SIZE(type) < SLJIT_SIMD_GET_REG_SIZE(type));
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_FREG(freg, 0));
+ FUNCTION_CHECK_DST(dst, dstw);
+#endif
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
+ if (SLJIT_UNLIKELY(!!compiler->verbose)) {
+ if (type & SLJIT_SIMD_TEST)
+ CHECK_RETURN_OK;
+ if (sljit_emit_simd_sign(compiler, type | SLJIT_SIMD_TEST, freg, dst, dstw) == SLJIT_ERR_UNSUPPORTED) {
+ fprintf(compiler->verbose, " # simd_sign: unsupported form, no instructions are emitted\n");
+ CHECK_RETURN_OK;
+ }
+
+ fprintf(compiler->verbose, " simd_store_sign%s.%d.%s%d ",
+ (type & SLJIT_32) ? "32" : "",
+ (8 << SLJIT_SIMD_GET_REG_SIZE(type)),
+ (type & SLJIT_SIMD_FLOAT) ? "f" : "",
+ (8 << SLJIT_SIMD_GET_ELEM_SIZE(type)));
+
+ sljit_verbose_freg(compiler, freg);
+ fprintf(compiler->verbose, ", ");
+ sljit_verbose_param(compiler, dst, dstw);
+ fprintf(compiler->verbose, "\n");
+ }
+#endif
+ CHECK_RETURN_OK;
+}
+
+static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_simd_op2(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 dst_freg, sljit_s32 src1_freg, sljit_s32 src2_freg)
+{
+#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ CHECK_ARGUMENT(sljit_has_cpu_feature(SLJIT_HAS_SIMD));
+ CHECK_ARGUMENT((type & SLJIT_SIMD_TYPE_MASK(0)) >= SLJIT_SIMD_OP2_AND && (type & SLJIT_SIMD_TYPE_MASK(0)) <= SLJIT_SIMD_OP2_XOR);
+ CHECK_ARGUMENT(SLJIT_SIMD_CHECK_REG(type));
+ CHECK_ARGUMENT(SLJIT_SIMD_GET_ELEM_SIZE(type) <= SLJIT_SIMD_GET_REG_SIZE(type));
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_FREG(dst_freg, 0));
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_FREG(src1_freg, 0));
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_FREG(src2_freg, 0));
+#endif
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
+ if (SLJIT_UNLIKELY(!!compiler->verbose)) {
+ if (type & SLJIT_SIMD_TEST)
+ CHECK_RETURN_OK;
+ if (sljit_emit_simd_op2(compiler, type | SLJIT_SIMD_TEST, dst_freg, src1_freg, src2_freg) == SLJIT_ERR_UNSUPPORTED) {
+ fprintf(compiler->verbose, " # simd_op2: unsupported form, no instructions are emitted\n");
+ CHECK_RETURN_OK;
+ }
+
+ fprintf(compiler->verbose, " simd_%s.%d.%s%d ",
+ simd_op2_names[SLJIT_SIMD_GET_OPCODE(type) - 1],
+ (8 << SLJIT_SIMD_GET_REG_SIZE(type)),
+ (type & SLJIT_SIMD_FLOAT) ? "f" : "",
+ (8 << SLJIT_SIMD_GET_ELEM_SIZE(type)));
+
+ sljit_verbose_freg(compiler, dst_freg);
+ fprintf(compiler->verbose, ", ");
+ sljit_verbose_freg(compiler, src1_freg);
+ fprintf(compiler->verbose, ", ");
+ sljit_verbose_freg(compiler, src2_freg);
+ fprintf(compiler->verbose, "\n");
+ }
+#endif
+ CHECK_RETURN_OK;
+}
+
static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_get_local_base(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw offset)
{
/* Any offset is allowed. */
SLJIT_UNUSED_ARG(offset);
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- FUNCTION_CHECK_DST(dst, dstw, 0);
+ FUNCTION_CHECK_DST(dst, dstw);
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
if (SLJIT_UNLIKELY(!!compiler->verbose)) {
@@ -1967,7 +2915,7 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_const(struct sljit_compil
SLJIT_UNUSED_ARG(init_value);
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- FUNCTION_CHECK_DST(dst, dstw, 0);
+ FUNCTION_CHECK_DST(dst, dstw);
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
if (SLJIT_UNLIKELY(!!compiler->verbose)) {
@@ -1982,7 +2930,7 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_const(struct sljit_compil
static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_put_label(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
{
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- FUNCTION_CHECK_DST(dst, dstw, 0);
+ FUNCTION_CHECK_DST(dst, dstw);
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
if (SLJIT_UNLIKELY(!!compiler->verbose)) {
@@ -1994,10 +2942,14 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_put_label(struct sljit_co
CHECK_RETURN_OK;
}
+#else /* !SLJIT_ARGUMENT_CHECKS && !SLJIT_VERBOSE */
+
+#define SLJIT_SKIP_CHECKS(compiler)
+
#endif /* SLJIT_ARGUMENT_CHECKS || SLJIT_VERBOSE */
#define SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw) \
- SLJIT_COMPILE_ASSERT(!(SLJIT_CONV_SW_FROM_F64 & 0x1) && !(SLJIT_CONV_F64_FROM_SW & 0x1), \
+ SLJIT_COMPILE_ASSERT(!(SLJIT_CONV_SW_FROM_F64 & 0x1) && !(SLJIT_CONV_F64_FROM_SW & 0x1) && !(SLJIT_CONV_F64_FROM_UW & 0x1), \
invalid_float_opcodes); \
if (GET_OPCODE(op) >= SLJIT_CONV_SW_FROM_F64 && GET_OPCODE(op) <= SLJIT_CMP_F64) { \
if (GET_OPCODE(op) == SLJIT_CMP_F64) { \
@@ -2012,74 +2964,51 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_put_label(struct sljit_co
ADJUST_LOCAL_OFFSET(src, srcw); \
return sljit_emit_fop1_conv_sw_from_f64(compiler, op, dst, dstw, src, srcw); \
} \
- CHECK(check_sljit_emit_fop1_conv_f64_from_sw(compiler, op, dst, dstw, src, srcw)); \
+ if ((GET_OPCODE(op) | 0x1) == SLJIT_CONV_F64_FROM_S32) { \
+ CHECK(check_sljit_emit_fop1_conv_f64_from_w(compiler, op, dst, dstw, src, srcw)); \
+ ADJUST_LOCAL_OFFSET(dst, dstw); \
+ ADJUST_LOCAL_OFFSET(src, srcw); \
+ return sljit_emit_fop1_conv_f64_from_sw(compiler, op, dst, dstw, src, srcw); \
+ } \
+ CHECK(check_sljit_emit_fop1_conv_f64_from_w(compiler, op, dst, dstw, src, srcw)); \
ADJUST_LOCAL_OFFSET(dst, dstw); \
ADJUST_LOCAL_OFFSET(src, srcw); \
- return sljit_emit_fop1_conv_f64_from_sw(compiler, op, dst, dstw, src, srcw); \
+ return sljit_emit_fop1_conv_f64_from_uw(compiler, op, dst, dstw, src, srcw); \
} \
CHECK(check_sljit_emit_fop1(compiler, op, dst, dstw, src, srcw)); \
ADJUST_LOCAL_OFFSET(dst, dstw); \
ADJUST_LOCAL_OFFSET(src, srcw);
-static SLJIT_INLINE sljit_s32 emit_mov_before_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw)
-{
- /* Return if don't need to do anything. */
- if (op == SLJIT_UNUSED)
- return SLJIT_SUCCESS;
+#if (!(defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS) || (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6))
-#if (defined SLJIT_64BIT_ARCHITECTURE && SLJIT_64BIT_ARCHITECTURE)
- /* At the moment the pointer size is always equal to sljit_sw. May be changed in the future. */
- if (src == SLJIT_RETURN_REG && (op == SLJIT_MOV || op == SLJIT_MOV_P))
- return SLJIT_SUCCESS;
-#else
- if (src == SLJIT_RETURN_REG && (op == SLJIT_MOV || op == SLJIT_MOV_U32 || op == SLJIT_MOV_S32 || op == SLJIT_MOV_P))
- return SLJIT_SUCCESS;
-#endif
+static sljit_s32 sljit_emit_mem_unaligned(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 reg,
+ sljit_s32 mem, sljit_sw memw)
+{
+ SLJIT_SKIP_CHECKS(compiler);
-#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) \
- || (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
- compiler->skip_checks = 1;
-#endif
- return sljit_emit_op1(compiler, op, SLJIT_RETURN_REG, 0, src, srcw);
+ if (type & SLJIT_MEM_STORE)
+ return sljit_emit_op1(compiler, type & (0xff | SLJIT_32), mem, memw, reg, 0);
+ return sljit_emit_op1(compiler, type & (0xff | SLJIT_32), reg, 0, mem, memw);
}
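
A minimal usage sketch of the fallback above (operand choices are illustrative): on targets that take this generic path, an unaligned access is simply lowered to a plain move.

    /* Unaligned load of a word into R0 from [S0 + 8]... */
    sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_UNALIGNED,
        SLJIT_R0, SLJIT_MEM1(SLJIT_S0), 8);
    /* ...which the helper above lowers to: */
    sljit_emit_op1(compiler, SLJIT_MOV, SLJIT_R0, 0, SLJIT_MEM1(SLJIT_S0), 8);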
-#if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86) \
- || (defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC) \
- || (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) \
- || ((defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS) && !(defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1 && SLJIT_MIPS_REV < 6))
-
-static SLJIT_INLINE sljit_s32 sljit_emit_cmov_generic(struct sljit_compiler *compiler, sljit_s32 type,
- sljit_s32 dst_reg,
- sljit_s32 src, sljit_sw srcw)
-{
- struct sljit_label *label;
- struct sljit_jump *jump;
- sljit_s32 op = (dst_reg & SLJIT_I32_OP) ? SLJIT_MOV32 : SLJIT_MOV;
+#endif /* (!SLJIT_CONFIG_MIPS || SLJIT_MIPS_REV >= 6) */
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
- || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->skip_checks = 1;
-#endif
- jump = sljit_emit_jump(compiler, type ^ 0x1);
- FAIL_IF(!jump);
+#if (!(defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS) || (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6)) \
+ && !(defined SLJIT_CONFIG_ARM_32 && SLJIT_CONFIG_ARM_32)
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
- || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->skip_checks = 1;
-#endif
- FAIL_IF(sljit_emit_op1(compiler, op, dst_reg & ~SLJIT_I32_OP, 0, src, srcw));
+static sljit_s32 sljit_emit_fmem_unaligned(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 mem, sljit_sw memw)
+{
+ SLJIT_SKIP_CHECKS(compiler);
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
- || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->skip_checks = 1;
-#endif
- label = sljit_emit_label(compiler);
- FAIL_IF(!label);
- sljit_set_label(jump, label);
- return SLJIT_SUCCESS;
+ if (type & SLJIT_MEM_STORE)
+ return sljit_emit_fop1(compiler, type & (0xff | SLJIT_32), mem, memw, freg, 0);
+ return sljit_emit_fop1(compiler, type & (0xff | SLJIT_32), freg, 0, mem, memw);
}
-#endif
+#endif /* (!SLJIT_CONFIG_MIPS || SLJIT_MIPS_REV >= 6) && !SLJIT_CONFIG_ARM_32 */
/* CPU description section */
@@ -2109,7 +3038,7 @@ static SLJIT_INLINE sljit_s32 sljit_emit_cmov_generic(struct sljit_compiler *com
#if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86)
# include "sljitNativeX86_common.c"
-#elif (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+#elif (defined SLJIT_CONFIG_ARM_V6 && SLJIT_CONFIG_ARM_V6)
# include "sljitNativeARM_32.c"
#elif (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)
# include "sljitNativeARM_32.c"
@@ -2121,13 +3050,81 @@ static SLJIT_INLINE sljit_s32 sljit_emit_cmov_generic(struct sljit_compiler *com
# include "sljitNativePPC_common.c"
#elif (defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS)
# include "sljitNativeMIPS_common.c"
-#elif (defined SLJIT_CONFIG_SPARC && SLJIT_CONFIG_SPARC)
-# include "sljitNativeSPARC_common.c"
+#elif (defined SLJIT_CONFIG_RISCV && SLJIT_CONFIG_RISCV)
+# include "sljitNativeRISCV_common.c"
#elif (defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X)
# include "sljitNativeS390X.c"
+#elif (defined SLJIT_CONFIG_LOONGARCH && SLJIT_CONFIG_LOONGARCH)
+# include "sljitNativeLOONGARCH_64.c"
#endif
-#if !(defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS)
+static SLJIT_INLINE sljit_s32 emit_mov_before_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw)
+{
+#if (defined SLJIT_64BIT_ARCHITECTURE && SLJIT_64BIT_ARCHITECTURE)
+ /* At the moment the pointer size is always equal to sljit_sw. May be changed in the future. */
+ if (src == SLJIT_RETURN_REG && (op == SLJIT_MOV || op == SLJIT_MOV_P))
+ return SLJIT_SUCCESS;
+#else
+ if (src == SLJIT_RETURN_REG && (op == SLJIT_MOV || op == SLJIT_MOV_U32 || op == SLJIT_MOV_S32 || op == SLJIT_MOV_P))
+ return SLJIT_SUCCESS;
+#endif
+
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_op1(compiler, op, SLJIT_RETURN_REG, 0, src, srcw);
+}
+
+#if !(defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) \
+ && !((defined SLJIT_CONFIG_ARM_32 && SLJIT_CONFIG_ARM_32) && defined __SOFTFP__)
+
+static SLJIT_INLINE sljit_s32 emit_fmov_before_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw)
+{
+ if (src == SLJIT_FR0)
+ return SLJIT_SUCCESS;
+
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_fop1(compiler, op, SLJIT_RETURN_FREG, 0, src, srcw);
+}
+
+#endif /* !SLJIT_CONFIG_X86_32 && !(SLJIT_CONFIG_ARM_32 && __SOFTFP__) */
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_return(compiler, op, src, srcw));
+
+ if (GET_OPCODE(op) < SLJIT_MOV_F64) {
+ FAIL_IF(emit_mov_before_return(compiler, op, src, srcw));
+ } else {
+ FAIL_IF(emit_fmov_before_return(compiler, op, src, srcw));
+ }
+
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_return_void(compiler);
+}
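
As the body above shows, sljit_emit_return is a move into the return register followed by a void return; a sketch of the equivalence for an integer value (register choice is illustrative):

    /* sljit_emit_return(compiler, SLJIT_MOV, SLJIT_R1, 0); is equivalent to: */
    sljit_emit_op1(compiler, SLJIT_MOV, SLJIT_RETURN_REG, 0, SLJIT_R1, 0);
    sljit_emit_return_void(compiler);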
+
+#if !(defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86) \
+ && !(defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X) \
+ && !(defined SLJIT_CONFIG_LOONGARCH_64 && SLJIT_CONFIG_LOONGARCH_64)
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2r(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst_freg,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2, sljit_sw src2w)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fop2r(compiler, op, dst_freg, src1, src1w, src2, src2w));
+ ADJUST_LOCAL_OFFSET(src1, src1w);
+ ADJUST_LOCAL_OFFSET(src2, src2w);
+
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_fop2(compiler, op, dst_freg, 0, src1, src1w, src2, src2w);
+}
+
+#endif /* !SLJIT_CONFIG_X86 && !SLJIT_CONFIG_S390X && !SLJIT_CONFIG_LOONGARCH_64 */
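
On targets taking this generic path, the register-destination float op simply forwards to sljit_emit_fop2. A hedged sketch, assuming SLJIT_COPYSIGN_F64 is an opcode accepted by fop2r:

    /* FR0 = copysign(FR1, FR2), all operands in float registers. */
    sljit_emit_fop2r(compiler, SLJIT_COPYSIGN_F64,
        SLJIT_FR0, SLJIT_FR1, 0, SLJIT_FR2, 0);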
+
+#if !(defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS) \
+ && !(defined SLJIT_CONFIG_RISCV && SLJIT_CONFIG_RISCV) \
+ && !(defined SLJIT_CONFIG_LOONGARCH && SLJIT_CONFIG_LOONGARCH)
SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler *compiler, sljit_s32 type,
sljit_s32 src1, sljit_sw src1w,
@@ -2143,18 +3140,18 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler
condition = type & 0xff;
#if (defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64)
if ((condition == SLJIT_EQUAL || condition == SLJIT_NOT_EQUAL)) {
- if ((src1 & SLJIT_IMM) && !src1w) {
+ if (src1 == SLJIT_IMM && !src1w) {
src1 = src2;
src1w = src2w;
src2 = SLJIT_IMM;
src2w = 0;
}
- if ((src2 & SLJIT_IMM) && !src2w)
+ if (src2 == SLJIT_IMM && !src2w)
return emit_cmp_to0(compiler, type, src1, src1w);
}
#endif
- if (SLJIT_UNLIKELY((src1 & SLJIT_IMM) && !(src2 & SLJIT_IMM))) {
+ if (SLJIT_UNLIKELY(src1 == SLJIT_IMM && src2 != SLJIT_IMM)) {
/* Immediate is preferred as second argument by most architectures. */
switch (condition) {
case SLJIT_LESS:
@@ -2183,7 +3180,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler
break;
}
- type = condition | (type & (SLJIT_I32_OP | SLJIT_REWRITABLE_JUMP));
+ type = condition | (type & (SLJIT_32 | SLJIT_REWRITABLE_JUMP));
tmp_src = src1;
src1 = src2;
src2 = tmp_src;
@@ -2195,22 +3192,32 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler
if (condition <= SLJIT_NOT_ZERO)
flags = SLJIT_SET_Z;
else
- flags = condition << VARIABLE_FLAG_SHIFT;
+ flags = (condition & 0xfe) << VARIABLE_FLAG_SHIFT;
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
- || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->skip_checks = 1;
-#endif
- PTR_FAIL_IF(sljit_emit_op2(compiler, SLJIT_SUB | flags | (type & SLJIT_I32_OP),
- SLJIT_UNUSED, 0, src1, src1w, src2, src2w));
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
- || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->skip_checks = 1;
-#endif
- return sljit_emit_jump(compiler, condition | (type & (SLJIT_REWRITABLE_JUMP | SLJIT_I32_OP)));
+ SLJIT_SKIP_CHECKS(compiler);
+ PTR_FAIL_IF(sljit_emit_op2u(compiler,
+ SLJIT_SUB | flags | (type & SLJIT_32), src1, src1w, src2, src2w));
+
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_jump(compiler, condition | (type & (SLJIT_REWRITABLE_JUMP | SLJIT_32)));
}
-#endif
+#endif /* !SLJIT_CONFIG_MIPS && !SLJIT_CONFIG_RISCV && !SLJIT_CONFIG_LOONGARCH */
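
A typical call into the generic sljit_emit_cmp above, which expands to a flag-setting subtraction (sljit_emit_op2u) followed by a conditional jump; the operands are illustrative:

    /* Jump taken when R0 < 100 (unsigned comparison). */
    struct sljit_jump *jump = sljit_emit_cmp(compiler, SLJIT_LESS,
        SLJIT_R0, 0, SLJIT_IMM, 100);
    /* Bind the target later with sljit_set_label(jump, label). */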
+
+#if (defined SLJIT_CONFIG_ARM_32 && SLJIT_CONFIG_ARM_32)
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_cmp_info(sljit_s32 type)
+{
+ switch (type) {
+ case SLJIT_UNORDERED_OR_EQUAL:
+ case SLJIT_ORDERED_NOT_EQUAL:
+ return 1;
+ }
+
+ return 0;
+}
+
+#endif /* SLJIT_CONFIG_ARM_32 */
SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_fcmp(struct sljit_compiler *compiler, sljit_s32 type,
sljit_s32 src1, sljit_sw src1w,
@@ -2219,490 +3226,235 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_fcmp(struct sljit_compile
CHECK_ERROR_PTR();
CHECK_PTR(check_sljit_emit_fcmp(compiler, type, src1, src1w, src2, src2w));
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
- || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->skip_checks = 1;
-#endif
- sljit_emit_fop1(compiler, SLJIT_CMP_F64 | ((type & 0xff) << VARIABLE_FLAG_SHIFT) | (type & SLJIT_I32_OP), src1, src1w, src2, src2w);
+ SLJIT_SKIP_CHECKS(compiler);
+ sljit_emit_fop1(compiler, SLJIT_CMP_F64 | ((type & 0xfe) << VARIABLE_FLAG_SHIFT) | (type & SLJIT_32), src1, src1w, src2, src2w);
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
- || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->skip_checks = 1;
-#endif
+ SLJIT_SKIP_CHECKS(compiler);
return sljit_emit_jump(compiler, type);
}
-#if !(defined SLJIT_CONFIG_ARM_32 && SLJIT_CONFIG_ARM_32) \
- && !(defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) \
+#if !(defined SLJIT_CONFIG_ARM && SLJIT_CONFIG_ARM) \
&& !(defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC)
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type,
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem_update(struct sljit_compiler *compiler, sljit_s32 type,
sljit_s32 reg,
sljit_s32 mem, sljit_sw memw)
{
- SLJIT_UNUSED_ARG(compiler);
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_mem_update(compiler, type, reg, mem, memw));
SLJIT_UNUSED_ARG(type);
SLJIT_UNUSED_ARG(reg);
SLJIT_UNUSED_ARG(mem);
SLJIT_UNUSED_ARG(memw);
- CHECK_ERROR();
- CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw));
-
return SLJIT_ERR_UNSUPPORTED;
}
-#endif
+#endif /* !SLJIT_CONFIG_ARM && !SLJIT_CONFIG_PPC */
-#if !(defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) \
- && !(defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC)
+#if !(defined SLJIT_CONFIG_ARM_32 && SLJIT_CONFIG_ARM_32) \
+ && !(defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS)
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compiler, sljit_s32 type,
sljit_s32 freg,
sljit_s32 mem, sljit_sw memw)
{
- SLJIT_UNUSED_ARG(compiler);
- SLJIT_UNUSED_ARG(type);
- SLJIT_UNUSED_ARG(freg);
- SLJIT_UNUSED_ARG(mem);
- SLJIT_UNUSED_ARG(memw);
-
CHECK_ERROR();
CHECK(check_sljit_emit_fmem(compiler, type, freg, mem, memw));
- return SLJIT_ERR_UNSUPPORTED;
+ return sljit_emit_fmem_unaligned(compiler, type, freg, mem, memw);
}
-#endif
+#endif /* !SLJIT_CONFIG_ARM_32 && !SLJIT_CONFIG_MIPS */
-#if !(defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86) \
- && !(defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64)
+#if !(defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) \
+ && !(defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC)
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_local_base(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw offset)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem_update(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 mem, sljit_sw memw)
{
CHECK_ERROR();
- CHECK(check_sljit_get_local_base(compiler, dst, dstw, offset));
-
- ADJUST_LOCAL_OFFSET(SLJIT_MEM1(SLJIT_SP), offset);
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
- || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->skip_checks = 1;
-#endif
- if (offset != 0)
- return sljit_emit_op2(compiler, SLJIT_ADD, dst, dstw, SLJIT_SP, 0, SLJIT_IMM, offset);
- return sljit_emit_op1(compiler, SLJIT_MOV, dst, dstw, SLJIT_SP, 0);
-}
-
-#endif
-
-#else /* SLJIT_CONFIG_UNSUPPORTED */
-
-/* Empty function bodies for those machines, which are not (yet) supported. */
-
-SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void)
-{
- return "unsupported";
-}
-
-SLJIT_API_FUNC_ATTRIBUTE struct sljit_compiler* sljit_create_compiler(void *allocator_data, void *exec_allocator_data)
-{
- SLJIT_UNUSED_ARG(allocator_data);
- SLJIT_UNUSED_ARG(exec_allocator_data);
- SLJIT_UNREACHABLE();
- return NULL;
-}
-
-SLJIT_API_FUNC_ATTRIBUTE void sljit_free_compiler(struct sljit_compiler *compiler)
-{
- SLJIT_UNUSED_ARG(compiler);
- SLJIT_UNREACHABLE();
-}
-
-SLJIT_API_FUNC_ATTRIBUTE void sljit_set_compiler_memory_error(struct sljit_compiler *compiler)
-{
- SLJIT_UNUSED_ARG(compiler);
- SLJIT_UNREACHABLE();
-}
-
-SLJIT_API_FUNC_ATTRIBUTE void* sljit_alloc_memory(struct sljit_compiler *compiler, sljit_s32 size)
-{
- SLJIT_UNUSED_ARG(compiler);
- SLJIT_UNUSED_ARG(size);
- SLJIT_UNREACHABLE();
- return NULL;
-}
-
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
-SLJIT_API_FUNC_ATTRIBUTE void sljit_compiler_verbose(struct sljit_compiler *compiler, FILE* verbose)
-{
- SLJIT_UNUSED_ARG(compiler);
- SLJIT_UNUSED_ARG(verbose);
- SLJIT_UNREACHABLE();
-}
-#endif
+ CHECK(check_sljit_emit_fmem_update(compiler, type, freg, mem, memw));
+ SLJIT_UNUSED_ARG(type);
+ SLJIT_UNUSED_ARG(freg);
+ SLJIT_UNUSED_ARG(mem);
+ SLJIT_UNUSED_ARG(memw);
-SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler)
-{
- SLJIT_UNUSED_ARG(compiler);
- SLJIT_UNREACHABLE();
- return NULL;
+ return SLJIT_ERR_UNSUPPORTED;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
-{
- SLJIT_UNUSED_ARG(feature_type);
- SLJIT_UNREACHABLE();
- return 0;
-}
+#endif /* !SLJIT_CONFIG_ARM_64 && !SLJIT_CONFIG_PPC */
-SLJIT_API_FUNC_ATTRIBUTE void sljit_free_code(void* code, void *exec_allocator_data)
-{
- SLJIT_UNUSED_ARG(code);
- SLJIT_UNUSED_ARG(exec_allocator_data);
- SLJIT_UNREACHABLE();
-}
+#if !(defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86) \
+ && !(defined SLJIT_CONFIG_ARM && SLJIT_CONFIG_ARM) \
+ && !(defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X)
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler,
- sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
- sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_mov(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 srcdst, sljit_sw srcdstw)
{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_mov(compiler, type, freg, srcdst, srcdstw));
SLJIT_UNUSED_ARG(compiler);
- SLJIT_UNUSED_ARG(options);
- SLJIT_UNUSED_ARG(arg_types);
- SLJIT_UNUSED_ARG(scratches);
- SLJIT_UNUSED_ARG(saveds);
- SLJIT_UNUSED_ARG(fscratches);
- SLJIT_UNUSED_ARG(fsaveds);
- SLJIT_UNUSED_ARG(local_size);
- SLJIT_UNREACHABLE();
- return SLJIT_ERR_UNSUPPORTED;
-}
+ SLJIT_UNUSED_ARG(type);
+ SLJIT_UNUSED_ARG(freg);
+ SLJIT_UNUSED_ARG(srcdst);
+ SLJIT_UNUSED_ARG(srcdstw);
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler,
- sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
- sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
-{
- SLJIT_UNUSED_ARG(compiler);
- SLJIT_UNUSED_ARG(options);
- SLJIT_UNUSED_ARG(arg_types);
- SLJIT_UNUSED_ARG(scratches);
- SLJIT_UNUSED_ARG(saveds);
- SLJIT_UNUSED_ARG(fscratches);
- SLJIT_UNUSED_ARG(fsaveds);
- SLJIT_UNUSED_ARG(local_size);
- SLJIT_UNREACHABLE();
return SLJIT_ERR_UNSUPPORTED;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_replicate(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 src, sljit_sw srcw)
{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_replicate(compiler, type, freg, src, srcw));
SLJIT_UNUSED_ARG(compiler);
- SLJIT_UNUSED_ARG(op);
+ SLJIT_UNUSED_ARG(type);
+ SLJIT_UNUSED_ARG(freg);
SLJIT_UNUSED_ARG(src);
SLJIT_UNUSED_ARG(srcw);
- SLJIT_UNREACHABLE();
- return SLJIT_ERR_UNSUPPORTED;
-}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
-{
- SLJIT_UNUSED_ARG(compiler);
- SLJIT_UNUSED_ARG(dst);
- SLJIT_UNUSED_ARG(dstw);
- SLJIT_UNREACHABLE();
return SLJIT_ERR_UNSUPPORTED;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_lane_mov(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg, sljit_s32 lane_index,
+ sljit_s32 srcdst, sljit_sw srcdstw)
{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_lane_mov(compiler, type, freg, lane_index, srcdst, srcdstw));
SLJIT_UNUSED_ARG(compiler);
- SLJIT_UNUSED_ARG(op);
- SLJIT_UNREACHABLE();
+ SLJIT_UNUSED_ARG(type);
+ SLJIT_UNUSED_ARG(freg);
+ SLJIT_UNUSED_ARG(lane_index);
+ SLJIT_UNUSED_ARG(srcdst);
+ SLJIT_UNUSED_ARG(srcdstw);
+
return SLJIT_ERR_UNSUPPORTED;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op,
- sljit_s32 dst, sljit_sw dstw,
- sljit_s32 src, sljit_sw srcw)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_lane_replicate(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 src, sljit_s32 src_lane_index)
{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_lane_replicate(compiler, type, freg, src, src_lane_index));
SLJIT_UNUSED_ARG(compiler);
- SLJIT_UNUSED_ARG(op);
- SLJIT_UNUSED_ARG(dst);
- SLJIT_UNUSED_ARG(dstw);
+ SLJIT_UNUSED_ARG(type);
+ SLJIT_UNUSED_ARG(freg);
SLJIT_UNUSED_ARG(src);
- SLJIT_UNUSED_ARG(srcw);
- SLJIT_UNREACHABLE();
- return SLJIT_ERR_UNSUPPORTED;
-}
+ SLJIT_UNUSED_ARG(src_lane_index);
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compiler, sljit_s32 op,
- sljit_s32 dst, sljit_sw dstw,
- sljit_s32 src1, sljit_sw src1w,
- sljit_s32 src2, sljit_sw src2w)
-{
- SLJIT_UNUSED_ARG(compiler);
- SLJIT_UNUSED_ARG(op);
- SLJIT_UNUSED_ARG(dst);
- SLJIT_UNUSED_ARG(dstw);
- SLJIT_UNUSED_ARG(src1);
- SLJIT_UNUSED_ARG(src1w);
- SLJIT_UNUSED_ARG(src2);
- SLJIT_UNUSED_ARG(src2w);
- SLJIT_UNREACHABLE();
return SLJIT_ERR_UNSUPPORTED;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *compiler, sljit_s32 op,
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_extend(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
sljit_s32 src, sljit_sw srcw)
{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_extend(compiler, type, freg, src, srcw));
SLJIT_UNUSED_ARG(compiler);
- SLJIT_UNUSED_ARG(op);
+ SLJIT_UNUSED_ARG(type);
+ SLJIT_UNUSED_ARG(freg);
SLJIT_UNUSED_ARG(src);
SLJIT_UNUSED_ARG(srcw);
- SLJIT_UNREACHABLE();
- return SLJIT_ERR_UNSUPPORTED;
-}
-
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg)
-{
- SLJIT_UNREACHABLE();
- return reg;
-}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler,
- void *instruction, sljit_s32 size)
-{
- SLJIT_UNUSED_ARG(compiler);
- SLJIT_UNUSED_ARG(instruction);
- SLJIT_UNUSED_ARG(size);
- SLJIT_UNREACHABLE();
return SLJIT_ERR_UNSUPPORTED;
}
-SLJIT_API_FUNC_ATTRIBUTE void sljit_set_current_flags(struct sljit_compiler *compiler, sljit_s32 current_flags)
-{
- SLJIT_UNUSED_ARG(compiler);
- SLJIT_UNUSED_ARG(current_flags);
-}
-
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op,
- sljit_s32 dst, sljit_sw dstw,
- sljit_s32 src, sljit_sw srcw)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_sign(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 dst, sljit_sw dstw)
{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_sign(compiler, type, freg, dst, dstw));
SLJIT_UNUSED_ARG(compiler);
- SLJIT_UNUSED_ARG(op);
+ SLJIT_UNUSED_ARG(type);
+ SLJIT_UNUSED_ARG(freg);
SLJIT_UNUSED_ARG(dst);
SLJIT_UNUSED_ARG(dstw);
- SLJIT_UNUSED_ARG(src);
- SLJIT_UNUSED_ARG(srcw);
- SLJIT_UNREACHABLE();
- return SLJIT_ERR_UNSUPPORTED;
-}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compiler, sljit_s32 op,
- sljit_s32 dst, sljit_sw dstw,
- sljit_s32 src1, sljit_sw src1w,
- sljit_s32 src2, sljit_sw src2w)
-{
- SLJIT_UNUSED_ARG(compiler);
- SLJIT_UNUSED_ARG(op);
- SLJIT_UNUSED_ARG(dst);
- SLJIT_UNUSED_ARG(dstw);
- SLJIT_UNUSED_ARG(src1);
- SLJIT_UNUSED_ARG(src1w);
- SLJIT_UNUSED_ARG(src2);
- SLJIT_UNUSED_ARG(src2w);
- SLJIT_UNREACHABLE();
return SLJIT_ERR_UNSUPPORTED;
}
-SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compiler *compiler)
-{
- SLJIT_UNUSED_ARG(compiler);
- SLJIT_UNREACHABLE();
- return NULL;
-}
-
-SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_s32 type)
-{
- SLJIT_UNUSED_ARG(compiler);
- SLJIT_UNUSED_ARG(type);
- SLJIT_UNREACHABLE();
- return NULL;
-}
-
-SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type,
- sljit_s32 arg_types)
-{
- SLJIT_UNUSED_ARG(compiler);
- SLJIT_UNUSED_ARG(type);
- SLJIT_UNUSED_ARG(arg_types);
- SLJIT_UNREACHABLE();
- return NULL;
-}
-
-SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler *compiler, sljit_s32 type,
- sljit_s32 src1, sljit_sw src1w,
- sljit_s32 src2, sljit_sw src2w)
-{
- SLJIT_UNUSED_ARG(compiler);
- SLJIT_UNUSED_ARG(type);
- SLJIT_UNUSED_ARG(src1);
- SLJIT_UNUSED_ARG(src1w);
- SLJIT_UNUSED_ARG(src2);
- SLJIT_UNUSED_ARG(src2w);
- SLJIT_UNREACHABLE();
- return NULL;
-}
-
-SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_fcmp(struct sljit_compiler *compiler, sljit_s32 type,
- sljit_s32 src1, sljit_sw src1w,
- sljit_s32 src2, sljit_sw src2w)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_op2(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 dst_freg, sljit_s32 src1_freg, sljit_s32 src2_freg)
{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_op2(compiler, type, dst_freg, src1_freg, src2_freg));
SLJIT_UNUSED_ARG(compiler);
SLJIT_UNUSED_ARG(type);
- SLJIT_UNUSED_ARG(src1);
- SLJIT_UNUSED_ARG(src1w);
- SLJIT_UNUSED_ARG(src2);
- SLJIT_UNUSED_ARG(src2w);
- SLJIT_UNREACHABLE();
- return NULL;
-}
+ SLJIT_UNUSED_ARG(dst_freg);
+ SLJIT_UNUSED_ARG(src1_freg);
+ SLJIT_UNUSED_ARG(src2_freg);
-SLJIT_API_FUNC_ATTRIBUTE void sljit_set_label(struct sljit_jump *jump, struct sljit_label* label)
-{
- SLJIT_UNUSED_ARG(jump);
- SLJIT_UNUSED_ARG(label);
- SLJIT_UNREACHABLE();
+ return SLJIT_ERR_UNSUPPORTED;
}
-SLJIT_API_FUNC_ATTRIBUTE void sljit_set_target(struct sljit_jump *jump, sljit_uw target)
-{
- SLJIT_UNUSED_ARG(jump);
- SLJIT_UNUSED_ARG(target);
- SLJIT_UNREACHABLE();
-}
+#endif /* !SLJIT_CONFIG_X86 && !SLJIT_CONFIG_ARM && !SLJIT_CONFIG_S390X */
-SLJIT_API_FUNC_ATTRIBUTE void sljit_set_put_label(struct sljit_put_label *put_label, struct sljit_label *label)
-{
- SLJIT_UNUSED_ARG(put_label);
- SLJIT_UNUSED_ARG(label);
- SLJIT_UNREACHABLE();
-}
+#if !(defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86) \
+ && !(defined SLJIT_CONFIG_ARM && SLJIT_CONFIG_ARM) \
+ && !(defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X) \
+ && !(defined SLJIT_CONFIG_LOONGARCH && SLJIT_CONFIG_LOONGARCH)
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_atomic_load(struct sljit_compiler *compiler,
+ sljit_s32 op,
+ sljit_s32 dst_reg,
+ sljit_s32 mem_reg)
{
SLJIT_UNUSED_ARG(compiler);
- SLJIT_UNUSED_ARG(type);
- SLJIT_UNUSED_ARG(src);
- SLJIT_UNUSED_ARG(srcw);
- SLJIT_UNREACHABLE();
- return SLJIT_ERR_UNSUPPORTED;
-}
+ SLJIT_UNUSED_ARG(op);
+ SLJIT_UNUSED_ARG(dst_reg);
+ SLJIT_UNUSED_ARG(mem_reg);
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_atomic_load(compiler, op, dst_reg, mem_reg));
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type,
- sljit_s32 arg_types,
- sljit_s32 src, sljit_sw srcw)
-{
- SLJIT_UNUSED_ARG(compiler);
- SLJIT_UNUSED_ARG(type);
- SLJIT_UNUSED_ARG(arg_types);
- SLJIT_UNUSED_ARG(src);
- SLJIT_UNUSED_ARG(srcw);
- SLJIT_UNREACHABLE();
return SLJIT_ERR_UNSUPPORTED;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op,
- sljit_s32 dst, sljit_sw dstw,
- sljit_s32 type)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_atomic_store(struct sljit_compiler *compiler,
+ sljit_s32 op,
+ sljit_s32 src_reg,
+ sljit_s32 mem_reg,
+ sljit_s32 temp_reg)
{
SLJIT_UNUSED_ARG(compiler);
SLJIT_UNUSED_ARG(op);
- SLJIT_UNUSED_ARG(dst);
- SLJIT_UNUSED_ARG(dstw);
- SLJIT_UNUSED_ARG(type);
- SLJIT_UNREACHABLE();
- return SLJIT_ERR_UNSUPPORTED;
-}
+ SLJIT_UNUSED_ARG(src_reg);
+ SLJIT_UNUSED_ARG(mem_reg);
+ SLJIT_UNUSED_ARG(temp_reg);
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compiler, sljit_s32 type,
- sljit_s32 dst_reg,
- sljit_s32 src, sljit_sw srcw)
-{
- SLJIT_UNUSED_ARG(compiler);
- SLJIT_UNUSED_ARG(type);
- SLJIT_UNUSED_ARG(dst_reg);
- SLJIT_UNUSED_ARG(src);
- SLJIT_UNUSED_ARG(srcw);
- SLJIT_UNREACHABLE();
- return SLJIT_ERR_UNSUPPORTED;
-}
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_atomic_store(compiler, op, src_reg, mem_reg, temp_reg));
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 reg, sljit_s32 mem, sljit_sw memw)
-{
- SLJIT_UNUSED_ARG(compiler);
- SLJIT_UNUSED_ARG(type);
- SLJIT_UNUSED_ARG(reg);
- SLJIT_UNUSED_ARG(mem);
- SLJIT_UNUSED_ARG(memw);
- SLJIT_UNREACHABLE();
return SLJIT_ERR_UNSUPPORTED;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 freg, sljit_s32 mem, sljit_sw memw)
-{
- SLJIT_UNUSED_ARG(compiler);
- SLJIT_UNUSED_ARG(type);
- SLJIT_UNUSED_ARG(freg);
- SLJIT_UNUSED_ARG(mem);
- SLJIT_UNUSED_ARG(memw);
- SLJIT_UNREACHABLE();
- return SLJIT_ERR_UNSUPPORTED;
-}
+#endif /* !SLJIT_CONFIG_X86 && !SLJIT_CONFIG_ARM && !SLJIT_CONFIG_S390X && !SLJIT_CONFIG_LOONGARCH */
+
+#if !(defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86) \
+ && !(defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64)
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_local_base(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw offset)
{
- SLJIT_UNUSED_ARG(compiler);
- SLJIT_UNUSED_ARG(dst);
- SLJIT_UNUSED_ARG(dstw);
- SLJIT_UNUSED_ARG(offset);
- SLJIT_UNREACHABLE();
- return SLJIT_ERR_UNSUPPORTED;
-}
+ CHECK_ERROR();
+ CHECK(check_sljit_get_local_base(compiler, dst, dstw, offset));
-SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw initval)
-{
- SLJIT_UNUSED_ARG(compiler);
- SLJIT_UNUSED_ARG(dst);
- SLJIT_UNUSED_ARG(dstw);
- SLJIT_UNUSED_ARG(initval);
- SLJIT_UNREACHABLE();
- return NULL;
-}
+ ADJUST_LOCAL_OFFSET(SLJIT_MEM1(SLJIT_SP), offset);
-SLJIT_API_FUNC_ATTRIBUTE struct sljit_put_label* sljit_emit_put_label(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
-{
- SLJIT_UNUSED_ARG(compiler);
- SLJIT_UNUSED_ARG(dst);
- SLJIT_UNUSED_ARG(dstw);
- return NULL;
-}
+ SLJIT_SKIP_CHECKS(compiler);
-SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_target, sljit_sw executable_offset)
-{
- SLJIT_UNUSED_ARG(addr);
- SLJIT_UNUSED_ARG(new_target);
- SLJIT_UNUSED_ARG(executable_offset);
- SLJIT_UNREACHABLE();
+ if (offset != 0)
+ return sljit_emit_op2(compiler, SLJIT_ADD, dst, dstw, SLJIT_SP, 0, SLJIT_IMM, offset);
+ return sljit_emit_op1(compiler, SLJIT_MOV, dst, dstw, SLJIT_SP, 0);
}
-SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset)
-{
- SLJIT_UNUSED_ARG(addr);
- SLJIT_UNUSED_ARG(new_constant);
- SLJIT_UNUSED_ARG(executable_offset);
- SLJIT_UNREACHABLE();
-}
+#endif /* !SLJIT_CONFIG_X86 && !SLJIT_CONFIG_ARM_64 */
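
The generic path above computes the destination as SP plus the adjusted offset; a short usage sketch (the offset is illustrative):

    /* R0 receives the absolute address of the local at offset 16. */
    sljit_get_local_base(compiler, SLJIT_R0, 0, 16);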
#endif /* !SLJIT_CONFIG_UNSUPPORTED */
diff --git a/src/3rdparty/pcre2/src/sljit/sljitLir.h b/src/3rdparty/pcre2/src/sljit/sljitLir.h
index 0eb62fc21b..2ba6683c74 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitLir.h
+++ b/src/3rdparty/pcre2/src/sljit/sljitLir.h
@@ -36,26 +36,24 @@
Advantages:
- The execution can be continued from any LIR instruction. In other
words, it is possible to jump to any label from anywhere, even from
- a code fragment, which is compiled later, if both compiled code
- shares the same context. See sljit_emit_enter for more details
- - Supports self modifying code: target of (conditional) jump and call
+ a code fragment compiled later, as long as the compiling
+ context is the same. See sljit_emit_enter for more details
+ and the sketch after this list.
+ - Supports self modifying code: the targets of jump and call
instructions and some constant values can be dynamically modified
- during runtime
+ during runtime. See SLJIT_REWRITABLE_JUMP.
- although it is not suggested to do it frequently
- can be used for inline caching: save an important value once
in the instruction stream
- - since this feature limits the optimization possibilities, a
- special flag must be passed at compile time when these
- instructions are emitted
- A fixed stack space can be allocated for local variables
- The compiler is thread-safe
- The compiler is highly configurable through preprocessor macros.
You can disable unneeded features (multithreading in single
threaded applications), and you can use your own system functions
- (including memory allocators). See sljitConfig.h
+ (including memory allocators). See sljitConfig.h.
Disadvantages:
- - No automatic register allocation, and temporary results are
- not stored on the stack. (hence the name comes)
+ - The compiler is more like a platform independent assembler, so
+ there is no built-in variable management. Registers and stack must
+ be managed manually (the name of the compiler refers to this).
In practice:
- This approach is very effective for interpreters
- One of the saved registers typically points to a stack interface
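
The "jump to any label from anywhere" flexibility mentioned above boils down to the ordinary pairing of the public API calls; a minimal sketch:

    struct sljit_jump *jump = sljit_emit_jump(compiler, SLJIT_JUMP);
    /* ... any amount of code, possibly compiled much later ... */
    struct sljit_label *label = sljit_emit_label(compiler);
    sljit_set_label(jump, label);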
@@ -74,10 +72,11 @@
#include "sljitConfigPre.h"
#endif /* SLJIT_HAVE_CONFIG_PRE */
+#include "sljitConfigCPU.h"
#include "sljitConfig.h"
/* The following header file defines useful macros for fine tuning
-sljit based code generators. They are listed in the beginning
+SLJIT based code generators. They are listed in the beginning
of sljitConfigInternal.h */
#include "sljitConfigInternal.h"
@@ -90,6 +89,10 @@ of sljitConfigInternal.h */
extern "C" {
#endif
+/* Version numbers. */
+#define SLJIT_MAJOR_VERSION 0
+#define SLJIT_MINOR_VERSION 95
+
/* --------------------------------------------------------------------- */
/* Error codes */
/* --------------------------------------------------------------------- */
@@ -97,79 +100,70 @@ extern "C" {
/* Indicates no error. */
#define SLJIT_SUCCESS 0
/* After the call of sljit_generate_code(), the error code of the compiler
- is set to this value to avoid future sljit calls (in debug mode at least).
+ is set to this value to avoid further code generation.
The compiler should be freed after sljit_generate_code(). */
#define SLJIT_ERR_COMPILED 1
-/* Cannot allocate non executable memory. */
+/* Cannot allocate non-executable memory. */
#define SLJIT_ERR_ALLOC_FAILED 2
/* Cannot allocate executable memory.
- Only for sljit_generate_code() */
+ Only sljit_generate_code() returns with this error code. */
#define SLJIT_ERR_EX_ALLOC_FAILED 3
-/* Return value for SLJIT_CONFIG_UNSUPPORTED placeholder architecture. */
+/* Unsupported instruction form. */
#define SLJIT_ERR_UNSUPPORTED 4
-/* An ivalid argument is passed to any SLJIT function. */
+/* An invalid argument is passed to any SLJIT function. */
#define SLJIT_ERR_BAD_ARGUMENT 5
-/* Dynamic code modification is not enabled. */
-#define SLJIT_ERR_DYN_CODE_MOD 6
/* --------------------------------------------------------------------- */
/* Registers */
/* --------------------------------------------------------------------- */
/*
- Scratch (R) registers: registers whose may not preserve their values
+ Scratch (R) registers: registers which may not preserve their values
across function calls.
- Saved (S) registers: registers whose preserve their values across
+ Saved (S) registers: registers which preserve their values across
function calls.
- The scratch and saved register sets are overlap. The last scratch register
+ The scratch and saved register sets overlap. The last scratch register
is the first saved register, the one before the last is the second saved
register, and so on.
- If an architecture provides two scratch and three saved registers,
- its scratch and saved register sets are the following:
+ For example, in an architecture with only five registers (A-E), if two
+ are scratch and three are saved registers, they will be defined as follows:
- R0 | | R0 is always a scratch register
- R1 | | R1 is always a scratch register
- [R2] | S2 | R2 and S2 represent the same physical register
- [R3] | S1 | R3 and S1 represent the same physical register
- [R4] | S0 | R4 and S0 represent the same physical register
+ A | R0 | | R0 always represents scratch register A
+ B | R1 | | R1 always represents scratch register B
+ C | [R2] | S2 | R2 and S2 represent the same physical register C
+ D | [R3] | S1 | R3 and S1 represent the same physical register D
+ E | [R4] | S0 | R4 and S0 represent the same physical register E
- Note: SLJIT_NUMBER_OF_SCRATCH_REGISTERS would be 2 and
- SLJIT_NUMBER_OF_SAVED_REGISTERS would be 3 for this architecture.
+ Note: SLJIT_NUMBER_OF_SCRATCH_REGISTERS will be 2 and
+ SLJIT_NUMBER_OF_SAVED_REGISTERS will be 3.
- Note: On all supported architectures SLJIT_NUMBER_OF_REGISTERS >= 12
+ Note: For all supported architectures SLJIT_NUMBER_OF_REGISTERS >= 12
and SLJIT_NUMBER_OF_SAVED_REGISTERS >= 6. However, 6 registers
are virtual on x86-32. See below.
The purpose of this definition is convenience: saved registers can
- be used as extra scratch registers. For example four registers can
- be specified as scratch registers and the fifth one as saved register
- on the CPU above and any user code which requires four scratch
- registers can run unmodified. The SLJIT compiler automatically saves
- the content of the two extra scratch register on the stack. Scratch
- registers can also be preserved by saving their value on the stack
- but this needs to be done manually.
+ be used as extra scratch registers. Continuing the previous example,
+ four registers can be specified as scratch registers and the fifth one
+ as a saved register, allowing any user code which requires four
+ scratch registers to run unmodified. The SLJIT compiler automatically
+ saves the content of the two extra scratch registers on the stack.
+ Scratch registers can also be preserved by saving their value on the
+ stack but that needs to be done manually.
Note: To emphasize that registers assigned to R2-R4 are saved
registers, they are enclosed by square brackets.
- Note: sljit_emit_enter and sljit_set_context defines whether a register
- is S or R register. E.g: when 3 scratches and 1 saved is mapped
- by sljit_emit_enter, the allowed register set will be: R0-R2 and
- S0. Although S2 is mapped to the same position as R2, it does not
- available in the current configuration. Furthermore the S1 register
- is not available at all.
+ Note: sljit_emit_enter and sljit_set_context define whether a register
+ is an S or an R register. E.g.: if in the previous example 3 scratches
+ and 1 saved are mapped by sljit_emit_enter, the allowed register set
+ will be: R0-R2 and S0. Although S2 is mapped to the same register
+ as R2, it is not available in that configuration. Furthermore
+ the S1 register cannot be used at all.
*/
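
A short sketch of the mapping described above (assuming the SLJIT_ARGS1 argument-type helper from this header): entering with 3 scratch and 1 saved register exposes only R0-R2 and S0.

    /* word func(word): 3 scratches, 1 saved, no float registers, no locals. */
    sljit_emit_enter(compiler, 0, SLJIT_ARGS1(W, W), 3, 1, 0, 0, 0);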
-/* When SLJIT_UNUSED is specified as the destination of sljit_emit_op1
- or sljit_emit_op2 operations the result is discarded. Some status
- flags must be set when the destination is SLJIT_UNUSED, because the
- operation would have no effect otherwise. Other SLJIT operations do
- not support SLJIT_UNUSED as a destination operand. */
-#define SLJIT_UNUSED 0
-
/* Scratch registers. */
#define SLJIT_R0 1
#define SLJIT_R1 2
@@ -216,7 +210,7 @@ extern "C" {
/* The SLJIT_SP provides direct access to the linear stack space allocated by
sljit_emit_enter. It can only be used in the following form: SLJIT_MEM1(SLJIT_SP).
The immediate offset is extended by the relative stack offset automatically.
- The sljit_get_local_base can be used to obtain the absolute offset. */
+ sljit_get_local_base can be used to obtain the real address of a value. */
#define SLJIT_SP (SLJIT_NUMBER_OF_REGISTERS + 1)
/* Return with machine word. */
@@ -228,12 +222,9 @@ extern "C" {
/* --------------------------------------------------------------------- */
/* Each floating point register can store a 32 or a 64 bit precision
- value. The FR and FS register sets are overlap in the same way as R
+ value. The FR and FS register sets overlap in the same way as R
and S register sets. See above. */
-/* Note: SLJIT_UNUSED as destination is not valid for floating point
- operations, since they cannot be used for setting flags. */
-
/* Floating point scratch registers. */
#define SLJIT_FR0 1
#define SLJIT_FR1 2
@@ -241,6 +232,10 @@ extern "C" {
#define SLJIT_FR3 4
#define SLJIT_FR4 5
#define SLJIT_FR5 6
+#define SLJIT_FR6 7
+#define SLJIT_FR7 8
+#define SLJIT_FR8 9
+#define SLJIT_FR9 10
/* All FR registers provided by the architecture can be accessed by SLJIT_FR(i)
The i parameter must be >= 0 and < SLJIT_NUMBER_OF_FLOAT_REGISTERS. */
#define SLJIT_FR(i) (1 + (i))
@@ -252,6 +247,10 @@ extern "C" {
#define SLJIT_FS3 (SLJIT_NUMBER_OF_FLOAT_REGISTERS - 3)
#define SLJIT_FS4 (SLJIT_NUMBER_OF_FLOAT_REGISTERS - 4)
#define SLJIT_FS5 (SLJIT_NUMBER_OF_FLOAT_REGISTERS - 5)
+#define SLJIT_FS6 (SLJIT_NUMBER_OF_FLOAT_REGISTERS - 6)
+#define SLJIT_FS7 (SLJIT_NUMBER_OF_FLOAT_REGISTERS - 7)
+#define SLJIT_FS8 (SLJIT_NUMBER_OF_FLOAT_REGISTERS - 8)
+#define SLJIT_FS9 (SLJIT_NUMBER_OF_FLOAT_REGISTERS - 9)
/* All S registers provided by the architecture can be accessed by SLJIT_FS(i)
The i parameter must be >= 0 and < SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS. */
#define SLJIT_FS(i) (SLJIT_NUMBER_OF_FLOAT_REGISTERS - (i))
@@ -259,78 +258,156 @@ extern "C" {
/* Float registers >= SLJIT_FIRST_SAVED_FLOAT_REG are saved registers. */
#define SLJIT_FIRST_SAVED_FLOAT_REG (SLJIT_FS0 - SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS + 1)
+/* Return with floating point arg. */
+
+#define SLJIT_RETURN_FREG SLJIT_FR0
+
/* --------------------------------------------------------------------- */
/* Argument type definitions */
/* --------------------------------------------------------------------- */
-/* Argument type definitions.
- Used by SLJIT_[DEF_]ARGx and SLJIT_[DEF]_RET macros. */
-
-#define SLJIT_ARG_TYPE_VOID 0
-#define SLJIT_ARG_TYPE_SW 1
-#define SLJIT_ARG_TYPE_UW 2
-#define SLJIT_ARG_TYPE_S32 3
-#define SLJIT_ARG_TYPE_U32 4
-#define SLJIT_ARG_TYPE_F32 5
-#define SLJIT_ARG_TYPE_F64 6
-
/* The following argument type definitions are used by sljit_emit_enter,
sljit_set_context, sljit_emit_call, and sljit_emit_icall functions.
- The following return type definitions are used by sljit_emit_call
- and sljit_emit_icall functions.
- When a function is called, the first integer argument must be placed
- in SLJIT_R0, the second in SLJIT_R1, and so on. Similarly the first
- floating point argument must be placed in SLJIT_FR0, the second in
- SLJIT_FR1, and so on.
+ For sljit_emit_call and sljit_emit_icall, the first integer argument
+ must be placed into SLJIT_R0, the second one into SLJIT_R1, and so on.
+ Similarly the first floating point argument must be placed into SLJIT_FR0,
+ the second one into SLJIT_FR1, and so on.
+
+ For sljit_emit_enter, the integer arguments can be stored in scratch
+ or saved registers. Scratch registers are identified by a _R suffix.
+
+ If only saved registers are used, then the allocation mirrors what is
+ done for the "call" functions but using saved registers, meaning that
+ the first integer argument goes to SLJIT_S0, the second one goes into
+ SLJIT_S1, and so on.
+
+ If scratch registers are used, then the way the integer registers are
+ allocated changes so that SLJIT_S0, SLJIT_S1, etc. will be assigned
+ only for the arguments not using scratch registers, while SLJIT_R<n>
+ will be used for the ones using scratch registers.
+
+ Furthermore, the index (shown as "n" above) that will be used for the
+ scratch register depends on how many previous integer registers
+ (scratch or saved) were used already, starting with SLJIT_R0.
+ Even though some indices will likely be skipped, they still need to be
+ accounted for in the scratches parameter of sljit_emit_enter. See below
+ for some examples.
+
+ The floating point arguments always use scratch registers (without the
+ _R suffix used by the integer arguments) and must use SLJIT_FR0, SLJIT_FR1,
+ just like in the "call" functions.
+
+ Note: the mapping for scratch registers is part of the compiler context
+ and therefore a new context after sljit_emit_call/sljit_emit_icall
+ could remove access to some scratch registers that were used as
+ arguments.
Example function definition:
- sljit_f32 SLJIT_FUNC example_c_callback(sljit_sw arg_a,
+ sljit_f32 SLJIT_FUNC example_c_callback(void *arg_a,
sljit_f64 arg_b, sljit_u32 arg_c, sljit_f32 arg_d);
Argument type definition:
- SLJIT_DEF_RET(SLJIT_ARG_TYPE_F32)
- | SLJIT_DEF_ARG1(SLJIT_ARG_TYPE_SW) | SLJIT_DEF_ARG2(SLJIT_ARG_TYPE_F64)
- | SLJIT_DEF_ARG3(SLJIT_ARG_TYPE_U32) | SLJIT_DEF_ARG2(SLJIT_ARG_TYPE_F32)
+ SLJIT_ARG_RETURN(SLJIT_ARG_TYPE_F32)
+ | SLJIT_ARG_VALUE(SLJIT_ARG_TYPE_P, 1) | SLJIT_ARG_VALUE(SLJIT_ARG_TYPE_F64, 2)
+ | SLJIT_ARG_VALUE(SLJIT_ARG_TYPE_32, 3) | SLJIT_ARG_VALUE(SLJIT_ARG_TYPE_F32, 4)
Short form of argument type definition:
- SLJIT_RET(F32) | SLJIT_ARG1(SW) | SLJIT_ARG2(F64)
- | SLJIT_ARG3(S32) | SLJIT_ARG4(F32)
+ SLJIT_ARGS4(F32, P, F64, 32, F32)
Argument passing:
arg_a must be placed in SLJIT_R0
- arg_c must be placed in SLJIT_R1
arg_b must be placed in SLJIT_FR0
+ arg_c must be placed in SLJIT_R1
arg_d must be placed in SLJIT_FR1
-Note:
- The SLJIT_ARG_TYPE_VOID type is only supported by
- SLJIT_DEF_RET, and SLJIT_ARG_TYPE_VOID is also the
- default value when SLJIT_DEF_RET is not specified. */
-#define SLJIT_DEF_SHIFT 4
-#define SLJIT_DEF_RET(type) (type)
-#define SLJIT_DEF_ARG1(type) ((type) << SLJIT_DEF_SHIFT)
-#define SLJIT_DEF_ARG2(type) ((type) << (2 * SLJIT_DEF_SHIFT))
-#define SLJIT_DEF_ARG3(type) ((type) << (3 * SLJIT_DEF_SHIFT))
-#define SLJIT_DEF_ARG4(type) ((type) << (4 * SLJIT_DEF_SHIFT))
+ Examples for argument processing by sljit_emit_enter:
+ SLJIT_ARGS4V(P, 32_R, F32, W)
+ Arguments are placed into: SLJIT_S0, SLJIT_R1, SLJIT_FR0, SLJIT_S1
+ The type of the result is void.
+
+ SLJIT_ARGS4(F32, W, W_R, W, W_R)
+ Arguments are placed into: SLJIT_S0, SLJIT_R1, SLJIT_S1, SLJIT_R3
+ The type of the result is sljit_f32.
+
+ SLJIT_ARGS4(P, F64, W, F32, P_R)
+ Arguments are placed into: SLJIT_FR0, SLJIT_S0, SLJIT_FR1, SLJIT_R1
+ The type of the result is pointer.
+
+ Note: it is recommended to pass the scratch arguments first
+ followed by the saved arguments:
+
+ SLJIT_ARGS4(W, W_R, W_R, W, W)
+ Arguments are placed into: SLJIT_R0, SLJIT_R1, SLJIT_S0, SLJIT_S1
+ The type of the result is sljit_sw / sljit_uw.
+*/
+
+/* The following flag is only allowed for the integer arguments of
+ sljit_emit_enter. When the flag is set, the integer argument is
+ stored in a scratch register instead of a saved register. */
+#define SLJIT_ARG_TYPE_SCRATCH_REG 0x8
+
+/* No return value, only supported by SLJIT_ARG_RETURN. */
+#define SLJIT_ARG_TYPE_RET_VOID 0
+/* Machine word sized integer argument or result. */
+#define SLJIT_ARG_TYPE_W 1
+#define SLJIT_ARG_TYPE_W_R (SLJIT_ARG_TYPE_W | SLJIT_ARG_TYPE_SCRATCH_REG)
+/* 32 bit integer argument or result. */
+#define SLJIT_ARG_TYPE_32 2
+#define SLJIT_ARG_TYPE_32_R (SLJIT_ARG_TYPE_32 | SLJIT_ARG_TYPE_SCRATCH_REG)
+/* Pointer sized integer argument or result. */
+#define SLJIT_ARG_TYPE_P 3
+#define SLJIT_ARG_TYPE_P_R (SLJIT_ARG_TYPE_P | SLJIT_ARG_TYPE_SCRATCH_REG)
+/* 64 bit floating point argument or result. */
+#define SLJIT_ARG_TYPE_F64 4
+/* 32 bit floating point argument or result. */
+#define SLJIT_ARG_TYPE_F32 5
+
+#define SLJIT_ARG_SHIFT 4
+#define SLJIT_ARG_RETURN(type) (type)
+#define SLJIT_ARG_VALUE(type, idx) ((type) << ((idx) * SLJIT_ARG_SHIFT))
+
+/* Simplified argument list definitions.
+
+ The following definition:
+ SLJIT_ARG_RETURN(SLJIT_ARG_TYPE_W) | SLJIT_ARG_VALUE(SLJIT_ARG_TYPE_F32, 1)
-/* Short form of the macros above.
+ can be shortened to:
+ SLJIT_ARGS1(W, F32)
- For example the following definition:
- SLJIT_DEF_RET(SLJIT_ARG_TYPE_SW) | SLJIT_DEF_ARG1(SLJIT_ARG_TYPE_F32)
+ Another example where no value is returned:
+ SLJIT_ARG_RETURN(SLJIT_ARG_TYPE_RET_VOID) | SLJIT_ARG_VALUE(SLJIT_ARG_TYPE_W_R, 1)
can be shortened to:
- SLJIT_RET(SW) | SLJIT_ARG1(F32)
-
-Note:
- The VOID type is only supported by SLJIT_RET, and
- VOID is also the default value when SLJIT_RET is
- not specified. */
-#define SLJIT_RET(type) SLJIT_DEF_RET(SLJIT_ARG_TYPE_ ## type)
-#define SLJIT_ARG1(type) SLJIT_DEF_ARG1(SLJIT_ARG_TYPE_ ## type)
-#define SLJIT_ARG2(type) SLJIT_DEF_ARG2(SLJIT_ARG_TYPE_ ## type)
-#define SLJIT_ARG3(type) SLJIT_DEF_ARG3(SLJIT_ARG_TYPE_ ## type)
-#define SLJIT_ARG4(type) SLJIT_DEF_ARG4(SLJIT_ARG_TYPE_ ## type)
+ SLJIT_ARGS1V(W_R)
+*/
+
+#define SLJIT_ARG_TO_TYPE(type) SLJIT_ARG_TYPE_ ## type
+
+#define SLJIT_ARGS0(ret) \
+ SLJIT_ARG_RETURN(SLJIT_ARG_TO_TYPE(ret))
+#define SLJIT_ARGS0V() \
+ SLJIT_ARG_RETURN(SLJIT_ARG_TYPE_RET_VOID)
+
+#define SLJIT_ARGS1(ret, arg1) \
+ (SLJIT_ARGS0(ret) | SLJIT_ARG_VALUE(SLJIT_ARG_TO_TYPE(arg1), 1))
+#define SLJIT_ARGS1V(arg1) \
+ (SLJIT_ARGS0V() | SLJIT_ARG_VALUE(SLJIT_ARG_TO_TYPE(arg1), 1))
+
+#define SLJIT_ARGS2(ret, arg1, arg2) \
+ (SLJIT_ARGS1(ret, arg1) | SLJIT_ARG_VALUE(SLJIT_ARG_TO_TYPE(arg2), 2))
+#define SLJIT_ARGS2V(arg1, arg2) \
+ (SLJIT_ARGS1V(arg1) | SLJIT_ARG_VALUE(SLJIT_ARG_TO_TYPE(arg2), 2))
+
+#define SLJIT_ARGS3(ret, arg1, arg2, arg3) \
+ (SLJIT_ARGS2(ret, arg1, arg2) | SLJIT_ARG_VALUE(SLJIT_ARG_TO_TYPE(arg3), 3))
+#define SLJIT_ARGS3V(arg1, arg2, arg3) \
+ (SLJIT_ARGS2V(arg1, arg2) | SLJIT_ARG_VALUE(SLJIT_ARG_TO_TYPE(arg3), 3))
+
+#define SLJIT_ARGS4(ret, arg1, arg2, arg3, arg4) \
+ (SLJIT_ARGS3(ret, arg1, arg2, arg3) | SLJIT_ARG_VALUE(SLJIT_ARG_TO_TYPE(arg4), 4))
+#define SLJIT_ARGS4V(arg1, arg2, arg3, arg4) \
+ (SLJIT_ARGS3V(arg1, arg2, arg3) | SLJIT_ARG_VALUE(SLJIT_ARG_TO_TYPE(arg4), 4))
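
As a worked expansion, the short forms unfold into the long forms defined above, e.g.:

    SLJIT_ARGS2(W, P, F64)
        == SLJIT_ARG_RETURN(SLJIT_ARG_TYPE_W)
         | SLJIT_ARG_VALUE(SLJIT_ARG_TYPE_P, 1)
         | SLJIT_ARG_VALUE(SLJIT_ARG_TYPE_F64, 2)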
/* --------------------------------------------------------------------- */
/* Main structures and functions */
@@ -358,6 +435,7 @@ struct sljit_label {
struct sljit_jump {
struct sljit_jump *next;
sljit_uw addr;
+ /* Architecture dependent flags. */
sljit_uw flags;
union {
sljit_uw target;
@@ -395,20 +473,20 @@ struct sljit_compiler {
struct sljit_memory_fragment *buf;
struct sljit_memory_fragment *abuf;
- /* Used scratch registers. */
+ /* Available scratch registers. */
sljit_s32 scratches;
- /* Used saved registers. */
+ /* Available saved registers. */
sljit_s32 saveds;
- /* Used float scratch registers. */
+ /* Available float scratch registers. */
sljit_s32 fscratches;
- /* Used float saved registers. */
+ /* Available float saved registers. */
sljit_s32 fsaveds;
/* Local stack size. */
sljit_s32 local_size;
- /* Code size. */
+ /* Maximum code size. */
sljit_uw size;
/* Relative offset of the executable mapping from the writable mapping. */
- sljit_uw executable_offset;
+ sljit_sw executable_offset;
/* Executable size for statistical purposes. */
sljit_uw executable_size;
@@ -417,20 +495,14 @@ struct sljit_compiler {
#endif
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
- sljit_s32 args;
- sljit_s32 locals_offset;
- sljit_s32 saveds_offset;
- sljit_s32 stack_tmp_size;
+ sljit_s32 args_size;
#endif
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
sljit_s32 mode32;
-#ifdef _WIN64
- sljit_s32 locals_offset;
-#endif
#endif
-#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+#if (defined SLJIT_CONFIG_ARM_V6 && SLJIT_CONFIG_ARM_V6)
/* Constant pool handling. */
sljit_uw *cpool;
sljit_u8 *cpool_unique;
@@ -441,13 +513,17 @@ struct sljit_compiler {
sljit_uw patches;
#endif
-#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) || (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)
+#if (defined SLJIT_CONFIG_ARM_V6 && SLJIT_CONFIG_ARM_V6) || (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)
/* Temporary fields. */
sljit_uw shift_imm;
+#endif /* SLJIT_CONFIG_ARM_V6 || SLJIT_CONFIG_ARM_V7 */
+
+#if (defined SLJIT_CONFIG_ARM_32 && SLJIT_CONFIG_ARM_32) && (defined __SOFTFP__)
+ sljit_uw args_size;
#endif
#if (defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC)
- sljit_sw imm;
+ sljit_u32 imm;
#endif
#if (defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS)
@@ -456,8 +532,11 @@ struct sljit_compiler {
sljit_sw cache_argw;
#endif
-#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
- sljit_s32 delay_slot;
+#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
+ sljit_uw args_size;
+#endif
+
+#if (defined SLJIT_CONFIG_RISCV && SLJIT_CONFIG_RISCV)
sljit_s32 cache_arg;
sljit_sw cache_argw;
#endif
@@ -467,6 +546,11 @@ struct sljit_compiler {
sljit_s32 mode;
#endif
+#if (defined SLJIT_CONFIG_LOONGARCH && SLJIT_CONFIG_LOONGARCH)
+ sljit_s32 cache_arg;
+ sljit_sw cache_argw;
+#endif
+
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
FILE* verbose;
#endif
@@ -476,14 +560,17 @@ struct sljit_compiler {
/* Flags specified by the last arithmetic instruction.
It contains the type of the variable flag. */
sljit_s32 last_flags;
- /* Local size passed to the functions. */
+ /* Return value type set by entry functions. */
+ sljit_s32 last_return;
+ /* Local size passed to entry functions. */
sljit_s32 logical_local_size;
#endif
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) \
|| (defined SLJIT_DEBUG && SLJIT_DEBUG) \
|| (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
- /* Trust arguments when the API function is called. */
+ /* Trust arguments when an API function is called.
+ Used internally for calling API functions. */
sljit_s32 skip_checks;
#endif
};
@@ -492,7 +579,7 @@ struct sljit_compiler {
/* Main functions */
/* --------------------------------------------------------------------- */
-/* Creates an sljit compiler. The allocator_data is required by some
+/* Creates an SLJIT compiler. The allocator_data is required by some
custom memory managers. This pointer is passed to SLJIT_MALLOC
and SLJIT_FREE macros. Most allocators (including the default
one) ignore this value, and it is recommended to pass NULL
@@ -506,43 +593,44 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_compiler* sljit_create_compiler(void *allo
/* Frees everything except the compiled machine code. */
SLJIT_API_FUNC_ATTRIBUTE void sljit_free_compiler(struct sljit_compiler *compiler);
-/* Returns the current error code. If an error is occurred, future sljit
- calls which uses the same compiler argument returns early with the same
+/* Returns the current error code. If an error occurs, future calls
+ which use the same compiler argument return early with the same
error code. Thus there is no need for checking the error after every
- call, it is enough to do it before the code is compiled. Removing
+ call, it is enough to do it after the code is compiled. Removing
these checks increases the performance of the compiling process. */
static SLJIT_INLINE sljit_s32 sljit_get_compiler_error(struct sljit_compiler *compiler) { return compiler->error; }
/* Sets the compiler error code to SLJIT_ERR_ALLOC_FAILED except
if an error was detected before. After the error code is set
the compiler behaves as if the allocation failure happened
- during an sljit function call. This can greatly simplify error
- checking, since only the compiler status needs to be checked
- after the compilation. */
+ during an SLJIT function call. This can greatly simplify error
+ checking, since it is enough to check the compiler status
+ after the code is compiled. */
SLJIT_API_FUNC_ATTRIBUTE void sljit_set_compiler_memory_error(struct sljit_compiler *compiler);
-/*
- Allocate a small amount of memory. The size must be <= 64 bytes on 32 bit,
+/* Allocate a small amount of memory. The size must be <= 64 bytes on 32 bit,
and <= 128 bytes on 64 bit architectures. The memory area is owned by the
compiler, and freed by sljit_free_compiler. The returned pointer is
sizeof(sljit_sw) aligned. Excellent for allocating small blocks during
- the compiling, and no need to worry about freeing them. The size is
- enough to contain at most 16 pointers. If the size is outside of the range,
+ compiling, with no need to worry about freeing them. The size is enough
+ to contain at most 16 pointers. If the size is outside of the range,
the function will return with NULL. However, this return value does not
indicate that there is no more memory (does not set the current error code
- of the compiler to out-of-memory status).
-*/
+ of the compiler to out-of-memory status). */
SLJIT_API_FUNC_ATTRIBUTE void* sljit_alloc_memory(struct sljit_compiler *compiler, sljit_s32 size);
+/* Returns the allocator data passed to sljit_create_compiler. These pointers
+ may contain context data even if the normal/exec allocator ignores it. */
+static SLJIT_INLINE void* sljit_get_allocator_data(struct sljit_compiler *compiler) { return compiler->allocator_data; }
+static SLJIT_INLINE void* sljit_get_exec_allocator_data(struct sljit_compiler *compiler) { return compiler->exec_allocator_data; }
+
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
/* Passing NULL disables verbose. */
SLJIT_API_FUNC_ATTRIBUTE void sljit_compiler_verbose(struct sljit_compiler *compiler, FILE* verbose);
#endif
-/*
- Create executable code from the sljit instruction stream. This is the final step
- of the code generation so no more instructions can be added after this call.
-*/
+/* Create executable code from the instruction stream. This is the final step
+ of the code generation so no more instructions can be emitted after this call. */
SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler);
@@ -550,8 +638,7 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
SLJIT_API_FUNC_ATTRIBUTE void sljit_free_code(void* code, void *exec_allocator_data);
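
The full lifecycle described above, as a hedged sketch (the two-argument add function is a made-up example; all other names are taken from this header):

    typedef sljit_sw (SLJIT_FUNC *add2_func)(sljit_sw a, sljit_sw b);

    struct sljit_compiler *compiler = sljit_create_compiler(NULL, NULL);
    /* two word arguments arrive in SLJIT_S0 / SLJIT_S1 */
    sljit_emit_enter(compiler, 0, SLJIT_ARGS2(W, W, W), 1, 2, 0, 0, 0);
    sljit_emit_op2(compiler, SLJIT_ADD, SLJIT_R0, 0, SLJIT_S0, 0, SLJIT_S1, 0);
    sljit_emit_return(compiler, SLJIT_MOV, SLJIT_R0, 0);

    add2_func code = (add2_func)sljit_generate_code(compiler);
    sljit_free_compiler(compiler);  /* the machine code stays alive */
    /* code(3, 4) returns 7 */
    sljit_free_code((void*)code, NULL);

Per the error-handling note above, one sljit_get_compiler_error check (or a NULL check on the sljit_generate_code result) is sufficient.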
-/*
- When the protected executable allocator is used the JIT code is mapped
+/* When the protected executable allocator is used the JIT code is mapped
twice. The first mapping has read/write and the second mapping has read/exec
permissions. This function returns with the relative offset of the executable
mapping using the writable mapping as the base after the machine code is
@@ -559,26 +646,24 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_free_code(void* code, void *exec_allocator_d
allocator, since it uses only one mapping with read/write/exec permissions.
Dynamic code modifications require this value.
- Before a successful code generation, this function returns with 0.
-*/
+ Before a successful code generation, this function returns with 0. */
static SLJIT_INLINE sljit_sw sljit_get_executable_offset(struct sljit_compiler *compiler) { return compiler->executable_offset; }
-/*
- The executable memory consumption of the generated code can be retrieved by
+/* The executable memory consumption of the generated code can be retrieved by
this function. The returned value can be used for statistical purposes.
- Before a successful code generation, this function returns with 0.
-*/
+ Before a successful code generation, this function returns with 0. */
static SLJIT_INLINE sljit_uw sljit_get_generated_code_size(struct sljit_compiler *compiler) { return compiler->executable_size; }
/* Returns with non-zero if the feature or limitation type passed as its
- argument is present on the current CPU.
+ argument is present on the current CPU. The return value is one if a
+ feature is fully supported, and two if it is partially supported.
Some features (e.g. floating point operations) require hardware (CPU)
support while others (e.g. move with update) are emulated if not available.
- However even if a feature is emulated, specialized code paths can be faster
- than the emulation. Some limitations are emulated as well so their general
- case is supported but it has extra performance costs. */
+ However, even when a feature is emulated, specialized code paths may be
+ faster than the emulation. Some limitations are emulated as well so their
+ general case is supported but it has extra performance costs. */
/* [Not emulated] Floating-point support is available. */
#define SLJIT_HAS_FPU 0
@@ -588,79 +673,135 @@ static SLJIT_INLINE sljit_uw sljit_get_generated_code_size(struct sljit_compiler
#define SLJIT_HAS_ZERO_REGISTER 2
/* [Emulated] Count leading zero is supported. */
#define SLJIT_HAS_CLZ 3
+/* [Emulated] Count trailing zero is supported. */
+#define SLJIT_HAS_CTZ 4
+/* [Emulated] Reverse the order of bytes is supported. */
+#define SLJIT_HAS_REV 5
+/* [Emulated] Rotate left/right is supported. */
+#define SLJIT_HAS_ROT 6
/* [Emulated] Conditional move is supported. */
-#define SLJIT_HAS_CMOV 4
-/* [Emulated] Conditional move is supported. */
-#define SLJIT_HAS_PREFETCH 5
+#define SLJIT_HAS_CMOV 7
+/* [Emulated] Prefetch instruction is available (emulated as a nop). */
+#define SLJIT_HAS_PREFETCH 8
+/* [Emulated] Copy from/to f32 operation is available (see sljit_emit_fcopy). */
+#define SLJIT_HAS_COPY_F32 9
+/* [Emulated] Copy from/to f64 operation is available (see sljit_emit_fcopy). */
+#define SLJIT_HAS_COPY_F64 10
+/* [Not emulated] The 64 bit floating point registers can be used as
+ two separate 32 bit floating point registers (e.g. ARM32). The
+ second 32 bit part can be accessed by SLJIT_F64_SECOND. */
+#define SLJIT_HAS_F64_AS_F32_PAIR 11
+/* [Not emulated] Some SIMD operations are supported by the compiler. */
+#define SLJIT_HAS_SIMD 12
+/* [Not emulated] SIMD registers are mapped to a pair of double precision
+ floating point registers. E.g. passing either SLJIT_FR0 or SLJIT_FR1 to
+ a simd operation represents the same 128 bit register, and both SLJIT_FR0
+ and SLJIT_FR1 are overwritten. */
+#define SLJIT_SIMD_REGS_ARE_PAIRS 13
+/* [Not emulated] Atomic support is available (fine-grained). */
+#define SLJIT_HAS_ATOMIC 14
#if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86)
-/* [Not emulated] SSE2 support is available on x86. */
-#define SLJIT_HAS_SSE2 100
+/* [Not emulated] AVX support is available on x86. */
+#define SLJIT_HAS_AVX 100
+/* [Not emulated] AVX2 support is available on x86. */
+#define SLJIT_HAS_AVX2 101
#endif
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type);
-/* Instruction generation. Returns with any error code. If there is no
- error, they return with SLJIT_SUCCESS. */
+/* If type is between SLJIT_ORDERED_EQUAL and SLJIT_ORDERED_LESS_EQUAL,
+ sljit_cmp_info returns with:
+ zero - if the cpu supports the floating point comparison type
+ one - if the comparison requires two machine instructions
+ two - if the comparison requires more than two machine instructions
+
+ When the result is non-zero, it is recommended to avoid
+ using the specified comparison type if it is easy to do so.
+
+ For any other type, it returns zero. */
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_cmp_info(sljit_s32 type);
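
A hedged usage sketch (SLJIT_ORDERED_LESS is one of the comparison types in the range named above):

    /* Prefer a cheaper comparison when this one needs a long sequence. */
    if (sljit_cmp_info(SLJIT_ORDERED_LESS) == 2) {
        /* more than two instructions: consider another comparison type */
    }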
+
+/* The following functions generate machine code. If there is no
+ error, they return with SLJIT_SUCCESS, otherwise they return
+ with an error code. */
/*
The executable code is a function from the viewpoint of the C
- language. The function calls must obey to the ABI (Application
+ language. The function calls must conform to the ABI (Application
Binary Interface) of the platform, which specifies the purpose of
machine registers and stack handling among other things. The
sljit_emit_enter function emits the necessary instructions for
- setting up a new context for the executable code and moves function
- arguments to the saved registers. Furthermore the options argument
+ setting up a new context for the executable code. This is often
+ called the function prologue. Furthermore, the options argument
can be used to pass configuration options to the compiler. The
available options are listed before sljit_emit_enter.
- The function argument list is the combination of SLJIT_ARGx
- (SLJIT_DEF_ARG1) macros. Currently maximum 3 SW / UW
- (SLJIT_ARG_TYPE_SW / LJIT_ARG_TYPE_UW) arguments are supported.
- The first argument goes to SLJIT_S0, the second goes to SLJIT_S1
- and so on. The register set used by the function must be declared
- as well. The number of scratch and saved registers used by the
- function must be passed to sljit_emit_enter. Only R registers
- between R0 and "scratches" argument can be used later. E.g. if
- "scratches" is set to 2, the scratch register set will be limited
- to SLJIT_R0 and SLJIT_R1. The S registers and the floating point
- registers ("fscratches" and "fsaveds") are specified in a similar
- manner. The sljit_emit_enter is also capable of allocating a stack
- space for local variables. The "local_size" argument contains the
- size in bytes of this local area and its staring address is stored
- in SLJIT_SP. The memory area between SLJIT_SP (inclusive) and
- SLJIT_SP + local_size (exclusive) can be modified freely until
- the function returns. The stack space is not initialized.
+ The function argument list is specified by the SLJIT_ARGSx
+ (SLJIT_ARGS0 .. SLJIT_ARGS4) macros. Currently a maximum of four
+ arguments is supported. See the description of SLJIT_ARGSx
+ macros about argument passing. Furthermore the register set
+ used by the function must be declared as well. The number of
+ scratch and saved registers available to the function must
+ be passed to sljit_emit_enter. Only R registers between R0
+ and "scratches" argument can be used later. E.g. if "scratches"
+ is set to two, the scratch register set will be limited to
+ SLJIT_R0 and SLJIT_R1. The S registers and the floating point
+ registers ("fscratches" and "fsaveds") are specified in a
+ similar manner. sljit_emit_enter is also capable of
+ allocating stack space for local data. The "local_size"
+ argument contains the size in bytes of this local area, and
+ it can be accessed using SLJIT_MEM1(SLJIT_SP). The memory
+ area between SLJIT_SP (inclusive) and SLJIT_SP + local_size
+ (exclusive) can be modified freely until the function returns.
+ The stack space is not initialized to zero.
Note: the following conditions must be met:
0 <= scratches <= SLJIT_NUMBER_OF_REGISTERS
- 0 <= saveds <= SLJIT_NUMBER_OF_REGISTERS
+ 0 <= saveds <= SLJIT_NUMBER_OF_SAVED_REGISTERS
scratches + saveds <= SLJIT_NUMBER_OF_REGISTERS
0 <= fscratches <= SLJIT_NUMBER_OF_FLOAT_REGISTERS
- 0 <= fsaveds <= SLJIT_NUMBER_OF_FLOAT_REGISTERS
+ 0 <= fsaveds <= SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS
fscratches + fsaveds <= SLJIT_NUMBER_OF_FLOAT_REGISTERS
+ Note: the compiler can use saved registers as scratch registers,
+ but the opposite is not supported.
+
Note: every call of sljit_emit_enter and sljit_set_context
overwrites the previous context.
*/
-/* The absolute address returned by sljit_get_local_base with
-offset 0 is aligned to sljit_f64. Otherwise it is aligned to sljit_sw. */
-#define SLJIT_F64_ALIGNMENT 0x00000001
+/* Saved registers between SLJIT_S0 and SLJIT_S(n - 1) (inclusive)
+ are not saved / restored on function enter / return. Instead,
+ these registers can be used to pass / return data (such as
+ global / local context pointers) across function calls. The
+ value of n must be between 1 and 3. This option is only
+ supported by the SLJIT_ENTER_REG_ARG calling convention. */
+#define SLJIT_ENTER_KEEP(n) (n)
+
+/* The compiled function uses an SLJIT specific register argument
+ calling convention. This is a lightweight function call type where
+ both the caller and the called functions must be compiled by
+ SLJIT. The type argument of the call must be SLJIT_CALL_REG_ARG
+ and all arguments must be stored in scratch registers. */
+#define SLJIT_ENTER_REG_ARG 0x00000004
/* The local_size must be >= 0 and <= SLJIT_MAX_LOCAL_SIZE. */
-#define SLJIT_MAX_LOCAL_SIZE 65536
+#define SLJIT_MAX_LOCAL_SIZE 1048576
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler,
sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size);
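
A hedged example of a prologue that also reserves local stack space, as described above:

    /* two word arguments in S0/S1, two scratch registers,
       and 64 bytes of uninitialized local area */
    sljit_emit_enter(compiler, 0, SLJIT_ARGS2(W, W, W), 2, 2, 0, 0, 64);
    /* spill the first argument to [SLJIT_SP + 0] */
    sljit_emit_op1(compiler, SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), 0, SLJIT_S0, 0);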
-/* The machine code has a context (which contains the local stack space size,
- number of used registers, etc.) which initialized by sljit_emit_enter. Several
- functions (like sljit_emit_return) requres this context to be able to generate
- the appropriate code. However, some code fragments (like inline cache) may have
- no normal entry point so their context is unknown for the compiler. Their context
- can be provided to the compiler by the sljit_set_context function.
+/* The SLJIT compiler has a current context (which contains the local
+ stack space size, number of used registers, etc.) which is initialized
+ by sljit_emit_enter. Several functions (such as sljit_emit_return)
+ require this context to be able to generate the appropriate code.
+ However, some code fragments (compiled separately) may have no
+ normal entry point so their context is unknown to the compiler.
+
+ sljit_set_context and sljit_emit_enter have the same arguments,
+ but sljit_set_context does not generate any machine code.
Note: every call of sljit_emit_enter and sljit_set_context overwrites
the previous context. */
@@ -669,50 +810,42 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *comp
sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size);
-/* Return from machine code. The op argument can be SLJIT_UNUSED which means the
- function does not return with anything or any opcode between SLJIT_MOV and
- SLJIT_MOV_P (see sljit_emit_op1). As for src and srcw they must be 0 if op
- is SLJIT_UNUSED, otherwise see below the description about source and
- destination arguments. */
+/* Return to the caller function. The sljit_emit_return_void function
+ does not return with any value. The sljit_emit_return function returns
+ with a single value loaded from its source operand. The load operation
+ can be between SLJIT_MOV and SLJIT_MOV_P (see sljit_emit_op1) and
+ SLJIT_MOV_F32/SLJIT_MOV_F64 (see sljit_emit_fop1) depending on the
+ return value specified by sljit_emit_enter/sljit_set_context. */
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler *compiler);
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 src, sljit_sw srcw);
-/* Generating entry and exit points for fast call functions (see SLJIT_FAST_CALL).
- Both sljit_emit_fast_enter and SLJIT_FAST_RETURN operations preserve the
- values of all registers and stack frame. The return address is stored in the
- dst argument of sljit_emit_fast_enter, and this return address can be passed
- to SLJIT_FAST_RETURN to continue the execution after the fast call.
-
- Fast calls are cheap operations (usually only a single call instruction is
- emitted) but they do not preserve any registers. However the callee function
- can freely use / update any registers and stack values which can be
- efficiently exploited by various optimizations. Registers can be saved
- manually by the callee function if needed.
-
- Although returning to different address by SLJIT_FAST_RETURN is possible,
- this address usually cannot be predicted by the return address predictor of
- modern CPUs which may reduce performance. Furthermore certain security
- enhancement technologies such as Intel Control-flow Enforcement Technology
- (CET) may disallow returning to a different address.
-
- Flags: - (does not modify flags). */
+/* Restores the saved registers and frees the stack area, then the execution
+ continues from the address specified by the source operand. This
+ operation is similar to sljit_emit_return, but it ignores the return
+ address. The code where the execution continues should use the same context
+ as the caller function (see sljit_set_context). A word (pointer) value
+ can be passed in the SLJIT_RETURN_REG register. This function can be used
+ to jump to exception handlers. */
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw);
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_to(struct sljit_compiler *compiler,
+ sljit_s32 src, sljit_sw srcw);
/*
Source and destination operands for arithmetical instructions
imm - a simple immediate value (cannot be used as a destination)
- reg - any of the registers (immediate argument must be 0)
- [imm] - absolute immediate memory address
+ reg - any of the available registers (immediate argument must be 0)
+ [imm] - absolute memory address
[reg+imm] - indirect memory address
[reg+(reg<<imm)] - indirect indexed memory address (shift must be between 0 and 3)
- useful for (byte, half, int, sljit_sw) array access
- (fully supported by both x86 and ARM architectures, and cheap operation on others)
+ useful for accessing arrays (fully supported by both x86 and
+ ARM architectures, and cheap operation on others)
*/
/*
- IMPORTANT NOTE: memory access MUST be naturally aligned unless
+ IMPORTANT NOTE: memory accesses MUST be naturally aligned unless
the SLJIT_UNALIGNED macro is defined and its value is 1.
length | alignment
@@ -727,7 +860,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *
Note: Different architectures have different addressing limitations.
A single instruction is enough for the following addressing
- modes. Other adrressing modes are emulated by instruction
+ modes. Other addressing modes are emulated by instruction
sequences. This information could help to improve those code
generators which focus on only a few architectures.
@@ -752,11 +885,15 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *
Write-back is supported except for one instruction: 32 bit signed
load with [reg+imm] addressing mode on 64 bit.
mips: [reg+imm], -65536 <= imm <= 65535
- sparc: [reg+imm], -4096 <= imm <= 4095
- [reg+reg] is supported
+ Write-back is not supported
+ riscv: [reg+imm], -2048 <= imm <= 2047
+ Write-back is not supported
s390x: [reg+imm], -2^19 <= imm < 2^19
[reg+reg] is supported
Write-back is not supported
+ loongarch: [reg+imm], -2048 <= imm <= 2047
+ [reg+reg] is supported
+ Write-back is not supported
*/
/* Macros for specifying operand types. */
@@ -764,22 +901,37 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *
#define SLJIT_MEM0() (SLJIT_MEM)
#define SLJIT_MEM1(r1) (SLJIT_MEM | (r1))
#define SLJIT_MEM2(r1, r2) (SLJIT_MEM | (r1) | ((r2) << 8))
-#define SLJIT_IMM 0x40
-
-/* Set 32 bit operation mode (I) on 64 bit CPUs. This option is ignored on
+#define SLJIT_IMM 0x7f
+#define SLJIT_REG_PAIR(r1, r2) ((r1) | ((r2) << 8))
+
+/* Macros for checking operand types (only for valid arguments). */
+#define SLJIT_IS_REG(arg) ((arg) > 0 && (arg) < SLJIT_IMM)
+#define SLJIT_IS_MEM(arg) ((arg) & SLJIT_MEM)
+#define SLJIT_IS_MEM0(arg) ((arg) == SLJIT_MEM)
+#define SLJIT_IS_MEM1(arg) ((arg) > SLJIT_MEM && (arg) < (SLJIT_MEM << 1))
+#define SLJIT_IS_MEM2(arg) (((arg) & SLJIT_MEM) && (arg) >= (SLJIT_MEM << 1))
+#define SLJIT_IS_IMM(arg) ((arg) == SLJIT_IMM)
+#define SLJIT_IS_REG_PAIR(arg) (!((arg) & SLJIT_MEM) && (arg) >= (SLJIT_MEM << 1))
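
A short sketch of the addressing forms listed above (sljit_emit_op1 is declared later in this header; SLJIT_WORD_SHIFT comes from sljitConfigInternal.h):

    /* R0 = [SP + 16] */
    sljit_emit_op1(compiler, SLJIT_MOV, SLJIT_R0, 0, SLJIT_MEM1(SLJIT_SP), 16);
    /* R2 = word loaded from [R0 + (R1 << SLJIT_WORD_SHIFT)] */
    sljit_emit_op1(compiler, SLJIT_MOV, SLJIT_R2, 0,
        SLJIT_MEM2(SLJIT_R0, SLJIT_R1), SLJIT_WORD_SHIFT);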
+
+/* Sets 32 bit operation mode on 64 bit CPUs. This option is ignored on
32 bit CPUs. When this option is set for an arithmetic operation, only
- the lower 32 bit of the input registers are used, and the CPU status
+ the lower 32 bits of the input registers are used, and the CPU status
flags are set according to the 32 bit result. Although the higher 32 bit
of the input and the result registers are not defined by SLJIT, it might
be defined by the CPU architecture (e.g. MIPS). To satisfy these CPU
requirements all source registers must be the result of those operations
where this option was also set. Memory loads read 32 bit values rather
- than 64 bit ones. In other words 32 bit and 64 bit operations cannot
- be mixed. The only exception is SLJIT_MOV32 and SLJIT_MOVU32 whose source
- register can hold any 32 or 64 bit value, and it is converted to a 32 bit
- compatible format first. This conversion is free (no instructions are
- emitted) on most CPUs. A 32 bit value can also be converted to a 64 bit
- value by SLJIT_MOV_S32 (sign extension) or SLJIT_MOV_U32 (zero extension).
+ than 64 bit ones. In other words 32 bit and 64 bit operations cannot be
+ mixed. The only exception is SLJIT_MOV32, whose source register can hold
+ any 32 or 64 bit value, and it is converted to a 32 bit compatible format
+ first. When the source and destination registers are the same, this
+ conversion is free (no instructions are emitted) on most CPUs. A 32 bit
+ value can also be converted to a 64 bit value by SLJIT_MOV_S32
+ (sign extension) or SLJIT_MOV_U32 (zero extension).
+
+ As for floating-point operations, this option sets 32 bit single
+ precision mode. Similar to the integer operations, all register arguments
+ must be the result of those operations where this option was also set.
Note: memory addressing always uses 64 bit values on 64 bit systems so
the result of a 32 bit operation must not be used with SLJIT_MEMx
@@ -788,35 +940,23 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *
This option is part of the instruction name, so there is no need to
manually set it. E.g:
- SLJIT_ADD32 == (SLJIT_ADD | SLJIT_I32_OP) */
-#define SLJIT_I32_OP 0x100
-
-/* Set F32 (single) precision mode for floating-point computation. This
- option is similar to SLJIT_I32_OP, it just applies to floating point
- registers. When this option is passed, the CPU performs 32 bit floating
- point operations, rather than 64 bit one. Similar to SLJIT_I32_OP, all
- register arguments must be the result of those operations where this
- option was also set.
-
- This option is part of the instruction name, so there is no need to
- manually set it. E.g:
-
- SLJIT_MOV_F32 = (SLJIT_MOV_F64 | SLJIT_F32_OP)
- */
-#define SLJIT_F32_OP SLJIT_I32_OP
+ SLJIT_ADD32 == (SLJIT_ADD | SLJIT_32) */
+#define SLJIT_32 0x100
-/* Many CPUs (x86, ARM, PPC) have status flags which can be set according
+/* Many CPUs (x86, ARM, PPC) have status flag bits which can be set according
to the result of an operation. Other CPUs (MIPS) do not have status
- flags, and results must be stored in registers. To cover both architecture
- types efficiently only two flags are defined by SLJIT:
+ flag bits, and results must be stored in registers. To cover both
+ architecture types efficiently only two flags are defined by SLJIT:
* Zero (equal) flag: it is set if the result is zero
- * Variable flag: its value is defined by the last arithmetic operation
+ * Variable flag: its value is defined by the arithmetic operation
SLJIT instructions can set any or both of these flags. The value of
- these flags is undefined if the instruction does not specify their value.
- The description of each instruction contains the list of allowed flag
- types.
+ these flags is undefined if the instruction does not specify their
+ value. The description of each instruction contains the list of
+ allowed flag types.
+
+ Note: the logical or operation can be used to set flags.
Example: SLJIT_ADD can set the Z, OVERFLOW, CARRY flags hence
@@ -837,32 +977,40 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *
Sets the variable flag if unsigned overflow (carry) occurs,
clears it otherwise.
- If an instruction (e.g. SLJIT_MOV) does not modify flags the flags are
- unchanged.
+ Certain instructions (e.g. SLJIT_MOV) do not modify flags, so
+ the status flags are left unchanged.
- Using these flags can reduce the number of emitted instructions. E.g. a
- fast loop can be implemented by decreasing a counter register and set the
- zero flag to jump back if the counter register has not reached zero.
+ Example:
- Motivation: although CPUs can set a large number of flags, usually their
- values are ignored or only one of them is used. Emulating a large number
- of flags on systems without flag register is complicated so SLJIT
- instructions must specify the flag they want to use and only that flag
- will be emulated. The last arithmetic instruction can be repeated if
+ sljit_op2(..., SLJIT_ADD | SLJIT_SET_Z, ...)
+ sljit_op1(..., SLJIT_MOV, ...)
+ Zero flag is set according to the result of SLJIT_ADD.
+
+ sljit_op2(..., SLJIT_ADD | SLJIT_SET_Z, ...)
+ sljit_op2(..., SLJIT_ADD, ...)
+ Zero flag has unknown value.
+
+ These flags can be used for code optimization. E.g. a fast loop can be
+ implemented by decreasing a counter register and setting the zero flag
+ using a single instruction, as sketched below. The zero flag can then
+ be used by a conditional jump to restart the loop. A single comparison
+ can set both the zero and less flags to check if a value is less than,
+ equal to, or greater than another value.
+
+ Motivation: although some CPUs can set a large number of flag bits,
+ usually their values are ignored or only a few of them are used. Emulating
+ a large number of flags on systems without a flag register is complicated
+ so SLJIT instructions must specify the flag they want to use and only
+ that flag is computed. The last arithmetic instruction can be repeated if
multiple flags need to be checked.
*/
/* Set Zero status flag. */
#define SLJIT_SET_Z 0x0200
/* Set the variable status flag if condition is true.
- See comparison types. */
+ See comparison types (e.g. SLJIT_SET_LESS, SLJIT_SET_F_EQUAL). */
#define SLJIT_SET(condition) ((condition) << 10)
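
The fast-loop pattern mentioned above, as a hedged sketch (sljit_emit_label, sljit_emit_jump and sljit_set_label are declared later in this header):

    struct sljit_label *loop = sljit_emit_label(compiler);
    /* ... loop body ... */
    sljit_emit_op2(compiler, SLJIT_SUB | SLJIT_SET_Z,
        SLJIT_R0, 0, SLJIT_R0, 0, SLJIT_IMM, 1);
    /* jump back while the counter in R0 is not zero */
    sljit_set_label(sljit_emit_jump(compiler, SLJIT_NOT_ZERO), loop);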
-/* Notes:
- - you cannot postpone conditional jump instructions except if noted that
- the instruction does not set flags (See: SLJIT_KEEP_FLAGS).
- - flag combinations: '|' means 'logical or'. */
-
/* Starting index of opcodes for sljit_emit_op0. */
#define SLJIT_OP0_BASE 0
@@ -887,7 +1035,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *
The result is placed into SLJIT_R0 and the remainder into SLJIT_R1.
Note: if SLJIT_R1 is 0, the behaviour is undefined. */
#define SLJIT_DIVMOD_UW (SLJIT_OP0_BASE + 4)
-#define SLJIT_DIVMOD_U32 (SLJIT_DIVMOD_UW | SLJIT_I32_OP)
+#define SLJIT_DIVMOD_U32 (SLJIT_DIVMOD_UW | SLJIT_32)
/* Flags: - (may destroy flags)
Signed divide of the value in SLJIT_R0 by the value in SLJIT_R1.
The result is placed into SLJIT_R0 and the remainder into SLJIT_R1.
@@ -895,13 +1043,13 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *
Note: if SLJIT_R1 is -1 and SLJIT_R0 is integer min (0x800..00),
the behaviour is undefined. */
#define SLJIT_DIVMOD_SW (SLJIT_OP0_BASE + 5)
-#define SLJIT_DIVMOD_S32 (SLJIT_DIVMOD_SW | SLJIT_I32_OP)
+#define SLJIT_DIVMOD_S32 (SLJIT_DIVMOD_SW | SLJIT_32)
/* Flags: - (may destroy flags)
Unsigned divide of the value in SLJIT_R0 by the value in SLJIT_R1.
The result is placed into SLJIT_R0. SLJIT_R1 preserves its value.
Note: if SLJIT_R1 is 0, the behaviour is undefined. */
#define SLJIT_DIV_UW (SLJIT_OP0_BASE + 6)
-#define SLJIT_DIV_U32 (SLJIT_DIV_UW | SLJIT_I32_OP)
+#define SLJIT_DIV_U32 (SLJIT_DIV_UW | SLJIT_32)
/* Flags: - (may destroy flags)
Signed divide of the value in SLJIT_R0 by the value in SLJIT_R1.
The result is placed into SLJIT_R0. SLJIT_R1 preserves its value.
@@ -909,14 +1057,16 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *
Note: if SLJIT_R1 is -1 and SLJIT_R0 is integer min (0x800..00),
the behaviour is undefined. */
#define SLJIT_DIV_SW (SLJIT_OP0_BASE + 7)
-#define SLJIT_DIV_S32 (SLJIT_DIV_SW | SLJIT_I32_OP)
+#define SLJIT_DIV_S32 (SLJIT_DIV_SW | SLJIT_32)
/* Flags: - (does not modify flags)
ENDBR32 instruction for x86-32 and ENDBR64 instruction for x86-64
when Intel Control-flow Enforcement Technology (CET) is enabled.
- No instruction for other architectures. */
+ No instructions are emitted for other architectures. */
#define SLJIT_ENDBR (SLJIT_OP0_BASE + 8)
/* Flags: - (may destroy flags)
- Skip stack frames before return. */
+ Skip stack frames before return when Intel Control-flow
+ Enforcement Technology (CET) is enabled. No instructions
+ are emitted for other architectures. */
#define SLJIT_SKIP_FRAMES_BEFORE_RETURN (SLJIT_OP0_BASE + 9)
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op);
@@ -941,16 +1091,16 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
#define SLJIT_MOV (SLJIT_OP1_BASE + 0)
/* Flags: - (does not modify flags) */
#define SLJIT_MOV_U8 (SLJIT_OP1_BASE + 1)
-#define SLJIT_MOV32_U8 (SLJIT_MOV_U8 | SLJIT_I32_OP)
+#define SLJIT_MOV32_U8 (SLJIT_MOV_U8 | SLJIT_32)
/* Flags: - (does not modify flags) */
#define SLJIT_MOV_S8 (SLJIT_OP1_BASE + 2)
-#define SLJIT_MOV32_S8 (SLJIT_MOV_S8 | SLJIT_I32_OP)
+#define SLJIT_MOV32_S8 (SLJIT_MOV_S8 | SLJIT_32)
/* Flags: - (does not modify flags) */
#define SLJIT_MOV_U16 (SLJIT_OP1_BASE + 3)
-#define SLJIT_MOV32_U16 (SLJIT_MOV_U16 | SLJIT_I32_OP)
+#define SLJIT_MOV32_U16 (SLJIT_MOV_U16 | SLJIT_32)
/* Flags: - (does not modify flags) */
#define SLJIT_MOV_S16 (SLJIT_OP1_BASE + 4)
-#define SLJIT_MOV32_S16 (SLJIT_MOV_S16 | SLJIT_I32_OP)
+#define SLJIT_MOV32_S16 (SLJIT_MOV_S16 | SLJIT_32)
/* Flags: - (does not modify flags)
Note: no SLJIT_MOV32_U32 form, since it is the same as SLJIT_MOV32 */
#define SLJIT_MOV_U32 (SLJIT_OP1_BASE + 5)
@@ -958,189 +1108,397 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
Note: no SLJIT_MOV32_S32 form, since it is the same as SLJIT_MOV32 */
#define SLJIT_MOV_S32 (SLJIT_OP1_BASE + 6)
/* Flags: - (does not modify flags) */
-#define SLJIT_MOV32 (SLJIT_MOV_S32 | SLJIT_I32_OP)
+#define SLJIT_MOV32 (SLJIT_OP1_BASE + 7)
/* Flags: - (does not modify flags)
- Note: load a pointer sized data, useful on x32 (a 32 bit mode on x86-64
- where all x64 features are available, e.g. 16 register) or similar
- compiling modes */
-#define SLJIT_MOV_P (SLJIT_OP1_BASE + 7)
-/* Flags: Z
+ Note: loads pointer sized data, useful in x32 mode (a 64 bit mode
+ on x86-64 which uses 32 bit pointers) or similar compiling modes */
+#define SLJIT_MOV_P (SLJIT_OP1_BASE + 8)
+/* Count leading zeroes
+ Flags: - (may destroy flags)
Note: immediate source argument is not supported */
-#define SLJIT_NOT (SLJIT_OP1_BASE + 8)
-#define SLJIT_NOT32 (SLJIT_NOT | SLJIT_I32_OP)
-/* Flags: Z | OVERFLOW
+#define SLJIT_CLZ (SLJIT_OP1_BASE + 9)
+#define SLJIT_CLZ32 (SLJIT_CLZ | SLJIT_32)
+/* Count trailing zeroes
+ Flags: - (may destroy flags)
Note: immediate source argument is not supported */
-#define SLJIT_NEG (SLJIT_OP1_BASE + 9)
-#define SLJIT_NEG32 (SLJIT_NEG | SLJIT_I32_OP)
-/* Count leading zeroes
+#define SLJIT_CTZ (SLJIT_OP1_BASE + 10)
+#define SLJIT_CTZ32 (SLJIT_CTZ | SLJIT_32)
+/* Reverse the order of bytes
+ Flags: - (may destroy flags)
+ Note: converts between little and big endian formats
+ Note: immediate source argument is not supported */
+#define SLJIT_REV (SLJIT_OP1_BASE + 11)
+#define SLJIT_REV32 (SLJIT_REV | SLJIT_32)
+/* Reverse the order of bytes in the lower 16 bit and extend as unsigned
+ Flags: - (may destroy flags)
+ Note: converts between little and big endian formats
+ Note: immediate source argument is not supported */
+#define SLJIT_REV_U16 (SLJIT_OP1_BASE + 12)
+#define SLJIT_REV32_U16 (SLJIT_REV_U16 | SLJIT_32)
+/* Reverse the order of bytes in the lower 16 bit and extend as signed
+ Flags: - (may destroy flags)
+ Note: converts between little and big endian formats
+ Note: immediate source argument is not supported */
+#define SLJIT_REV_S16 (SLJIT_OP1_BASE + 13)
+#define SLJIT_REV32_S16 (SLJIT_REV_S16 | SLJIT_32)
+/* Reverse the order of bytes in the lower 32 bit and extend as unsigned
Flags: - (may destroy flags)
+ Note: converts between little and big endian formats
Note: immediate source argument is not supported */
-#define SLJIT_CLZ (SLJIT_OP1_BASE + 10)
-#define SLJIT_CLZ32 (SLJIT_CLZ | SLJIT_I32_OP)
+#define SLJIT_REV_U32 (SLJIT_OP1_BASE + 14)
+/* Reverse the order of bytes in the lower 32 bit and extend as signed
+ Flags: - (may destroy flags)
+ Note: converts between little and big endian formats
+ Note: immediate source argument is not supported */
+#define SLJIT_REV_S32 (SLJIT_OP1_BASE + 15)
+
+/* The following unary operations are supported by using sljit_emit_op2:
+ - binary not: SLJIT_XOR with immediate -1 as src1 or src2
+ - negate: SLJIT_SUB with immediate 0 as src1
+ Note: these operations are optimized by the compiler if the
+ target CPU has specialized instruction forms for them. */
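
Both idioms, as a sketch:

    /* R0 = ~R1 (binary not) */
    sljit_emit_op2(compiler, SLJIT_XOR, SLJIT_R0, 0, SLJIT_R1, 0, SLJIT_IMM, -1);
    /* R0 = -R1 (negate) */
    sljit_emit_op2(compiler, SLJIT_SUB, SLJIT_R0, 0, SLJIT_IMM, 0, SLJIT_R1, 0);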
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw);
/* Starting index of opcodes for sljit_emit_op2. */
-#define SLJIT_OP2_BASE 96
+#define SLJIT_OP2_BASE 64
/* Flags: Z | OVERFLOW | CARRY */
#define SLJIT_ADD (SLJIT_OP2_BASE + 0)
-#define SLJIT_ADD32 (SLJIT_ADD | SLJIT_I32_OP)
+#define SLJIT_ADD32 (SLJIT_ADD | SLJIT_32)
/* Flags: CARRY */
#define SLJIT_ADDC (SLJIT_OP2_BASE + 1)
-#define SLJIT_ADDC32 (SLJIT_ADDC | SLJIT_I32_OP)
+#define SLJIT_ADDC32 (SLJIT_ADDC | SLJIT_32)
/* Flags: Z | LESS | GREATER_EQUAL | GREATER | LESS_EQUAL
SIG_LESS | SIG_GREATER_EQUAL | SIG_GREATER
- SIG_LESS_EQUAL | CARRY */
+ SIG_LESS_EQUAL | OVERFLOW | CARRY */
#define SLJIT_SUB (SLJIT_OP2_BASE + 2)
-#define SLJIT_SUB32 (SLJIT_SUB | SLJIT_I32_OP)
+#define SLJIT_SUB32 (SLJIT_SUB | SLJIT_32)
/* Flags: CARRY */
#define SLJIT_SUBC (SLJIT_OP2_BASE + 3)
-#define SLJIT_SUBC32 (SLJIT_SUBC | SLJIT_I32_OP)
+#define SLJIT_SUBC32 (SLJIT_SUBC | SLJIT_32)
/* Note: integer mul
Flags: OVERFLOW */
#define SLJIT_MUL (SLJIT_OP2_BASE + 4)
-#define SLJIT_MUL32 (SLJIT_MUL | SLJIT_I32_OP)
+#define SLJIT_MUL32 (SLJIT_MUL | SLJIT_32)
/* Flags: Z */
#define SLJIT_AND (SLJIT_OP2_BASE + 5)
-#define SLJIT_AND32 (SLJIT_AND | SLJIT_I32_OP)
+#define SLJIT_AND32 (SLJIT_AND | SLJIT_32)
/* Flags: Z */
#define SLJIT_OR (SLJIT_OP2_BASE + 6)
-#define SLJIT_OR32 (SLJIT_OR | SLJIT_I32_OP)
+#define SLJIT_OR32 (SLJIT_OR | SLJIT_32)
/* Flags: Z */
#define SLJIT_XOR (SLJIT_OP2_BASE + 7)
-#define SLJIT_XOR32 (SLJIT_XOR | SLJIT_I32_OP)
+#define SLJIT_XOR32 (SLJIT_XOR | SLJIT_32)
/* Flags: Z
Let bit_length be the length of the shift operation: 32 or 64.
If src2 is immediate, src2w is masked by (bit_length - 1).
Otherwise, if the content of src2 is outside the range from 0
to bit_length - 1, the result is undefined. */
#define SLJIT_SHL (SLJIT_OP2_BASE + 8)
-#define SLJIT_SHL32 (SLJIT_SHL | SLJIT_I32_OP)
+#define SLJIT_SHL32 (SLJIT_SHL | SLJIT_32)
+/* Flags: Z
+ Same as SLJIT_SHL, except that the second operand is
+ always masked by the length of the shift operation. */
+#define SLJIT_MSHL (SLJIT_OP2_BASE + 9)
+#define SLJIT_MSHL32 (SLJIT_MSHL | SLJIT_32)
/* Flags: Z
Let bit_length be the length of the shift operation: 32 or 64.
If src2 is immediate, src2w is masked by (bit_length - 1).
Otherwise, if the content of src2 is outside the range from 0
to bit_length - 1, the result is undefined. */
-#define SLJIT_LSHR (SLJIT_OP2_BASE + 9)
-#define SLJIT_LSHR32 (SLJIT_LSHR | SLJIT_I32_OP)
+#define SLJIT_LSHR (SLJIT_OP2_BASE + 10)
+#define SLJIT_LSHR32 (SLJIT_LSHR | SLJIT_32)
+/* Flags: Z
+ Same as SLJIT_LSHR, except that the second operand is
+ always masked by the length of the shift operation. */
+#define SLJIT_MLSHR (SLJIT_OP2_BASE + 11)
+#define SLJIT_MLSHR32 (SLJIT_MLSHR | SLJIT_32)
/* Flags: Z
Let bit_length be the length of the shift operation: 32 or 64.
If src2 is immediate, src2w is masked by (bit_length - 1).
Otherwise, if the content of src2 is outside the range from 0
to bit_length - 1, the result is undefined. */
-#define SLJIT_ASHR (SLJIT_OP2_BASE + 10)
-#define SLJIT_ASHR32 (SLJIT_ASHR | SLJIT_I32_OP)
+#define SLJIT_ASHR (SLJIT_OP2_BASE + 12)
+#define SLJIT_ASHR32 (SLJIT_ASHR | SLJIT_32)
+/* Flags: Z
+ Same as SLJIT_ASHR, except that the second operand is
+ always masked by the length of the shift operation. */
+#define SLJIT_MASHR (SLJIT_OP2_BASE + 13)
+#define SLJIT_MASHR32 (SLJIT_MASHR | SLJIT_32)
+/* Flags: - (may destroy flags)
+ Let bit_length be the length of the rotate operation: 32 or 64.
+ The second operand is always masked by (bit_length - 1). */
+#define SLJIT_ROTL (SLJIT_OP2_BASE + 14)
+#define SLJIT_ROTL32 (SLJIT_ROTL | SLJIT_32)
+/* Flags: - (may destroy flags)
+ Let bit_length be the length of the rotate operation: 32 or 64.
+ The second operand is always masked by (bit_length - 1). */
+#define SLJIT_ROTR (SLJIT_OP2_BASE + 15)
+#define SLJIT_ROTR32 (SLJIT_ROTR | SLJIT_32)
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w);
-/* Starting index of opcodes for sljit_emit_op2. */
-#define SLJIT_OP_SRC_BASE 128
+/* The sljit_emit_op2u function is the same as sljit_emit_op2
+ except the result is discarded. */
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2u(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2, sljit_sw src2w);
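
For example, a flag-only comparison that keeps both inputs intact (a hedged sketch; SLJIT_SET_LESS is defined with the comparison types later in this header):

    /* set the Z and LESS flags according to R0 - R1, discard the result */
    sljit_emit_op2u(compiler, SLJIT_SUB | SLJIT_SET_Z | SLJIT_SET_LESS,
        SLJIT_R0, 0, SLJIT_R1, 0);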
+
+/* Emit a left or right shift operation, where the bits shifted
+ in come from a separate source operand. All operands are
+ interpreted as unsigned integers.
+
+ In the following, the value_mask variable is 31 for 32 bit
+ operations and word_size - 1 otherwise.
+
+ op must be one of the following operations:
+ SLJIT_SHL or SLJIT_SHL32:
+ dst_reg = src1_reg << src3_reg
+ dst_reg |= ((src2_reg >> 1) >> (src3 ^ value_mask))
+ SLJIT_MSHL or SLJIT_MSHL32:
+ src3 &= value_mask
+ perform the SLJIT_SHL or SLJIT_SHL32 operation
+ SLJIT_LSHR or SLJIT_LSHR32:
+ dst_reg = src1_reg >> src3_reg
+ dst_reg |= ((src2_reg << 1) << (src3 ^ value_mask))
+ SLJIT_MLSHR or SLJIT_MLSHR32:
+ src3 &= value_mask
+ perform the SLJIT_LSHR or SLJIT_LSHR32 operation
+
+ op can be combined (or'ed) with SLJIT_SHIFT_INTO_NON_ZERO
+
+ dst_reg specifies the destination register, where dst_reg
+ and src2_reg cannot be the same register
+ src1_reg specifies the source register
+ src2_reg specifies the register which is shifted into src1_reg
+ src3 / src3w contains the shift amount
+
+ Note: a rotate operation is performed if src1_reg and
+ src2_reg are the same register
+
+ Flags: - (may destroy flags) */
-/* Note: src cannot be an immedate value
+/* The src3 operand contains a non-zero value. Knowing this
+ allows better code generation on certain architectures,
+ providing a small performance improvement. */
+#define SLJIT_SHIFT_INTO_NON_ZERO 0x200
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_shift_into(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst_reg,
+ sljit_s32 src1_reg,
+ sljit_s32 src2_reg,
+ sljit_s32 src3, sljit_sw src3w);
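
A hedged sketch of a double-width left shift built from this operation: the top bits shifted out of the low word in R0 are shifted into the high word in R1.

    /* R1 = (R1 << (R2 & mask)) | (top bits of R0); R2 is masked by SLJIT_MSHL */
    sljit_emit_shift_into(compiler, SLJIT_MSHL,
        SLJIT_R1, SLJIT_R1, SLJIT_R0, SLJIT_R2, 0);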
+
+/* Starting index of opcodes for sljit_emit_op_src
+ and sljit_emit_op_dst. */
+#define SLJIT_OP_SRC_DST_BASE 96
+
+/* Fast return, see SLJIT_FAST_CALL for more details.
+ Note: src cannot be an immediate value
Flags: - (does not modify flags) */
-#define SLJIT_FAST_RETURN (SLJIT_OP_SRC_BASE + 0)
+#define SLJIT_FAST_RETURN (SLJIT_OP_SRC_DST_BASE + 0)
/* Skip stack frames before fast return.
Note: src cannot be an immediate value
Flags: may destroy flags. */
-#define SLJIT_SKIP_FRAMES_BEFORE_FAST_RETURN (SLJIT_OP_SRC_BASE + 1)
+#define SLJIT_SKIP_FRAMES_BEFORE_FAST_RETURN (SLJIT_OP_SRC_DST_BASE + 1)
/* Prefetch value into the level 1 data cache
Note: if the target CPU does not support data prefetch,
no instructions are emitted.
Note: this instruction never fails, even if the memory address is invalid.
Flags: - (does not modify flags) */
-#define SLJIT_PREFETCH_L1 (SLJIT_OP_SRC_BASE + 2)
+#define SLJIT_PREFETCH_L1 (SLJIT_OP_SRC_DST_BASE + 2)
/* Prefetch value into the level 2 data cache
Note: same as SLJIT_PREFETCH_L1 if the target CPU
does not support this instruction form.
Note: this instruction never fails, even if the memory address is invalid.
Flags: - (does not modify flags) */
-#define SLJIT_PREFETCH_L2 (SLJIT_OP_SRC_BASE + 3)
+#define SLJIT_PREFETCH_L2 (SLJIT_OP_SRC_DST_BASE + 3)
/* Prefetch value into the level 3 data cache
Note: same as SLJIT_PREFETCH_L2 if the target CPU
does not support this instruction form.
Note: this instruction never fails, even if the memory address is invalid.
Flags: - (does not modify flags) */
-#define SLJIT_PREFETCH_L3 (SLJIT_OP_SRC_BASE + 4)
+#define SLJIT_PREFETCH_L3 (SLJIT_OP_SRC_DST_BASE + 4)
/* Prefetch a value which is only used once (and can be discarded afterwards)
Note: same as SLJIT_PREFETCH_L1 if the target CPU
does not support this instruction form.
Note: this instruction never fails, even if the memory address is invalid.
Flags: - (does not modify flags) */
-#define SLJIT_PREFETCH_ONCE (SLJIT_OP_SRC_BASE + 5)
+#define SLJIT_PREFETCH_ONCE (SLJIT_OP_SRC_DST_BASE + 5)
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 src, sljit_sw srcw);
+/* Fast enter, see SLJIT_FAST_CALL for more details.
+ Flags: - (does not modify flags) */
+#define SLJIT_FAST_ENTER (SLJIT_OP_SRC_DST_BASE + 6)
+
+/* Copies the return address into dst. The return address is the
+ address where the execution continues after the called function
+ returns (see: sljit_emit_return / sljit_emit_return_void).
+ Flags: - (does not modify flags) */
+#define SLJIT_GET_RETURN_ADDRESS (SLJIT_OP_SRC_DST_BASE + 7)
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_dst(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw);
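
[Editorial note, illustrative only, not part of this patch.] A sketch of the two sljit_emit_op_dst opcodes, assuming a compiler instance:

    /* Store the return address of the enclosing function in R0. */
    sljit_emit_op_dst(compiler, SLJIT_GET_RETURN_ADDRESS, SLJIT_R0, 0);
    /* At the entry of a fast-called block: save the fast-call
       return address in R1 (see SLJIT_FAST_CALL below). */
    sljit_emit_op_dst(compiler, SLJIT_FAST_ENTER, SLJIT_R1, 0);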
+
/* Starting index of opcodes for sljit_emit_fop1. */
-#define SLJIT_FOP1_BASE 160
+#define SLJIT_FOP1_BASE 128
/* Flags: - (does not modify flags) */
#define SLJIT_MOV_F64 (SLJIT_FOP1_BASE + 0)
-#define SLJIT_MOV_F32 (SLJIT_MOV_F64 | SLJIT_F32_OP)
+#define SLJIT_MOV_F32 (SLJIT_MOV_F64 | SLJIT_32)
/* Convert opcodes: CONV[DST_TYPE].FROM[SRC_TYPE]
- SRC/DST TYPE can be: D - double, S - single, W - signed word, I - signed int
- Rounding mode when the destination is W or I: round towards zero. */
-/* Flags: - (does not modify flags) */
+ SRC/DST TYPE can be: F64, F32, S32, SW
+ Rounding mode when the destination is SW or S32: round towards zero. */
+/* Flags: - (may destroy flags) */
#define SLJIT_CONV_F64_FROM_F32 (SLJIT_FOP1_BASE + 1)
-#define SLJIT_CONV_F32_FROM_F64 (SLJIT_CONV_F64_FROM_F32 | SLJIT_F32_OP)
-/* Flags: - (does not modify flags) */
+#define SLJIT_CONV_F32_FROM_F64 (SLJIT_CONV_F64_FROM_F32 | SLJIT_32)
+/* Flags: - (may destroy flags) */
#define SLJIT_CONV_SW_FROM_F64 (SLJIT_FOP1_BASE + 2)
-#define SLJIT_CONV_SW_FROM_F32 (SLJIT_CONV_SW_FROM_F64 | SLJIT_F32_OP)
-/* Flags: - (does not modify flags) */
+#define SLJIT_CONV_SW_FROM_F32 (SLJIT_CONV_SW_FROM_F64 | SLJIT_32)
+/* Flags: - (may destroy flags) */
#define SLJIT_CONV_S32_FROM_F64 (SLJIT_FOP1_BASE + 3)
-#define SLJIT_CONV_S32_FROM_F32 (SLJIT_CONV_S32_FROM_F64 | SLJIT_F32_OP)
-/* Flags: - (does not modify flags) */
+#define SLJIT_CONV_S32_FROM_F32 (SLJIT_CONV_S32_FROM_F64 | SLJIT_32)
+/* Flags: - (may destroy flags) */
#define SLJIT_CONV_F64_FROM_SW (SLJIT_FOP1_BASE + 4)
-#define SLJIT_CONV_F32_FROM_SW (SLJIT_CONV_F64_FROM_SW | SLJIT_F32_OP)
-/* Flags: - (does not modify flags) */
+#define SLJIT_CONV_F32_FROM_SW (SLJIT_CONV_F64_FROM_SW | SLJIT_32)
+/* Flags: - (may destroy flags) */
#define SLJIT_CONV_F64_FROM_S32 (SLJIT_FOP1_BASE + 5)
-#define SLJIT_CONV_F32_FROM_S32 (SLJIT_CONV_F64_FROM_S32 | SLJIT_F32_OP)
-/* Note: dst is the left and src is the right operand for SLJIT_CMPD.
+#define SLJIT_CONV_F32_FROM_S32 (SLJIT_CONV_F64_FROM_S32 | SLJIT_32)
+/* Flags: - (may destroy flags) */
+#define SLJIT_CONV_F64_FROM_UW (SLJIT_FOP1_BASE + 6)
+#define SLJIT_CONV_F32_FROM_UW (SLJIT_CONV_F64_FROM_UW | SLJIT_32)
+/* Flags: - (may destroy flags) */
+#define SLJIT_CONV_F64_FROM_U32 (SLJIT_FOP1_BASE + 7)
+#define SLJIT_CONV_F32_FROM_U32 (SLJIT_CONV_F64_FROM_U32 | SLJIT_32)
+/* Note: dst is the left and src is the right operand for SLJIT_CMP_F32/64.
Flags: EQUAL_F | LESS_F | GREATER_EQUAL_F | GREATER_F | LESS_EQUAL_F */
-#define SLJIT_CMP_F64 (SLJIT_FOP1_BASE + 6)
-#define SLJIT_CMP_F32 (SLJIT_CMP_F64 | SLJIT_F32_OP)
-/* Flags: - (does not modify flags) */
-#define SLJIT_NEG_F64 (SLJIT_FOP1_BASE + 7)
-#define SLJIT_NEG_F32 (SLJIT_NEG_F64 | SLJIT_F32_OP)
-/* Flags: - (does not modify flags) */
-#define SLJIT_ABS_F64 (SLJIT_FOP1_BASE + 8)
-#define SLJIT_ABS_F32 (SLJIT_ABS_F64 | SLJIT_F32_OP)
+#define SLJIT_CMP_F64 (SLJIT_FOP1_BASE + 8)
+#define SLJIT_CMP_F32 (SLJIT_CMP_F64 | SLJIT_32)
+/* Flags: - (may destroy flags) */
+#define SLJIT_NEG_F64 (SLJIT_FOP1_BASE + 9)
+#define SLJIT_NEG_F32 (SLJIT_NEG_F64 | SLJIT_32)
+/* Flags: - (may destroy flags) */
+#define SLJIT_ABS_F64 (SLJIT_FOP1_BASE + 10)
+#define SLJIT_ABS_F32 (SLJIT_ABS_F64 | SLJIT_32)
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw);
/* Starting index of opcodes for sljit_emit_fop2. */
-#define SLJIT_FOP2_BASE 192
+#define SLJIT_FOP2_BASE 160
-/* Flags: - (does not modify flags) */
+/* Flags: - (may destroy flags) */
#define SLJIT_ADD_F64 (SLJIT_FOP2_BASE + 0)
-#define SLJIT_ADD_F32 (SLJIT_ADD_F64 | SLJIT_F32_OP)
-/* Flags: - (does not modify flags) */
+#define SLJIT_ADD_F32 (SLJIT_ADD_F64 | SLJIT_32)
+/* Flags: - (may destroy flags) */
#define SLJIT_SUB_F64 (SLJIT_FOP2_BASE + 1)
-#define SLJIT_SUB_F32 (SLJIT_SUB_F64 | SLJIT_F32_OP)
-/* Flags: - (does not modify flags) */
+#define SLJIT_SUB_F32 (SLJIT_SUB_F64 | SLJIT_32)
+/* Flags: - (may destroy flags) */
#define SLJIT_MUL_F64 (SLJIT_FOP2_BASE + 2)
-#define SLJIT_MUL_F32 (SLJIT_MUL_F64 | SLJIT_F32_OP)
-/* Flags: - (does not modify flags) */
+#define SLJIT_MUL_F32 (SLJIT_MUL_F64 | SLJIT_32)
+/* Flags: - (may destroy flags) */
#define SLJIT_DIV_F64 (SLJIT_FOP2_BASE + 3)
-#define SLJIT_DIV_F32 (SLJIT_DIV_F64 | SLJIT_F32_OP)
+#define SLJIT_DIV_F32 (SLJIT_DIV_F64 | SLJIT_32)
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w);
+/* Starting index of opcodes for sljit_emit_fop2r. */
+#define SLJIT_FOP2R_BASE 168
+
+/* Flags: - (may destroy flags) */
+#define SLJIT_COPYSIGN_F64 (SLJIT_FOP2R_BASE + 0)
+#define SLJIT_COPYSIGN_F32 (SLJIT_COPYSIGN_F64 | SLJIT_32)
+
+/* Similar to sljit_emit_fop2, except the destination is always a register. */
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2r(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst_freg,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2, sljit_sw src2w);
+
+/* Sets a floating point register to an immediate value. */
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fset32(struct sljit_compiler *compiler,
+ sljit_s32 freg, sljit_f32 value);
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fset64(struct sljit_compiler *compiler,
+ sljit_s32 freg, sljit_f64 value);
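
[Editorial note, illustrative only, not part of this patch.] A sketch of the immediate loaders, assuming FR0/FR1 are available float registers:

    sljit_emit_fset64(compiler, SLJIT_FR0, 1.0);  /* FR0 = 1.0 as f64 */
    sljit_emit_fset32(compiler, SLJIT_FR1, 0.5f); /* FR1 = 0.5 as f32 */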
+
+/* The following opcodes are used by sljit_emit_fcopy(). */
+
+/* 64 bit: copy a 64 bit value from an integer register into a
+ 64 bit floating point register without any modifications.
+ 32 bit: copy a 32 bit register or register pair into a 64 bit
+ floating point register without any modifications. The
+ register, or the first register of the register pair
+ replaces the high order 32 bit of the floating point
+ register. If a register pair is passed, the low
+ order 32 bit is replaced by the second register.
+ Otherwise, the low order 32 bit is unchanged. */
+#define SLJIT_COPY_TO_F64 1
+/* Copy a 32 bit value from an integer register into a 32 bit
+ floating point register without any modifications. */
+#define SLJIT_COPY32_TO_F32 (SLJIT_COPY_TO_F64 | SLJIT_32)
+/* 64 bit: copy the value of a 64 bit floating point register into
+ an integer register without any modifications.
+ 32 bit: copy a 64 bit floating point register into a 32 bit register
+ or a 32 bit register pair without any modifications. The
+ high order 32 bit of the floating point register is copied
+ into the register, or the first register of the register
+ pair. If a register pair is passed, the low order 32 bit
+ is copied into the second register. */
+#define SLJIT_COPY_FROM_F64 2
+/* Copy the value of a 32 bit floating point register into an integer
+ register without any modifications. The register should be processed
+ with 32 bit operations later. */
+#define SLJIT_COPY32_FROM_F32 (SLJIT_COPY_FROM_F64 | SLJIT_32)
+
+/* Special data copy which involves floating point registers.
+
+ op must be between SLJIT_COPY_TO_F64 and SLJIT_COPY32_FROM_F32
+ freg must be a floating point register
+ reg must be a register or register pair */
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fcopy(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 freg, sljit_s32 reg);
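
[Editorial note, illustrative only, not part of this patch.] On a 64 bit target, sljit_emit_fcopy moves raw IEEE-754 bits between the register files, e.g. so the sign bit of a double can be tested with integer operations:

    /* R0 = bit pattern of FR0; no numeric conversion is performed. */
    sljit_emit_fcopy(compiler, SLJIT_COPY_FROM_F64, SLJIT_FR0, SLJIT_R0);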
+
/* Label and jump instructions. */
SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compiler *compiler);
+/* SLJIT_FAST_CALL is a calling method for creating lightweight function
+   calls. This type of call preserves the values of all registers and the
+   stack frame. Unlike normal function calls, the enter and return
+   operations must be performed by the SLJIT_FAST_ENTER and SLJIT_FAST_RETURN
+   operations respectively. The return address is stored in the dst argument
+   of the SLJIT_FAST_ENTER operation, and this return address should be
+   passed as the src argument for the SLJIT_FAST_RETURN operation to return
+   from the called function.
+
+   Fast calls are cheap operations (usually only a single call instruction
+   is emitted), but they do not save or restore any registers automatically.
+   However, the callee function can freely use / update any registers and
+   the locals area, which can be efficiently exploited by various
+   optimizations. Registers can be saved and restored manually if needed.
+
+   Although returning to a different address by SLJIT_FAST_RETURN is
+   possible, this address usually cannot be predicted by the return address
+   predictor of modern CPUs, which may reduce performance. Furthermore,
+   certain security enhancement technologies such as Intel Control-flow
+   Enforcement Technology (CET) may disallow returning to a different
+   address (indirect jumps can be used instead, see
+   SLJIT_SKIP_FRAMES_BEFORE_FAST_RETURN). */
+
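[Editorial note, illustrative only, not part of this patch.] A sketch of a complete fast-call round trip; sljit_set_label is declared elsewhere in this header, and a real caller would branch around the fast-called block, which is omitted here for brevity:

    struct sljit_jump *call = sljit_emit_jump(compiler, SLJIT_FAST_CALL);
    /* ... the caller continues here after the fast call returns ... */

    struct sljit_label *target = sljit_emit_label(compiler);
    sljit_emit_op_dst(compiler, SLJIT_FAST_ENTER, SLJIT_R3, 0); /* save return address */
    /* ... body; registers may be used freely ... */
    sljit_emit_op_src(compiler, SLJIT_FAST_RETURN, SLJIT_R3, 0);
    sljit_set_label(call, target);
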
/* Invert (negate) conditional type: xor (^) with 0x1 */
/* Integer comparison types. */
@@ -1152,65 +1510,106 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compi
#define SLJIT_LESS 2
#define SLJIT_SET_LESS SLJIT_SET(SLJIT_LESS)
#define SLJIT_GREATER_EQUAL 3
-#define SLJIT_SET_GREATER_EQUAL SLJIT_SET(SLJIT_GREATER_EQUAL)
+#define SLJIT_SET_GREATER_EQUAL SLJIT_SET(SLJIT_LESS)
#define SLJIT_GREATER 4
#define SLJIT_SET_GREATER SLJIT_SET(SLJIT_GREATER)
#define SLJIT_LESS_EQUAL 5
-#define SLJIT_SET_LESS_EQUAL SLJIT_SET(SLJIT_LESS_EQUAL)
+#define SLJIT_SET_LESS_EQUAL SLJIT_SET(SLJIT_GREATER)
#define SLJIT_SIG_LESS 6
#define SLJIT_SET_SIG_LESS SLJIT_SET(SLJIT_SIG_LESS)
#define SLJIT_SIG_GREATER_EQUAL 7
-#define SLJIT_SET_SIG_GREATER_EQUAL SLJIT_SET(SLJIT_SIG_GREATER_EQUAL)
+#define SLJIT_SET_SIG_GREATER_EQUAL SLJIT_SET(SLJIT_SIG_LESS)
#define SLJIT_SIG_GREATER 8
#define SLJIT_SET_SIG_GREATER SLJIT_SET(SLJIT_SIG_GREATER)
#define SLJIT_SIG_LESS_EQUAL 9
-#define SLJIT_SET_SIG_LESS_EQUAL SLJIT_SET(SLJIT_SIG_LESS_EQUAL)
+#define SLJIT_SET_SIG_LESS_EQUAL SLJIT_SET(SLJIT_SIG_GREATER)
#define SLJIT_OVERFLOW 10
#define SLJIT_SET_OVERFLOW SLJIT_SET(SLJIT_OVERFLOW)
#define SLJIT_NOT_OVERFLOW 11
-/* There is no SLJIT_CARRY or SLJIT_NOT_CARRY. */
-#define SLJIT_SET_CARRY SLJIT_SET(12)
-
-/* Floating point comparison types. */
-#define SLJIT_EQUAL_F64 14
-#define SLJIT_EQUAL_F32 (SLJIT_EQUAL_F64 | SLJIT_F32_OP)
-#define SLJIT_SET_EQUAL_F SLJIT_SET(SLJIT_EQUAL_F64)
-#define SLJIT_NOT_EQUAL_F64 15
-#define SLJIT_NOT_EQUAL_F32 (SLJIT_NOT_EQUAL_F64 | SLJIT_F32_OP)
-#define SLJIT_SET_NOT_EQUAL_F SLJIT_SET(SLJIT_NOT_EQUAL_F64)
-#define SLJIT_LESS_F64 16
-#define SLJIT_LESS_F32 (SLJIT_LESS_F64 | SLJIT_F32_OP)
-#define SLJIT_SET_LESS_F SLJIT_SET(SLJIT_LESS_F64)
-#define SLJIT_GREATER_EQUAL_F64 17
-#define SLJIT_GREATER_EQUAL_F32 (SLJIT_GREATER_EQUAL_F64 | SLJIT_F32_OP)
-#define SLJIT_SET_GREATER_EQUAL_F SLJIT_SET(SLJIT_GREATER_EQUAL_F64)
-#define SLJIT_GREATER_F64 18
-#define SLJIT_GREATER_F32 (SLJIT_GREATER_F64 | SLJIT_F32_OP)
-#define SLJIT_SET_GREATER_F SLJIT_SET(SLJIT_GREATER_F64)
-#define SLJIT_LESS_EQUAL_F64 19
-#define SLJIT_LESS_EQUAL_F32 (SLJIT_LESS_EQUAL_F64 | SLJIT_F32_OP)
-#define SLJIT_SET_LESS_EQUAL_F SLJIT_SET(SLJIT_LESS_EQUAL_F64)
-#define SLJIT_UNORDERED_F64 20
-#define SLJIT_UNORDERED_F32 (SLJIT_UNORDERED_F64 | SLJIT_F32_OP)
-#define SLJIT_SET_UNORDERED_F SLJIT_SET(SLJIT_UNORDERED_F64)
-#define SLJIT_ORDERED_F64 21
-#define SLJIT_ORDERED_F32 (SLJIT_ORDERED_F64 | SLJIT_F32_OP)
-#define SLJIT_SET_ORDERED_F SLJIT_SET(SLJIT_ORDERED_F64)
+/* Unlike other flags, sljit_emit_jump may destroy the carry flag. */
+#define SLJIT_CARRY 12
+#define SLJIT_SET_CARRY SLJIT_SET(SLJIT_CARRY)
+#define SLJIT_NOT_CARRY 13
+
+#define SLJIT_ATOMIC_STORED 14
+#define SLJIT_SET_ATOMIC_STORED SLJIT_SET(SLJIT_ATOMIC_STORED)
+#define SLJIT_ATOMIC_NOT_STORED 15
+
+/* Basic floating point comparison types.
+
+   Note: when the comparison result is unordered, the behaviour is unspecified. */
+
+#define SLJIT_F_EQUAL 16
+#define SLJIT_SET_F_EQUAL SLJIT_SET(SLJIT_F_EQUAL)
+#define SLJIT_F_NOT_EQUAL 17
+#define SLJIT_SET_F_NOT_EQUAL SLJIT_SET(SLJIT_F_EQUAL)
+#define SLJIT_F_LESS 18
+#define SLJIT_SET_F_LESS SLJIT_SET(SLJIT_F_LESS)
+#define SLJIT_F_GREATER_EQUAL 19
+#define SLJIT_SET_F_GREATER_EQUAL SLJIT_SET(SLJIT_F_LESS)
+#define SLJIT_F_GREATER 20
+#define SLJIT_SET_F_GREATER SLJIT_SET(SLJIT_F_GREATER)
+#define SLJIT_F_LESS_EQUAL 21
+#define SLJIT_SET_F_LESS_EQUAL SLJIT_SET(SLJIT_F_GREATER)
+
+/* Jumps when either argument contains a NaN value. */
+#define SLJIT_UNORDERED 22
+#define SLJIT_SET_UNORDERED SLJIT_SET(SLJIT_UNORDERED)
+/* Jumps when neither argument contains a NaN value. */
+#define SLJIT_ORDERED 23
+#define SLJIT_SET_ORDERED SLJIT_SET(SLJIT_UNORDERED)
+
+/* Ordered / unordered floating point comparison types.
+
+   Note: each comparison type has an ordered and unordered form. Some
+   architectures support only one of them (see: sljit_cmp_info). */
+
+#define SLJIT_ORDERED_EQUAL 24
+#define SLJIT_SET_ORDERED_EQUAL SLJIT_SET(SLJIT_ORDERED_EQUAL)
+#define SLJIT_UNORDERED_OR_NOT_EQUAL 25
+#define SLJIT_SET_UNORDERED_OR_NOT_EQUAL SLJIT_SET(SLJIT_ORDERED_EQUAL)
+#define SLJIT_ORDERED_LESS 26
+#define SLJIT_SET_ORDERED_LESS SLJIT_SET(SLJIT_ORDERED_LESS)
+#define SLJIT_UNORDERED_OR_GREATER_EQUAL 27
+#define SLJIT_SET_UNORDERED_OR_GREATER_EQUAL SLJIT_SET(SLJIT_ORDERED_LESS)
+#define SLJIT_ORDERED_GREATER 28
+#define SLJIT_SET_ORDERED_GREATER SLJIT_SET(SLJIT_ORDERED_GREATER)
+#define SLJIT_UNORDERED_OR_LESS_EQUAL 29
+#define SLJIT_SET_UNORDERED_OR_LESS_EQUAL SLJIT_SET(SLJIT_ORDERED_GREATER)
+
+#define SLJIT_UNORDERED_OR_EQUAL 30
+#define SLJIT_SET_UNORDERED_OR_EQUAL SLJIT_SET(SLJIT_UNORDERED_OR_EQUAL)
+#define SLJIT_ORDERED_NOT_EQUAL 31
+#define SLJIT_SET_ORDERED_NOT_EQUAL SLJIT_SET(SLJIT_UNORDERED_OR_EQUAL)
+#define SLJIT_UNORDERED_OR_LESS 32
+#define SLJIT_SET_UNORDERED_OR_LESS SLJIT_SET(SLJIT_UNORDERED_OR_LESS)
+#define SLJIT_ORDERED_GREATER_EQUAL 33
+#define SLJIT_SET_ORDERED_GREATER_EQUAL SLJIT_SET(SLJIT_UNORDERED_OR_LESS)
+#define SLJIT_UNORDERED_OR_GREATER 34
+#define SLJIT_SET_UNORDERED_OR_GREATER SLJIT_SET(SLJIT_UNORDERED_OR_GREATER)
+#define SLJIT_ORDERED_LESS_EQUAL 35
+#define SLJIT_SET_ORDERED_LESS_EQUAL SLJIT_SET(SLJIT_UNORDERED_OR_GREATER)
/* Unconditional jump types. */
-#define SLJIT_JUMP 22
- /* Fast calling method. See sljit_emit_fast_enter / SLJIT_FAST_RETURN. */
-#define SLJIT_FAST_CALL 23
- /* Called function must be declared with the SLJIT_FUNC attribute. */
-#define SLJIT_CALL 24
- /* Called function must be declared with cdecl attribute.
- This is the default attribute for C functions. */
-#define SLJIT_CALL_CDECL 25
+#define SLJIT_JUMP 36
+/* Fast calling method. See the description above. */
+#define SLJIT_FAST_CALL 37
+/* Default C calling convention. */
+#define SLJIT_CALL 38
+/* Called function must be compiled by SLJIT.
+ See SLJIT_ENTER_REG_ARG option. */
+#define SLJIT_CALL_REG_ARG 39
/* The target can be changed during runtime (see: sljit_set_jump_addr). */
#define SLJIT_REWRITABLE_JUMP 0x1000
+/* When this flag is passed, the execution of the current function ends and
+ the called function returns to the caller of the current function. The
+ stack usage is reduced before the call, but it is not necessarily reduced
+ to zero. In the latter case the compiler needs to allocate space for some
+ arguments and the return address must be stored on the stack as well. */
+#define SLJIT_CALL_RETURN 0x2000
/* Emit a jump instruction. The destination is not set, only the type of the jump.
type must be between SLJIT_EQUAL and SLJIT_FAST_CALL
@@ -1220,19 +1619,18 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compi
SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_s32 type);
/* Emit a C compiler (ABI) compatible function call.
- type must be SLJIT_CALL or SLJIT_CALL_CDECL
- type can be combined (or'ed) with SLJIT_REWRITABLE_JUMP
- arg_types is the combination of SLJIT_RET / SLJIT_ARGx (SLJIT_DEF_RET / SLJIT_DEF_ARGx) macros
+ type must be SLJIT_CALL or SLJIT_CALL_REG_ARG
+ type can be combined (or'ed) with SLJIT_REWRITABLE_JUMP and/or SLJIT_CALL_RETURN
+ arg_types can be specified by SLJIT_ARGSx (SLJIT_ARG_RETURN / SLJIT_ARG_VALUE) macros
Flags: destroy all flags. */
SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 arg_types);
/* Basic arithmetic comparison. In most architectures it is implemented as
- an SLJIT_SUB operation (with SLJIT_UNUSED destination and setting
- appropriate flags) followed by a sljit_emit_jump. However some
- architectures (i.e: ARM64 or MIPS) may employ special optimizations here.
- It is suggested to use this comparison form when appropriate.
- type must be between SLJIT_EQUAL and SLJIT_I_SIG_LESS_EQUAL
+ a compare operation followed by a sljit_emit_jump. However some
+ architectures (i.e: ARM64 or MIPS) may employ special optimizations
+ here. It is suggested to use this comparison form when appropriate.
+ type must be between SLJIT_EQUAL and SLJIT_SIG_LESS_EQUAL
type can be combined (or'ed) with SLJIT_REWRITABLE_JUMP
Flags: may destroy flags. */
@@ -1241,15 +1639,14 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler
sljit_s32 src2, sljit_sw src2w);
/* Basic floating point comparison. In most architectures it is implemented as
- an SLJIT_FCMP operation (setting appropriate flags) followed by a
+ a SLJIT_CMP_F32/64 operation (setting appropriate flags) followed by a
sljit_emit_jump. However some architectures (i.e: MIPS) may employ
special optimizations here. It is suggested to use this comparison form
when appropriate.
- type must be between SLJIT_EQUAL_F64 and SLJIT_ORDERED_F32
+ type must be between SLJIT_F_EQUAL and SLJIT_ORDERED_LESS_EQUAL
type can be combined (or'ed) with SLJIT_REWRITABLE_JUMP
Flags: destroy flags.
- Note: if either operand is NaN, the behaviour is undefined for
- types up to SLJIT_S_LESS_EQUAL. */
+   Note: when an operand is NaN, the behaviour depends on the comparison type. */
SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_fcmp(struct sljit_compiler *compiler, sljit_s32 type,
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w);
@@ -1270,21 +1667,22 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi
/* Emit a C compiler (ABI) compatible function call.
Direct form: set src to SLJIT_IMM() and srcw to the address
Indirect form: any other valid addressing mode
- type must be SLJIT_CALL or SLJIT_CALL_CDECL
- arg_types is the combination of SLJIT_RET / SLJIT_ARGx (SLJIT_DEF_RET / SLJIT_DEF_ARGx) macros
+ type must be SLJIT_CALL or SLJIT_CALL_REG_ARG
+ type can be combined (or'ed) with SLJIT_CALL_RETURN
+ arg_types can be specified by SLJIT_ARGSx (SLJIT_ARG_RETURN / SLJIT_ARG_VALUE) macros
Flags: destroy all flags. */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 arg_types, sljit_s32 src, sljit_sw srcw);
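
[Editorial note, illustrative only, not part of this patch.] A sketch of a direct ABI call, assuming a C helper sljit_sw add2(sljit_sw a, sljit_sw b) exists and using the SLJIT_ARGSx macros defined earlier in this header:

    /* Arguments are taken from R0 and R1; the result arrives in R0. */
    sljit_emit_icall(compiler, SLJIT_CALL, SLJIT_ARGS2(W, W, W),
        SLJIT_IMM, SLJIT_FUNC_ADDR(add2));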
-/* Perform the operation using the conditional flags as the second argument.
- Type must always be between SLJIT_EQUAL and SLJIT_ORDERED_F64. The value
- represented by the type is 1, if the condition represented by the type
- is fulfilled, and 0 otherwise.
+/* Perform an operation using the conditional flags as the second argument.
+ Type must always be between SLJIT_EQUAL and SLJIT_ORDERED_LESS_EQUAL.
+ The value represented by the type is 1, if the condition represented
+ by the type is fulfilled, and 0 otherwise.
- If op == SLJIT_MOV, SLJIT_MOV32:
+ When op is SLJIT_MOV or SLJIT_MOV32:
Set dst to the value represented by the type (0 or 1).
Flags: - (does not modify flags)
- If op == SLJIT_OR, op == SLJIT_AND, op == SLJIT_XOR
+ When op is SLJIT_AND, SLJIT_AND32, SLJIT_OR, SLJIT_OR32, SLJIT_XOR, or SLJIT_XOR32
Performs the binary operation using dst as the first, and the value
represented by type as the second argument. Result is written into dst.
Flags: Z (may destroy flags) */
@@ -1292,73 +1690,446 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
sljit_s32 dst, sljit_sw dstw,
sljit_s32 type);
-/* Emit a conditional mov instruction which moves source to destination,
- if the condition is satisfied. Unlike other arithmetic operations this
- instruction does not support memory access.
+/* Emit a conditional select instruction which moves src1 to dst_reg,
+ if the condition is satisfied, or src2_reg to dst_reg otherwise.
- type must be between SLJIT_EQUAL and SLJIT_ORDERED_F64
- dst_reg must be a valid register and it can be combined
- with SLJIT_I32_OP to perform a 32 bit arithmetic operation
- src must be register or immediate (SLJIT_IMM)
+ type must be between SLJIT_EQUAL and SLJIT_ORDERED_LESS_EQUAL
+ type can be combined (or'ed) with SLJIT_32 to move 32 bit
+ register values instead of word sized ones
+ dst_reg and src2_reg must be valid registers
+   src1 must be a valid operand
+
+ Note: if src1 is a memory operand, its value
+ might be loaded even if the condition is false.
Flags: - (does not modify flags) */
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compiler, sljit_s32 type,
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_select(struct sljit_compiler *compiler, sljit_s32 type,
sljit_s32 dst_reg,
- sljit_s32 src, sljit_sw srcw);
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2_reg);
+
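[Editorial note, illustrative only, not part of this patch.] A sketch that clamps a negative value to zero; sljit_emit_op2u (declared earlier in this header) sets the flag without storing a result:

    /* if (R0 < 0) R0 = 0; */
    sljit_emit_op2u(compiler, SLJIT_SUB | SLJIT_SET_SIG_LESS, SLJIT_R0, 0, SLJIT_IMM, 0);
    sljit_emit_select(compiler, SLJIT_SIG_LESS, SLJIT_R0, SLJIT_IMM, 0, SLJIT_R0);
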
+/* Emit a conditional floating point select instruction which moves
+ src1 to dst_reg, if the condition is satisfied, or src2_reg to
+ dst_reg otherwise.
+
+ type must be between SLJIT_EQUAL and SLJIT_ORDERED_LESS_EQUAL
+ type can be combined (or'ed) with SLJIT_32 to move 32 bit
+ floating point values instead of 64 bit ones
+ dst_freg and src2_freg must be valid floating point registers
+   src1 must be a valid operand
+
+ Note: if src1 is a memory operand, its value
+ might be loaded even if the condition is false.
+
+ Flags: - (does not modify flags) */
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fselect(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 dst_freg,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2_freg);
+
+/* The following flags are used by sljit_emit_mem(), sljit_emit_mem_update(),
+ sljit_emit_fmem(), and sljit_emit_fmem_update(). */
+
+/* Memory load operation. This is the default. */
+#define SLJIT_MEM_LOAD 0x000000
+/* Memory store operation. */
+#define SLJIT_MEM_STORE 0x000200
/* The following flags are used by sljit_emit_mem() and sljit_emit_fmem(). */
+/* Load or store data from an unaligned (byte aligned) address. */
+#define SLJIT_MEM_UNALIGNED 0x000400
+/* Load or store data from a 16 bit aligned address. */
+#define SLJIT_MEM_ALIGNED_16 0x000800
+/* Load or store data from a 32 bit aligned address. */
+#define SLJIT_MEM_ALIGNED_32 0x001000
+
+/* The following flags are used by sljit_emit_mem_update(),
+ and sljit_emit_fmem_update(). */
+
+/* Base register is updated before the memory access (default). */
+#define SLJIT_MEM_PRE 0x000000
+/* Base register is updated after the memory access. */
+#define SLJIT_MEM_POST 0x000400
+
/* When SLJIT_MEM_SUPP is passed, no instructions are emitted.
Instead the function returns with SLJIT_SUCCESS if the instruction
form is supported and SLJIT_ERR_UNSUPPORTED otherwise. This flag
allows runtime checking of available instruction forms. */
-#define SLJIT_MEM_SUPP 0x0200
-/* Memory load operation. This is the default. */
-#define SLJIT_MEM_LOAD 0x0000
-/* Memory store operation. */
-#define SLJIT_MEM_STORE 0x0400
-/* Base register is updated before the memory access. */
-#define SLJIT_MEM_PRE 0x0800
-/* Base register is updated after the memory access. */
-#define SLJIT_MEM_POST 0x1000
-
-/* Emit a single memory load or store with update instruction. When the
- requested instruction form is not supported by the CPU, it returns
- with SLJIT_ERR_UNSUPPORTED instead of emulating the instruction. This
- allows specializing tight loops based on the supported instruction
- forms (see SLJIT_MEM_SUPP flag).
+#define SLJIT_MEM_SUPP 0x000800
+
+/* The sljit_emit_mem function emits instructions for various memory
+   operations:
+
+   When SLJIT_MEM_UNALIGNED / SLJIT_MEM_ALIGNED_16 /
+      SLJIT_MEM_ALIGNED_32 is set in the type argument:
+     Emit instructions for unaligned memory loads or stores. When
+     SLJIT_UNALIGNED is not defined, the only way to access unaligned
+     memory data is using sljit_emit_mem. Otherwise all operations (e.g.
+     sljit_emit_op1/2, or sljit_emit_fop1/2) support unaligned access.
+     In general, the performance of unaligned memory accesses is often
+     lower than that of aligned ones, so they should be avoided.
+
+ When a pair of registers is passed in reg argument:
+ Emit instructions for moving data between a register pair and
+ memory. The register pair can be specified by the SLJIT_REG_PAIR
+ macro. The first register is loaded from or stored into the
+ location specified by the mem/memw arguments, and the end address
+ of this operation is the starting address of the data transfer
+ between the second register and memory. The type argument must
+ be SLJIT_MOV. The SLJIT_MEM_UNALIGNED / SLJIT_MEM_ALIGNED_*
+ options are allowed for this operation.
type must be between SLJIT_MOV and SLJIT_MOV_P and can be
- combined with SLJIT_MEM_* flags. Either SLJIT_MEM_PRE
- or SLJIT_MEM_POST must be specified.
- reg is the source or destination register, and must be
- different from the base register of the mem operand
- mem must be a SLJIT_MEM1() or SLJIT_MEM2() operand
+ combined (or'ed) with SLJIT_MEM_* flags
+ reg is a register or register pair, which is the source or
+ destination of the operation
+ mem must be a memory operand
Flags: - (does not modify flags) */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type,
sljit_s32 reg,
sljit_s32 mem, sljit_sw memw);
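
[Editorial note, illustrative only, not part of this patch.] A sketch of the register pair form, assuming R2 points to two consecutive words:

    /* R0 = [R2], R1 = [R2 + sizeof(sljit_sw)], possibly unaligned. */
    sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_UNALIGNED,
        SLJIT_REG_PAIR(SLJIT_R0, SLJIT_R1), SLJIT_MEM1(SLJIT_R2), 0);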
+/* Emit a single memory load or store with update instruction.
+ When the requested instruction form is not supported by the CPU,
+ it returns with SLJIT_ERR_UNSUPPORTED instead of emulating the
+ instruction. This allows specializing tight loops based on
+ the supported instruction forms (see SLJIT_MEM_SUPP flag).
+ Absolute address (SLJIT_MEM0) forms are never supported
+ and the base (first) register specified by the mem argument
+ must not be SLJIT_SP and must also be different from the
+ register specified by the reg argument.
+
+ type must be between SLJIT_MOV and SLJIT_MOV_P and can be
+ combined (or'ed) with SLJIT_MEM_* flags
+ reg is the source or destination register of the operation
+ mem must be a memory operand
+
+ Flags: - (does not modify flags) */
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem_update(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 reg,
+ sljit_s32 mem, sljit_sw memw);
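
[Editorial note, illustrative only, not part of this patch.] A sketch that probes for a post-update load and falls back to two plain instructions when the form is unsupported:

    if (sljit_emit_mem_update(compiler, SLJIT_MOV | SLJIT_MEM_SUPP | SLJIT_MEM_POST,
            SLJIT_R0, SLJIT_MEM1(SLJIT_R1), (sljit_sw)sizeof(sljit_sw)) == SLJIT_SUCCESS)
        /* R0 = *R1; R1 += sizeof(sljit_sw); in a single instruction. */
        sljit_emit_mem_update(compiler, SLJIT_MOV | SLJIT_MEM_POST,
            SLJIT_R0, SLJIT_MEM1(SLJIT_R1), (sljit_sw)sizeof(sljit_sw));
    else {
        sljit_emit_op1(compiler, SLJIT_MOV, SLJIT_R0, 0, SLJIT_MEM1(SLJIT_R1), 0);
        sljit_emit_op2(compiler, SLJIT_ADD, SLJIT_R1, 0, SLJIT_R1, 0,
            SLJIT_IMM, (sljit_sw)sizeof(sljit_sw));
    }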
+
+/* Same as sljit_emit_mem except the following:
+ Loading or storing a pair of registers is not supported.
+
type must be SLJIT_MOV_F64 or SLJIT_MOV_F32 and can be
- combined with SLJIT_MEM_* flags. Either SLJIT_MEM_PRE
- or SLJIT_MEM_POST must be specified.
- freg is the source or destination floating point register */
+ combined (or'ed) with SLJIT_MEM_* flags.
+ freg is the source or destination floating point register
+ of the operation
+ mem must be a memory operand
+
+ Flags: - (does not modify flags) */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compiler, sljit_s32 type,
sljit_s32 freg,
sljit_s32 mem, sljit_sw memw);
-/* Copies the base address of SLJIT_SP + offset to dst. The offset can be
- anything to negate the effect of relative addressing. For example if an
- array of sljit_sw values is stored on the stack from offset 0x40, and R0
- contains the offset of an array item plus 0x120, this item can be
- overwritten by two SLJIT instructions:
+/* Same as sljit_emit_mem_update except the following:
+
+ type must be SLJIT_MOV_F64 or SLJIT_MOV_F32 and can be
+ combined (or'ed) with SLJIT_MEM_* flags
+ freg is the source or destination floating point register
+ of the operation
+ mem must be a memory operand
+
+ Flags: - (does not modify flags) */
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem_update(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 mem, sljit_sw memw);
+
+/* The following options are used by several simd operations. */
+
+/* Load data into a simd register; this is the default */
+#define SLJIT_SIMD_LOAD 0x000000
+/* Store data from a simd register */
+#define SLJIT_SIMD_STORE 0x000001
+/* The simd register contains floating point values */
+#define SLJIT_SIMD_FLOAT 0x000400
+/* Tests whether the operation is available */
+#define SLJIT_SIMD_TEST 0x000800
+/* Move data to/from a 64 bit (8 byte) long SIMD register */
+#define SLJIT_SIMD_REG_64 (3 << 12)
+/* Move data to/from a 128 bit (16 byte) long SIMD register */
+#define SLJIT_SIMD_REG_128 (4 << 12)
+/* Move data to/from a 256 bit (32 byte) long SIMD register */
+#define SLJIT_SIMD_REG_256 (5 << 12)
+/* Move data to/from a 512 bit (64 byte) long SIMD register */
+#define SLJIT_SIMD_REG_512 (6 << 12)
+/* Element size is 8 bit long (this is the default), usually cannot be combined with SLJIT_SIMD_FLOAT */
+#define SLJIT_SIMD_ELEM_8 (0 << 18)
+/* Element size is 16 bit long, usually cannot be combined with SLJIT_SIMD_FLOAT */
+#define SLJIT_SIMD_ELEM_16 (1 << 18)
+/* Element size is 32 bit long */
+#define SLJIT_SIMD_ELEM_32 (2 << 18)
+/* Element size is 64 bit long */
+#define SLJIT_SIMD_ELEM_64 (3 << 18)
+/* Element size is 128 bit long */
+#define SLJIT_SIMD_ELEM_128 (4 << 18)
+/* Element size is 256 bit long */
+#define SLJIT_SIMD_ELEM_256 (5 << 18)
+
+/* The following options are used by sljit_emit_simd_mov(). */
+
+/* Memory address is unaligned (this is the default) */
+#define SLJIT_SIMD_MEM_UNALIGNED (0 << 24)
+/* Memory address is 16 bit aligned */
+#define SLJIT_SIMD_MEM_ALIGNED_16 (1 << 24)
+/* Memory address is 32 bit aligned */
+#define SLJIT_SIMD_MEM_ALIGNED_32 (2 << 24)
+/* Memory address is 64 bit aligned */
+#define SLJIT_SIMD_MEM_ALIGNED_64 (3 << 24)
+/* Memory address is 128 bit aligned */
+#define SLJIT_SIMD_MEM_ALIGNED_128 (4 << 24)
+/* Memory address is 256 bit aligned */
+#define SLJIT_SIMD_MEM_ALIGNED_256 (5 << 24)
+/* Memory address is 512 bit aligned */
+#define SLJIT_SIMD_MEM_ALIGNED_512 (6 << 24)
+
+/* Moves data between a simd register and memory.
+
+ If the operation is not supported, it returns with
+ SLJIT_ERR_UNSUPPORTED. If SLJIT_SIMD_TEST is passed,
+ it does not emit any instructions.
+
+ type must be a combination of SLJIT_SIMD_* and
+ SLJIT_SIMD_MEM_* options
+ freg is the source or destination simd register
+ of the operation
+ srcdst must be a memory operand or a simd register
+
+ Note:
+     The alignment and element size must be
+     less than or equal to the simd register size.
+
+ Flags: - (does not modify flags) */
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_mov(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 srcdst, sljit_sw srcdstw);
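
[Editorial note, illustrative only, not part of this patch.] A sketch of the test-then-emit pattern for a 16 byte unaligned vector load:

    sljit_s32 mov_type = SLJIT_SIMD_REG_128 | SLJIT_SIMD_ELEM_8 | SLJIT_SIMD_MEM_UNALIGNED;
    if (sljit_emit_simd_mov(compiler, mov_type | SLJIT_SIMD_TEST,
            SLJIT_FR0, SLJIT_MEM1(SLJIT_R0), 0) == SLJIT_SUCCESS)
        sljit_emit_simd_mov(compiler, mov_type, SLJIT_FR0, SLJIT_MEM1(SLJIT_R0), 0);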
+
+/* Replicates a scalar value to all lanes of a simd
+ register.
+
+ If the operation is not supported, it returns with
+ SLJIT_ERR_UNSUPPORTED. If SLJIT_SIMD_TEST is passed,
+ it does not emit any instructions.
+
+ type must be a combination of SLJIT_SIMD_* options
+ except SLJIT_SIMD_STORE.
+ freg is the destination simd register of the operation
+ src is the value which is replicated
+
+ Note:
+     Passing src == SLJIT_IMM with srcw == 0 can be used
+     to clear a register even when SLJIT_SIMD_FLOAT is set.
+
+ Flags: - (does not modify flags) */
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_replicate(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 src, sljit_sw srcw);
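
[Editorial note, illustrative only, not part of this patch.] A sketch splatting the 32 bit value in R0 across a 128 bit register, and the immediate-zero form mentioned above (the SLJIT_SIMD_TEST probe is omitted for brevity):

    sljit_emit_simd_replicate(compiler, SLJIT_SIMD_REG_128 | SLJIT_SIMD_ELEM_32,
        SLJIT_FR0, SLJIT_R0, 0);
    /* Clearing works even for float vectors. */
    sljit_emit_simd_replicate(compiler, SLJIT_SIMD_REG_128 | SLJIT_SIMD_ELEM_32
        | SLJIT_SIMD_FLOAT, SLJIT_FR1, SLJIT_IMM, 0);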
+
+/* The following options are used by sljit_emit_simd_lane_mov(). */
+
+/* Clear all bits of the simd register before loading the lane. */
+#define SLJIT_SIMD_LANE_ZERO 0x000002
+/* Sign extend the integer value read from the lane. */
+#define SLJIT_SIMD_LANE_SIGNED 0x000004
+
+/* Moves data between a simd register lane and a register or
+ memory. If the srcdst argument is a register, it must be
+ a floating point register when SLJIT_SIMD_FLOAT is specified,
+ or a general purpose register otherwise.
+
+ If the operation is not supported, it returns with
+ SLJIT_ERR_UNSUPPORTED. If SLJIT_SIMD_TEST is passed,
+ it does not emit any instructions.
+
+ type must be a combination of SLJIT_SIMD_* options
+ Further options:
+ SLJIT_32 - when SLJIT_SIMD_FLOAT is not set
+ SLJIT_SIMD_LANE_SIGNED - when SLJIT_SIMD_STORE
+ is set and SLJIT_SIMD_FLOAT is not set
+ SLJIT_SIMD_LANE_ZERO - when SLJIT_SIMD_LOAD
+ is specified
+ freg is the source or destination simd register
+ of the operation
+ lane_index is the index of the lane
+ srcdst is the destination operand for loads, and
+ source operand for stores
+
+ Note:
+     The element size must be smaller than the register size.
+
+ Flags: - (does not modify flags) */
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_lane_mov(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg, sljit_s32 lane_index,
+ sljit_s32 srcdst, sljit_sw srcdstw);
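
[Editorial note, illustrative only, not part of this patch.] A sketch reading lane 3 of a 4 x 32 bit integer vector into R0 with sign extension:

    sljit_emit_simd_lane_mov(compiler, SLJIT_SIMD_REG_128 | SLJIT_SIMD_ELEM_32
        | SLJIT_SIMD_STORE | SLJIT_SIMD_LANE_SIGNED, SLJIT_FR0, 3, SLJIT_R0, 0);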
+
+/* Replicates a scalar value from a lane to all lanes
+ of a simd register.
+
+ If the operation is not supported, it returns with
+ SLJIT_ERR_UNSUPPORTED. If SLJIT_SIMD_TEST is passed,
+ it does not emit any instructions.
+
+ type must be a combination of SLJIT_SIMD_* options
+ except SLJIT_SIMD_STORE.
+ freg is the destination simd register of the operation
+   src is the simd register whose lane is replicated
+ src_lane_index is the lane index of the src register
+
+ Flags: - (does not modify flags) */
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_lane_replicate(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 src, sljit_s32 src_lane_index);
+
+/* The following options are used by sljit_emit_simd_load_extend(). */
+
+/* Sign extend the integer elements */
+#define SLJIT_SIMD_EXTEND_SIGNED 0x000002
+/* Extend data to 16 bit */
+#define SLJIT_SIMD_EXTEND_16 (1 << 24)
+/* Extend data to 32 bit */
+#define SLJIT_SIMD_EXTEND_32 (2 << 24)
+/* Extend data to 64 bit */
+#define SLJIT_SIMD_EXTEND_64 (3 << 24)
+
+/* Extends elements and stores them in a simd register.
+ The extension operation increases the size of the
+ elements (e.g. from 16 bit to 64 bit). For integer
+ values, the extension can be signed or unsigned.
+
+ If the operation is not supported, it returns with
+ SLJIT_ERR_UNSUPPORTED. If SLJIT_SIMD_TEST is passed,
+ it does not emit any instructions.
+
+ type must be a combination of SLJIT_SIMD_*, and
+ SLJIT_SIMD_EXTEND_* options except SLJIT_SIMD_STORE
+ freg is the destination simd register of the operation
+ src must be a memory operand or a simd register.
+ In the latter case, the source elements are stored
+ in the lower half of the register.
+
+ Flags: - (does not modify flags) */
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_extend(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 src, sljit_sw srcw);
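
[Editorial note, illustrative only, not part of this patch.] A sketch extending four signed 16 bit values from memory into four 32 bit lanes of a 128 bit register:

    sljit_emit_simd_extend(compiler, SLJIT_SIMD_REG_128 | SLJIT_SIMD_ELEM_16
        | SLJIT_SIMD_EXTEND_SIGNED | SLJIT_SIMD_EXTEND_32,
        SLJIT_FR0, SLJIT_MEM1(SLJIT_R0), 0);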
+
+/* Extracts the highest bit (usually the sign bit) from
+   each element of a vector.
+
+ If the operation is not supported, it returns with
+ SLJIT_ERR_UNSUPPORTED. If SLJIT_SIMD_TEST is passed,
+ it does not emit any instructions.
+
+ type must be a combination of SLJIT_SIMD_* and SLJIT_32
+ options except SLJIT_SIMD_LOAD
+ freg is the source simd register of the operation
+ dst is the destination operand
+
+ Flags: - (does not modify flags) */
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_sign(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 dst, sljit_sw dstw);
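
[Editorial note, illustrative only, not part of this patch.] A sketch collecting the sign bits of 16 byte lanes into a bitmask:

    /* R0 = 16 bit mask built from the top bit of each byte of FR0. */
    sljit_emit_simd_sign(compiler, SLJIT_SIMD_REG_128 | SLJIT_SIMD_ELEM_8
        | SLJIT_SIMD_STORE, SLJIT_FR0, SLJIT_R0, 0);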
+
+/* The following options are used by sljit_emit_simd_op2(). */
+
+/* Binary 'and' operation */
+#define SLJIT_SIMD_OP2_AND 0x000001
+/* Binary 'or' operation */
+#define SLJIT_SIMD_OP2_OR 0x000002
+/* Binary 'xor' operation */
+#define SLJIT_SIMD_OP2_XOR 0x000003
+
+/* Performs simd operations using simd registers.
+
+ If the operation is not supported, it returns with
+ SLJIT_ERR_UNSUPPORTED. If SLJIT_SIMD_TEST is passed,
+ it does not emit any instructions.
+
+   type must be a combination of SLJIT_SIMD_* and SLJIT_SIMD_OP2_*
+   options except SLJIT_SIMD_LOAD and SLJIT_SIMD_STORE
+   dst_freg is the destination register of the operation
+   src1_freg is the first source register of the operation
+   src2_freg is the second source register of the operation
+
+ Flags: - (does not modify flags) */
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_op2(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 dst_freg, sljit_s32 src1_freg, sljit_s32 src2_freg);
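
[Editorial note, illustrative only, not part of this patch.] A sketch of a 128 bit bitwise XOR (FR0 ^= FR1); the element size does not matter for bitwise operations, so ELEM_8 is used:

    sljit_emit_simd_op2(compiler, SLJIT_SIMD_REG_128 | SLJIT_SIMD_ELEM_8
        | SLJIT_SIMD_OP2_XOR, SLJIT_FR0, SLJIT_FR0, SLJIT_FR1);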
+
+/* The sljit_emit_atomic_load and sljit_emit_atomic_store operation pair
+ can perform an atomic read-modify-write operation. First, an unsigned
+ value must be loaded from memory using sljit_emit_atomic_load. Then,
+ the updated value must be written back to the same memory location by
+ sljit_emit_atomic_store. A thread can only perform a single atomic
+ operation at a time.
+
+ Note: atomic operations are experimental, and not implemented
+ for all cpus.
+
+ The following conditions must be satisfied, or the operation
+ is undefined:
+ - the address provided in mem_reg must be divisible by the size of
+ the value (only naturally aligned updates are supported)
+ - no memory writes are allowed between the load and store operations
+     regardless of their target address (currently read operations are
+ allowed, but this might change in the future)
+ - the memory operation (op) and the base address (stored in mem_reg)
+ passed to the load/store operations must be the same (the mem_reg
+ can be a different register, only its value must be the same)
+   - a store must always follow a load for the same transaction.
+
+ op must be between SLJIT_MOV and SLJIT_MOV_P, excluding all
+ signed loads such as SLJIT_MOV32_S16
+ dst_reg is the register where the data will be loaded into
+ mem_reg is the base address of the memory load (it cannot be
+ SLJIT_SP or a virtual register on x86-32)
+
+ Flags: - (does not modify flags) */
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_atomic_load(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst_reg,
+ sljit_s32 mem_reg);
+
+/* The sljit_emit_atomic_load and sljit_emit_atomic_store operations
+   allow performing an atomic read-modify-write operation. See the
+ description of sljit_emit_atomic_load.
+
+ op must be between SLJIT_MOV and SLJIT_MOV_P, excluding all signed
+ loads such as SLJIT_MOV32_S16
+   src_reg is the register whose value is stored into the memory
+ mem_reg is the base address of the memory store (it cannot be
+ SLJIT_SP or a virtual register on x86-32)
+   temp_reg is a scratch register (not preserved), which must be
+ initialized with the value loaded into the dst_reg during the
+ corresponding sljit_emit_atomic_load operation, or the operation
+ is undefined
+
+ Flags: ATOMIC_STORED is set if the operation is successful,
+ otherwise the memory remains unchanged. */
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_atomic_store(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 src_reg,
+ sljit_s32 mem_reg,
+ sljit_s32 temp_reg);
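
[Editorial note, illustrative only, not part of this patch.] A sketch of the intended pairing: an atomic increment of the word addressed by R2, retried until the store succeeds:

    struct sljit_label *retry = sljit_emit_label(compiler);
    sljit_emit_atomic_load(compiler, SLJIT_MOV, SLJIT_R0, SLJIT_R2);
    sljit_emit_op2(compiler, SLJIT_ADD, SLJIT_R1, 0, SLJIT_R0, 0, SLJIT_IMM, 1);
    /* temp_reg (R0) still holds the value produced by the load, as required. */
    sljit_emit_atomic_store(compiler, SLJIT_MOV | SLJIT_SET_ATOMIC_STORED,
        SLJIT_R1, SLJIT_R2, SLJIT_R0);
    sljit_set_label(sljit_emit_jump(compiler, SLJIT_ATOMIC_NOT_STORED), retry);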
+
+/* Copies the base address of SLJIT_SP + offset to dst. The offset can
+ represent the starting address of a value in the local data (stack).
+   The offset is not limited by the local data limits; it can be any value.
+   For example, if an array of bytes is stored on the stack from
+ offset 0x40, and R0 contains the offset of an array item plus 0x120,
+ this item can be changed by two SLJIT instructions:
sljit_get_local_base(compiler, SLJIT_R1, 0, 0x40 - 0x120);
- sljit_emit_op1(compiler, SLJIT_MOV, SLJIT_MEM2(SLJIT_R1, SLJIT_R0), 0, SLJIT_IMM, 0x5);
+ sljit_emit_op1(compiler, SLJIT_MOV_U8, SLJIT_MEM2(SLJIT_R1, SLJIT_R0), 0, SLJIT_IMM, 0x5);
Flags: - (may destroy flags) */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_local_base(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw offset);
@@ -1387,18 +2158,80 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_ta
SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset);
/* --------------------------------------------------------------------- */
-/* Miscellaneous utility functions */
+/* CPU specific functions */
/* --------------------------------------------------------------------- */
-#define SLJIT_MAJOR_VERSION 0
-#define SLJIT_MINOR_VERSION 94
+/* Types for sljit_get_register_index */
+
+/* General purpose (integer) registers. */
+#define SLJIT_GP_REGISTER 0
+/* Floating point registers. */
+#define SLJIT_FLOAT_REGISTER 1
+
+/* The following function is a helper function for sljit_emit_op_custom.
+   It returns with the real machine register index ( >=0 ) of any register.
+
+ When type is SLJIT_GP_REGISTER:
+ reg must be an SLJIT_R(i), SLJIT_S(i), or SLJIT_SP register
+
+ When type is SLJIT_FLOAT_REGISTER:
+ reg must be an SLJIT_FR(i) or SLJIT_FS(i) register
+
+ When type is SLJIT_SIMD_REG_64 / 128 / 256 / 512 :
+ reg must be an SLJIT_FR(i) or SLJIT_FS(i) register
+
+ Note: it returns with -1 for unknown registers, such as virtual
+ registers on x86-32 or unsupported simd registers. */
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 type, sljit_s32 reg);
+
+/* Any instruction can be inserted into the instruction stream by
+ sljit_emit_op_custom. It has a similar purpose as inline assembly.
+   The size parameter must match the instruction size of the target
+ architecture:
+
+ x86: 0 < size <= 15, the instruction argument can be byte aligned.
+ Thumb2: if size == 2, the instruction argument must be 2 byte aligned.
+ if size == 4, the instruction argument must be 4 byte aligned.
+ s390x: size can be 2, 4, or 6, the instruction argument can be byte aligned.
+ Otherwise: size must be 4 and instruction argument must be 4 byte aligned. */
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler,
+ void *instruction, sljit_u32 size);
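
[Editorial note, illustrative only, not part of this patch.] A sketch inserting one raw instruction, here an x86 NOP (0x90), which satisfies the 0 < size <= 15 rule on that target:

    sljit_u8 nop = 0x90;
    sljit_emit_op_custom(compiler, &nop, 1);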
+
+/* Flags were set by a 32 bit operation. */
+#define SLJIT_CURRENT_FLAGS_32 SLJIT_32
+
+/* Flags were set by an ADD or ADDC operation. */
+#define SLJIT_CURRENT_FLAGS_ADD 0x01
+/* Flags were set by a SUB, SUBC, or NEG operation. */
+#define SLJIT_CURRENT_FLAGS_SUB 0x02
+
+/* Flags were set by sljit_emit_op2u with SLJIT_SUB opcode.
+ Must be combined with SLJIT_CURRENT_FLAGS_SUB. */
+#define SLJIT_CURRENT_FLAGS_COMPARE 0x04
+
+/* Define the currently available CPU status flags. It is usually used after
+   an sljit_emit_label or sljit_emit_op_custom operation to define which CPU
+ status flags are available.
+
+ The current_flags must be a valid combination of SLJIT_SET_* and
+ SLJIT_CURRENT_FLAGS_* constants. */
+
+SLJIT_API_FUNC_ATTRIBUTE void sljit_set_current_flags(struct sljit_compiler *compiler,
+ sljit_s32 current_flags);
+
+/* --------------------------------------------------------------------- */
+/* Miscellaneous utility functions */
+/* --------------------------------------------------------------------- */
/* Get the human readable name of the platform. Can be useful on platforms
- like ARM, where ARM and Thumb2 functions can be mixed, and
- it is useful to know the type of the code generator. */
+ like ARM, where ARM and Thumb2 functions can be mixed, and it is useful
+ to know the type of the code generator. */
SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void);
-/* Portable helper function to get an offset of a member. */
+/* Portable helper function to get an offset of a member.
+ Same as offsetof() macro defined in stddef.h */
#define SLJIT_OFFSETOF(base, member) ((sljit_sw)(&((base*)0x10)->member) - 0x10)
#if (defined SLJIT_UTIL_STACK && SLJIT_UTIL_STACK)
@@ -1454,26 +2287,29 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_u8 *SLJIT_FUNC sljit_stack_resize(struct sljit_st
#if !(defined SLJIT_INDIRECT_CALL && SLJIT_INDIRECT_CALL)
-/* Get the entry address of a given function. */
-#define SLJIT_FUNC_OFFSET(func_name) ((sljit_sw)func_name)
+/* Get the entry address of a given function (signed, unsigned result). */
+#define SLJIT_FUNC_ADDR(func_name) ((sljit_sw)func_name)
+#define SLJIT_FUNC_UADDR(func_name) ((sljit_uw)func_name)
#else /* !(defined SLJIT_INDIRECT_CALL && SLJIT_INDIRECT_CALL) */
/* All JIT related code should be placed in the same context (library, binary, etc.). */
-#define SLJIT_FUNC_OFFSET(func_name) (*(sljit_sw*)(void*)func_name)
+/* Get the entry address of a given function (signed, unsigned result). */
+#define SLJIT_FUNC_ADDR(func_name) (*(sljit_sw*)(void*)func_name)
+#define SLJIT_FUNC_UADDR(func_name) (*(sljit_uw*)(void*)func_name)
/* For powerpc64, the function pointers point to a context descriptor. */
struct sljit_function_context {
- sljit_sw addr;
- sljit_sw r2;
- sljit_sw r11;
+ sljit_uw addr;
+ sljit_uw r2;
+ sljit_uw r11;
};
/* Fill the context arguments using the addr and the function.
   If func_ptr is NULL, it will not be set to the address of the context.
If addr is NULL, the function address also comes from the func pointer. */
-SLJIT_API_FUNC_ATTRIBUTE void sljit_set_function_context(void** func_ptr, struct sljit_function_context* context, sljit_sw addr, void* func);
+SLJIT_API_FUNC_ATTRIBUTE void sljit_set_function_context(void** func_ptr, struct sljit_function_context* context, sljit_uw addr, void* func);
#endif /* !(defined SLJIT_INDIRECT_CALL && SLJIT_INDIRECT_CALL) */
@@ -1486,58 +2322,6 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_set_function_context(void** func_ptr, struct
SLJIT_API_FUNC_ATTRIBUTE void sljit_free_unused_memory_exec(void);
#endif
-/* --------------------------------------------------------------------- */
-/* CPU specific functions */
-/* --------------------------------------------------------------------- */
-
-/* The following function is a helper function for sljit_emit_op_custom.
- It returns with the real machine register index ( >=0 ) of any SLJIT_R,
- SLJIT_S and SLJIT_SP registers.
-
- Note: it returns with -1 for virtual registers (only on x86-32). */
-
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg);
-
-/* The following function is a helper function for sljit_emit_op_custom.
- It returns with the real machine register index of any SLJIT_FLOAT register.
-
- Note: the index is always an even number on ARM (except ARM-64), MIPS, and SPARC. */
-
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg);
-
-/* Any instruction can be inserted into the instruction stream by
- sljit_emit_op_custom. It has a similar purpose as inline assembly.
- The size parameter must match to the instruction size of the target
- architecture:
-
- x86: 0 < size <= 15. The instruction argument can be byte aligned.
- Thumb2: if size == 2, the instruction argument must be 2 byte aligned.
- if size == 4, the instruction argument must be 4 byte aligned.
- Otherwise: size must be 4 and instruction argument must be 4 byte aligned. */
-
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler,
- void *instruction, sljit_s32 size);
-
-/* Flags were set by a 32 bit operation. */
-#define SLJIT_CURRENT_FLAGS_I32_OP SLJIT_I32_OP
-
-/* Flags were set by an ADD, ADDC, SUB, SUBC, or NEG operation. */
-#define SLJIT_CURRENT_FLAGS_ADD_SUB 0x01
-
-/* Flags were set by a SUB with unused destination.
- Must be combined with SLJIT_CURRENT_FLAGS_ADD_SUB. */
-#define SLJIT_CURRENT_FLAGS_COMPARE 0x02
-
-/* Define the currently available CPU status flags. It is usually used after
- an sljit_emit_label or sljit_emit_op_custom operations to define which CPU
- status flags are available.
-
- The current_flags must be a valid combination of SLJIT_SET_* and
- SLJIT_CURRENT_FLAGS_* constants. */
-
-SLJIT_API_FUNC_ATTRIBUTE void sljit_set_current_flags(struct sljit_compiler *compiler,
- sljit_s32 current_flags);
-
#ifdef __cplusplus
} /* extern "C" */
#endif
diff --git a/src/3rdparty/pcre2/src/sljit/sljitNativeARM_32.c b/src/3rdparty/pcre2/src/sljit/sljitNativeARM_32.c
index 74cf55fcd2..d44616d800 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitNativeARM_32.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitNativeARM_32.c
@@ -34,13 +34,16 @@ SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void)
{
#if (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)
return "ARMv7" SLJIT_CPUINFO ARM_ABI_INFO;
-#elif (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
- return "ARMv5" SLJIT_CPUINFO ARM_ABI_INFO;
+#elif (defined SLJIT_CONFIG_ARM_V6 && SLJIT_CONFIG_ARM_V6)
+ return "ARMv6" SLJIT_CPUINFO ARM_ABI_INFO;
#else
#error "Internal error: Unknown ARM architecture"
#endif
}
+/* Length of an instruction word. */
+typedef sljit_u32 sljit_ins;
+
/* Last register + 1. */
#define TMP_REG1 (SLJIT_NUMBER_OF_REGISTERS + 2)
#define TMP_REG2 (SLJIT_NUMBER_OF_REGISTERS + 3)
@@ -55,22 +58,39 @@ SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void)
#define CONST_POOL_EMPTY 0xffffffff
#define ALIGN_INSTRUCTION(ptr) \
- (sljit_uw*)(((sljit_uw)(ptr) + (CONST_POOL_ALIGNMENT * sizeof(sljit_uw)) - 1) & ~((CONST_POOL_ALIGNMENT * sizeof(sljit_uw)) - 1))
+ (sljit_ins*)(((sljit_ins)(ptr) + (CONST_POOL_ALIGNMENT * sizeof(sljit_ins)) - 1) & ~((CONST_POOL_ALIGNMENT * sizeof(sljit_ins)) - 1))
#define MAX_DIFFERENCE(max_diff) \
- (((max_diff) / (sljit_s32)sizeof(sljit_uw)) - (CONST_POOL_ALIGNMENT - 1))
+ (((max_diff) / (sljit_s32)sizeof(sljit_ins)) - (CONST_POOL_ALIGNMENT - 1))
/* See sljit_emit_enter and sljit_emit_op0 if you want to change them. */
static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 5] = {
0, 0, 1, 2, 3, 11, 10, 9, 8, 7, 6, 5, 4, 13, 12, 14, 15
};
-static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
- 0, 0, 1, 2, 3, 4, 5, 6, 7
+static const sljit_u8 freg_map[((SLJIT_NUMBER_OF_FLOAT_REGISTERS + 2) << 1) + 1] = {
+ 0,
+ 0, 1, 2, 3, 4, 5, 15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6,
+ 0, 1, 2, 3, 4, 5, 15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6
+};
+
+static const sljit_u8 freg_ebit_map[((SLJIT_NUMBER_OF_FLOAT_REGISTERS + 2) << 1) + 1] = {
+ 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1
};
-#define RM(rm) (reg_map[rm])
-#define RD(rd) (reg_map[rd] << 12)
-#define RN(rn) (reg_map[rn] << 16)
+#define RM(rm) ((sljit_ins)reg_map[rm])
+#define RM8(rm) ((sljit_ins)reg_map[rm] << 8)
+#define RD(rd) ((sljit_ins)reg_map[rd] << 12)
+#define RN(rn) ((sljit_ins)reg_map[rn] << 16)
+
+#define VM(vm) (((sljit_ins)freg_map[vm]) | ((sljit_ins)freg_ebit_map[vm] << 5))
+#define VD(vd) (((sljit_ins)freg_map[vd] << 12) | ((sljit_ins)freg_ebit_map[vd] << 22))
+#define VN(vn) (((sljit_ins)freg_map[vn] << 16) | ((sljit_ins)freg_ebit_map[vn] << 7))
/* --------------------------------------------------------------------- */
/* Instruction forms */
@@ -87,14 +107,19 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
#define AND 0xe0000000
#define B 0xea000000
#define BIC 0xe1c00000
+#define BKPT 0xe1200070
#define BL 0xeb000000
#define BLX 0xe12fff30
#define BX 0xe12fff10
#define CLZ 0xe16f0f10
#define CMN 0xe1600000
#define CMP 0xe1400000
-#define BKPT 0xe1200070
#define EOR 0xe0200000
+#define LDR 0xe5100000
+#define LDR_POST 0xe4100000
+#define LDREX 0xe1900f9f
+#define LDREXB 0xe1d00f9f
+#define LDREXH 0xe1f00f9f
#define MOV 0xe1a00000
#define MUL 0xe0000090
#define MVN 0xe1e00000
@@ -102,44 +127,89 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
#define ORR 0xe1800000
#define PUSH 0xe92d0000
#define POP 0xe8bd0000
+#define REV 0xe6bf0f30
+#define REV16 0xe6bf0fb0
#define RSB 0xe0600000
#define RSC 0xe0e00000
#define SBC 0xe0c00000
#define SMULL 0xe0c00090
+#define STR 0xe5000000
+#define STREX 0xe1800f90
+#define STREXB 0xe1c00f90
+#define STREXH 0xe1e00f90
#define SUB 0xe0400000
+#define SXTB 0xe6af0070
+#define SXTH 0xe6bf0070
+#define TST 0xe1000000
#define UMULL 0xe0800090
+#define UXTB 0xe6ef0070
+#define UXTH 0xe6ff0070
#define VABS_F32 0xeeb00ac0
#define VADD_F32 0xee300a00
+#define VAND 0xf2000110
#define VCMP_F32 0xeeb40a40
#define VCVT_F32_S32 0xeeb80ac0
+#define VCVT_F32_U32 0xeeb80a40
#define VCVT_F64_F32 0xeeb70ac0
#define VCVT_S32_F32 0xeebd0ac0
#define VDIV_F32 0xee800a00
+#define VDUP 0xee800b10
+#define VDUP_s 0xf3b00c00
+#define VEOR 0xf3000110
+#define VLD1 0xf4200000
+#define VLD1_r 0xf4a00c00
+#define VLD1_s 0xf4a00000
+#define VLDR_F32 0xed100a00
#define VMOV_F32 0xeeb00a40
#define VMOV 0xee000a10
#define VMOV2 0xec400a10
+#define VMOV_i 0xf2800010
+#define VMOV_s 0xee000b10
+#define VMOVN 0xf3b20200
#define VMRS 0xeef1fa10
#define VMUL_F32 0xee200a00
#define VNEG_F32 0xeeb10a40
+#define VORR 0xf2200110
+#define VPOP 0xecbd0b00
+#define VPUSH 0xed2d0b00
+#define VSHLL 0xf2800a10
+#define VSHR 0xf2800010
+#define VSRA 0xf2800110
+#define VST1 0xf4000000
+#define VST1_s 0xf4800000
#define VSTR_F32 0xed000a00
#define VSUB_F32 0xee300a40
#if (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)
/* Arm v7 specific instructions. */
-#define MOVW 0xe3000000
#define MOVT 0xe3400000
-#define SXTB 0xe6af0070
-#define SXTH 0xe6bf0070
-#define UXTB 0xe6ef0070
-#define UXTH 0xe6ff0070
+#define MOVW 0xe3000000
+#define RBIT 0xe6ff0f30
#endif
-#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+
+static sljit_s32 function_check_is_freg(struct sljit_compiler *compiler, sljit_s32 fr, sljit_s32 is_32)
+{
+ if (compiler->scratches == -1)
+ return 0;
+
+ if (is_32 && fr >= SLJIT_F64_SECOND(SLJIT_FR0))
+ fr -= SLJIT_F64_SECOND(0);
+
+ return (fr >= SLJIT_FR0 && fr < (SLJIT_FR0 + compiler->fscratches))
+ || (fr > (SLJIT_FS0 - compiler->fsaveds) && fr <= SLJIT_FS0)
+ || (fr >= SLJIT_TMP_FREGISTER_BASE && fr < (SLJIT_TMP_FREGISTER_BASE + SLJIT_NUMBER_OF_TEMPORARY_FLOAT_REGISTERS));
+}
+
+#endif /* SLJIT_ARGUMENT_CHECKS */
+
+#if (defined SLJIT_CONFIG_ARM_V6 && SLJIT_CONFIG_ARM_V6)
static sljit_s32 push_cpool(struct sljit_compiler *compiler)
{
/* Pushing the constant pool into the instruction stream. */
- sljit_uw* inst;
+ sljit_ins* inst;
sljit_uw* cpool_ptr;
sljit_uw* cpool_end;
sljit_s32 i;
@@ -149,13 +219,13 @@ static sljit_s32 push_cpool(struct sljit_compiler *compiler)
compiler->last_label->size += compiler->cpool_fill + (CONST_POOL_ALIGNMENT - 1) + 1;
SLJIT_ASSERT(compiler->cpool_fill > 0 && compiler->cpool_fill <= CPOOL_SIZE);
- inst = (sljit_uw*)ensure_buf(compiler, sizeof(sljit_uw));
+ inst = (sljit_ins*)ensure_buf(compiler, sizeof(sljit_ins));
FAIL_IF(!inst);
compiler->size++;
*inst = 0xff000000 | compiler->cpool_fill;
for (i = 0; i < CONST_POOL_ALIGNMENT - 1; i++) {
- inst = (sljit_uw*)ensure_buf(compiler, sizeof(sljit_uw));
+ inst = (sljit_ins*)ensure_buf(compiler, sizeof(sljit_ins));
FAIL_IF(!inst);
compiler->size++;
*inst = 0;
@@ -164,7 +234,7 @@ static sljit_s32 push_cpool(struct sljit_compiler *compiler)
cpool_ptr = compiler->cpool;
cpool_end = cpool_ptr + compiler->cpool_fill;
while (cpool_ptr < cpool_end) {
- inst = (sljit_uw*)ensure_buf(compiler, sizeof(sljit_uw));
+ inst = (sljit_ins*)ensure_buf(compiler, sizeof(sljit_ins));
FAIL_IF(!inst);
compiler->size++;
*inst = *cpool_ptr++;
@@ -174,23 +244,23 @@ static sljit_s32 push_cpool(struct sljit_compiler *compiler)
return SLJIT_SUCCESS;
}
-static sljit_s32 push_inst(struct sljit_compiler *compiler, sljit_uw inst)
+static sljit_s32 push_inst(struct sljit_compiler *compiler, sljit_ins inst)
{
- sljit_uw* ptr;
+ sljit_ins* ptr;
if (SLJIT_UNLIKELY(compiler->cpool_diff != CONST_POOL_EMPTY && compiler->size - compiler->cpool_diff >= MAX_DIFFERENCE(4092)))
FAIL_IF(push_cpool(compiler));
- ptr = (sljit_uw*)ensure_buf(compiler, sizeof(sljit_uw));
+ ptr = (sljit_ins*)ensure_buf(compiler, sizeof(sljit_ins));
FAIL_IF(!ptr);
compiler->size++;
*ptr = inst;
return SLJIT_SUCCESS;
}
-static sljit_s32 push_inst_with_literal(struct sljit_compiler *compiler, sljit_uw inst, sljit_uw literal)
+static sljit_s32 push_inst_with_literal(struct sljit_compiler *compiler, sljit_ins inst, sljit_uw literal)
{
- sljit_uw* ptr;
+ sljit_ins* ptr;
sljit_uw cpool_index = CPOOL_SIZE;
sljit_uw* cpool_ptr;
sljit_uw* cpool_end;
@@ -204,7 +274,7 @@ static sljit_s32 push_inst_with_literal(struct sljit_compiler *compiler, sljit_u
cpool_unique_ptr = compiler->cpool_unique;
do {
if ((*cpool_ptr == literal) && !(*cpool_unique_ptr)) {
- cpool_index = cpool_ptr - compiler->cpool;
+ cpool_index = (sljit_uw)(cpool_ptr - compiler->cpool);
break;
}
cpool_ptr++;
@@ -226,7 +296,7 @@ static sljit_s32 push_inst_with_literal(struct sljit_compiler *compiler, sljit_u
}
SLJIT_ASSERT((inst & 0xfff) == 0);
- ptr = (sljit_uw*)ensure_buf(compiler, sizeof(sljit_uw));
+ ptr = (sljit_ins*)ensure_buf(compiler, sizeof(sljit_ins));
FAIL_IF(!ptr);
compiler->size++;
*ptr = inst | cpool_index;
@@ -238,14 +308,15 @@ static sljit_s32 push_inst_with_literal(struct sljit_compiler *compiler, sljit_u
return SLJIT_SUCCESS;
}
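/* [Illustrative sketch, not part of the patch; pool_index is a
   hypothetical standalone helper, not an sljit API.]
   push_inst_with_literal() above shares a constant-pool slot when the
   same literal is already present and not marked unique: */
#include <stddef.h>

static size_t pool_index(const unsigned long *pool, const unsigned char *unique,
	size_t fill, size_t cap, unsigned long literal)
{
	size_t i;
	for (i = 0; i < fill; i++)
		if (pool[i] == literal && !unique[i])
			return i; /* share the existing entry */
	return cap; /* not found: caller appends at pool[fill] */
}
/* The chosen index lands in the low 12 bits of the load instruction,
   which is why the SLJIT_ASSERT above requires (inst & 0xfff) == 0. */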
-static sljit_s32 push_inst_with_unique_literal(struct sljit_compiler *compiler, sljit_uw inst, sljit_uw literal)
+static sljit_s32 push_inst_with_unique_literal(struct sljit_compiler *compiler, sljit_ins inst, sljit_uw literal)
{
- sljit_uw* ptr;
+ sljit_ins* ptr;
+
if (SLJIT_UNLIKELY((compiler->cpool_diff != CONST_POOL_EMPTY && compiler->size - compiler->cpool_diff >= MAX_DIFFERENCE(4092)) || compiler->cpool_fill >= CPOOL_SIZE))
FAIL_IF(push_cpool(compiler));
SLJIT_ASSERT(compiler->cpool_fill < CPOOL_SIZE && (inst & 0xfff) == 0);
- ptr = (sljit_uw*)ensure_buf(compiler, sizeof(sljit_uw));
+ ptr = (sljit_ins*)ensure_buf(compiler, sizeof(sljit_ins));
FAIL_IF(!ptr);
compiler->size++;
*ptr = inst | compiler->cpool_fill;
@@ -292,8 +363,8 @@ static sljit_uw patch_pc_relative_loads(sljit_uw *last_pc_patch, sljit_uw *code_
while (last_pc_patch < code_ptr) {
/* Data transfer instruction with Rn == r15. */
- if ((*last_pc_patch & 0x0c0f0000) == 0x040f0000) {
- diff = const_pool - last_pc_patch;
+ if ((*last_pc_patch & 0x0e0f0000) == 0x040f0000) {
+ diff = (sljit_uw)(const_pool - last_pc_patch);
ind = (*last_pc_patch) & 0xfff;
/* Must be a load instruction with immediate offset. */
@@ -308,12 +379,12 @@ static sljit_uw patch_pc_relative_loads(sljit_uw *last_pc_patch, sljit_uw *code_
SLJIT_ASSERT(diff >= 1);
if (diff >= 2 || ind > 0) {
- diff = (diff + ind - 2) << 2;
+ diff = (diff + (sljit_uw)ind - 2) << 2;
SLJIT_ASSERT(diff <= 0xfff);
- *last_pc_patch = (*last_pc_patch & ~0xfff) | diff;
+ *last_pc_patch = (*last_pc_patch & ~(sljit_uw)0xfff) | diff;
}
else
- *last_pc_patch = (*last_pc_patch & ~(0xfff | (1 << 23))) | 0x004;
+ *last_pc_patch = (*last_pc_patch & ~(sljit_uw)(0xfff | (1 << 23))) | 0x004;
}
last_pc_patch++;
}
@@ -329,24 +400,24 @@ struct future_patch {
static sljit_s32 resolve_const_pool_index(struct sljit_compiler *compiler, struct future_patch **first_patch, sljit_uw cpool_current_index, sljit_uw *cpool_start_address, sljit_uw *buf_ptr)
{
- sljit_s32 value;
+ sljit_u32 value;
struct future_patch *curr_patch, *prev_patch;
SLJIT_UNUSED_ARG(compiler);
/* Using the values generated by patch_pc_relative_loads. */
if (!*first_patch)
- value = (sljit_s32)cpool_start_address[cpool_current_index];
+ value = cpool_start_address[cpool_current_index];
else {
curr_patch = *first_patch;
prev_patch = NULL;
while (1) {
if (!curr_patch) {
- value = (sljit_s32)cpool_start_address[cpool_current_index];
+ value = cpool_start_address[cpool_current_index];
break;
}
if ((sljit_uw)curr_patch->index == cpool_current_index) {
- value = curr_patch->value;
+ value = (sljit_uw)curr_patch->value;
if (prev_patch)
prev_patch->next = curr_patch->next;
else
@@ -359,8 +430,8 @@ static sljit_s32 resolve_const_pool_index(struct sljit_compiler *compiler, struc
}
}
- if (value >= 0) {
- if ((sljit_uw)value > cpool_current_index) {
+ if ((sljit_sw)value >= 0) {
+ if (value > cpool_current_index) {
curr_patch = (struct future_patch*)SLJIT_MALLOC(sizeof(struct future_patch), compiler->allocator_data);
if (!curr_patch) {
while (*first_patch) {
@@ -371,8 +442,8 @@ static sljit_s32 resolve_const_pool_index(struct sljit_compiler *compiler, struc
return SLJIT_ERR_ALLOC_FAILED;
}
curr_patch->next = *first_patch;
- curr_patch->index = value;
- curr_patch->value = cpool_start_address[value];
+ curr_patch->index = (sljit_sw)value;
+ curr_patch->value = (sljit_sw)cpool_start_address[value];
*first_patch = curr_patch;
}
cpool_start_address[value] = *buf_ptr;
@@ -382,11 +453,11 @@ static sljit_s32 resolve_const_pool_index(struct sljit_compiler *compiler, struc
#else
-static sljit_s32 push_inst(struct sljit_compiler *compiler, sljit_uw inst)
+static sljit_s32 push_inst(struct sljit_compiler *compiler, sljit_ins inst)
{
- sljit_uw* ptr;
+ sljit_ins* ptr;
- ptr = (sljit_uw*)ensure_buf(compiler, sizeof(sljit_uw));
+ ptr = (sljit_ins*)ensure_buf(compiler, sizeof(sljit_ins));
FAIL_IF(!ptr);
compiler->size++;
*ptr = inst;
@@ -395,8 +466,8 @@ static sljit_s32 push_inst(struct sljit_compiler *compiler, sljit_uw inst)
static SLJIT_INLINE sljit_s32 emit_imm(struct sljit_compiler *compiler, sljit_s32 reg, sljit_sw imm)
{
- FAIL_IF(push_inst(compiler, MOVW | RD(reg) | ((imm << 4) & 0xf0000) | (imm & 0xfff)));
- return push_inst(compiler, MOVT | RD(reg) | ((imm >> 12) & 0xf0000) | ((imm >> 16) & 0xfff));
+ FAIL_IF(push_inst(compiler, MOVW | RD(reg) | ((imm << 4) & 0xf0000) | ((sljit_u32)imm & 0xfff)));
+ return push_inst(compiler, MOVT | RD(reg) | ((imm >> 12) & 0xf0000) | (((sljit_u32)imm >> 16) & 0xfff));
}
#endif
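/* [Illustrative sketch, not part of the patch; encode_imm16 and
   demo_movw_movt are hypothetical helpers.] emit_imm() above splits a
   32-bit value across a MOVW/MOVT pair; each instruction stores its
   16-bit half as imm4 (bits 16-19) : imm12 (bits 0-11): */
#include <assert.h>
#include <stdint.h>

static uint32_t encode_imm16(uint32_t opcode, uint32_t rd, uint32_t imm16)
{
	return opcode | (rd << 12) | ((imm16 << 4) & 0xf0000) | (imm16 & 0xfff);
}

static void demo_movw_movt(void)
{
	uint32_t value = 0x12345678;
	/* movw r1, #0x5678 ; movt r1, #0x1234 */
	assert(encode_imm16(0xe3000000, 1, value & 0xffff) == 0xe3051678);
	assert(encode_imm16(0xe3400000, 1, value >> 16) == 0xe3411234);
}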
@@ -408,7 +479,7 @@ static SLJIT_INLINE sljit_s32 detect_jump_type(struct sljit_jump *jump, sljit_uw
if (jump->flags & SLJIT_REWRITABLE_JUMP)
return 0;
-#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+#if (defined SLJIT_CONFIG_ARM_V6 && SLJIT_CONFIG_ARM_V6)
if (jump->flags & IS_BL)
code_ptr--;
@@ -436,7 +507,7 @@ static SLJIT_INLINE sljit_s32 detect_jump_type(struct sljit_jump *jump, sljit_uw
jump->flags |= PATCH_B;
}
}
-#else
+#else /* !SLJIT_CONFIG_ARM_V6 */
if (jump->flags & JUMP_ADDR)
diff = ((sljit_sw)jump->u.target - (sljit_sw)code_ptr - executable_offset);
else {
@@ -454,16 +525,16 @@ static SLJIT_INLINE sljit_s32 detect_jump_type(struct sljit_jump *jump, sljit_uw
jump->flags |= PATCH_B;
return 1;
}
-#endif
+#endif /* SLJIT_CONFIG_ARM_V6 */
return 0;
}
static SLJIT_INLINE void inline_set_jump_addr(sljit_uw jump_ptr, sljit_sw executable_offset, sljit_uw new_addr, sljit_s32 flush_cache)
{
-#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
- sljit_uw *ptr = (sljit_uw *)jump_ptr;
- sljit_uw *inst = (sljit_uw *)ptr[0];
- sljit_uw mov_pc = ptr[1];
+#if (defined SLJIT_CONFIG_ARM_V6 && SLJIT_CONFIG_ARM_V6)
+ sljit_ins *ptr = (sljit_ins*)jump_ptr;
+ sljit_ins *inst = (sljit_ins*)ptr[0];
+ sljit_ins mov_pc = ptr[1];
sljit_s32 bl = (mov_pc & 0x0000f000) != RD(TMP_PC);
sljit_sw diff = (sljit_sw)(((sljit_sw)new_addr - (sljit_sw)(inst + 2) - executable_offset) >> 2);
@@ -478,7 +549,7 @@ static SLJIT_INLINE void inline_set_jump_addr(sljit_uw jump_ptr, sljit_sw execut
inst[0] = (mov_pc & COND_MASK) | (B - CONDITIONAL) | (diff & 0xffffff);
if (flush_cache) {
SLJIT_UPDATE_WX_FLAGS(inst, inst + 1, 1);
- inst = (sljit_uw *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
+ inst = (sljit_ins*)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
SLJIT_CACHE_FLUSH(inst, inst + 1);
}
} else {
@@ -489,7 +560,7 @@ static SLJIT_INLINE void inline_set_jump_addr(sljit_uw jump_ptr, sljit_sw execut
inst[1] = NOP;
if (flush_cache) {
SLJIT_UPDATE_WX_FLAGS(inst, inst + 2, 1);
- inst = (sljit_uw *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
+ inst = (sljit_ins*)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
SLJIT_CACHE_FLUSH(inst, inst + 2);
}
}
@@ -508,14 +579,14 @@ static SLJIT_INLINE void inline_set_jump_addr(sljit_uw jump_ptr, sljit_sw execut
if (!bl) {
if (flush_cache) {
SLJIT_UPDATE_WX_FLAGS(inst, inst + 1, 1);
- inst = (sljit_uw *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
+ inst = (sljit_ins*)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
SLJIT_CACHE_FLUSH(inst, inst + 1);
}
} else {
inst[1] = BLX | RM(TMP_REG1);
if (flush_cache) {
SLJIT_UPDATE_WX_FLAGS(inst, inst + 2, 1);
- inst = (sljit_uw *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
+ inst = (sljit_ins*)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
SLJIT_CACHE_FLUSH(inst, inst + 2);
}
}
@@ -531,8 +602,8 @@ static SLJIT_INLINE void inline_set_jump_addr(sljit_uw jump_ptr, sljit_sw execut
SLJIT_UPDATE_WX_FLAGS(ptr, ptr + 1, 1);
}
}
-#else
- sljit_uw *inst = (sljit_uw*)jump_ptr;
+#else /* !SLJIT_CONFIG_ARM_V6 */
+ sljit_ins *inst = (sljit_ins*)jump_ptr;
SLJIT_UNUSED_ARG(executable_offset);
@@ -547,19 +618,21 @@ static SLJIT_INLINE void inline_set_jump_addr(sljit_uw jump_ptr, sljit_sw execut
if (flush_cache) {
SLJIT_UPDATE_WX_FLAGS(inst, inst + 2, 1);
- inst = (sljit_uw *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
+ inst = (sljit_ins*)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
SLJIT_CACHE_FLUSH(inst, inst + 2);
}
-#endif
+#endif /* SLJIT_CONFIG_ARM_V6 */
}
static sljit_uw get_imm(sljit_uw imm);
+static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 reg, sljit_uw imm);
+static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw, sljit_s32 tmp_reg);
-static SLJIT_INLINE void inline_set_const(sljit_uw addr, sljit_sw executable_offset, sljit_sw new_constant, sljit_s32 flush_cache)
+static SLJIT_INLINE void inline_set_const(sljit_uw addr, sljit_sw executable_offset, sljit_uw new_constant, sljit_s32 flush_cache)
{
-#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
- sljit_uw *ptr = (sljit_uw*)addr;
- sljit_uw *inst = (sljit_uw*)ptr[0];
+#if (defined SLJIT_CONFIG_ARM_V6 && SLJIT_CONFIG_ARM_V6)
+ sljit_ins *ptr = (sljit_ins*)addr;
+ sljit_ins *inst = (sljit_ins*)ptr[0];
sljit_uw ldr_literal = ptr[1];
sljit_uw src2;
@@ -575,7 +648,7 @@ static SLJIT_INLINE void inline_set_const(sljit_uw addr, sljit_sw executable_off
if (flush_cache) {
SLJIT_UPDATE_WX_FLAGS(inst, inst + 1, 1);
- inst = (sljit_uw *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
+ inst = (sljit_ins*)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
SLJIT_CACHE_FLUSH(inst, inst + 1);
}
return;
@@ -591,7 +664,7 @@ static SLJIT_INLINE void inline_set_const(sljit_uw addr, sljit_sw executable_off
if (flush_cache) {
SLJIT_UPDATE_WX_FLAGS(inst, inst + 1, 1);
- inst = (sljit_uw *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
+ inst = (sljit_ins*)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
SLJIT_CACHE_FLUSH(inst, inst + 1);
}
return;
@@ -611,7 +684,7 @@ static SLJIT_INLINE void inline_set_const(sljit_uw addr, sljit_sw executable_off
if (flush_cache) {
SLJIT_UPDATE_WX_FLAGS(inst, inst + 1, 1);
- inst = (sljit_uw *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
+ inst = (sljit_ins*)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
SLJIT_CACHE_FLUSH(inst, inst + 1);
}
}
@@ -625,8 +698,8 @@ static SLJIT_INLINE void inline_set_const(sljit_uw addr, sljit_sw executable_off
if (flush_cache) {
SLJIT_UPDATE_WX_FLAGS(ptr, ptr + 1, 1);
}
-#else
- sljit_uw *inst = (sljit_uw*)addr;
+#else /* !SLJIT_CONFIG_ARM_V6 */
+ sljit_ins *inst = (sljit_ins*)addr;
SLJIT_UNUSED_ARG(executable_offset);
@@ -641,30 +714,30 @@ static SLJIT_INLINE void inline_set_const(sljit_uw addr, sljit_sw executable_off
if (flush_cache) {
SLJIT_UPDATE_WX_FLAGS(inst, inst + 2, 1);
- inst = (sljit_uw *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
+ inst = (sljit_ins*)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
SLJIT_CACHE_FLUSH(inst, inst + 2);
}
-#endif
+#endif /* SLJIT_CONFIG_ARM_V6 */
}
SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler)
{
struct sljit_memory_fragment *buf;
- sljit_uw *code;
- sljit_uw *code_ptr;
- sljit_uw *buf_ptr;
- sljit_uw *buf_end;
+ sljit_ins *code;
+ sljit_ins *code_ptr;
+ sljit_ins *buf_ptr;
+ sljit_ins *buf_end;
sljit_uw size;
sljit_uw word_count;
sljit_uw next_addr;
sljit_sw executable_offset;
- sljit_sw addr;
-#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+ sljit_uw addr;
+#if (defined SLJIT_CONFIG_ARM_V6 && SLJIT_CONFIG_ARM_V6)
sljit_uw cpool_size;
sljit_uw cpool_skip_alignment;
sljit_uw cpool_current_index;
- sljit_uw *cpool_start_address;
- sljit_uw *last_pc_patch;
+ sljit_ins *cpool_start_address;
+ sljit_ins *last_pc_patch;
struct future_patch *first_patch;
#endif
@@ -678,25 +751,25 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
reverse_buf(compiler);
/* Second code generation pass. */
-#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+#if (defined SLJIT_CONFIG_ARM_V6 && SLJIT_CONFIG_ARM_V6)
size = compiler->size + (compiler->patches << 1);
if (compiler->cpool_fill > 0)
size += compiler->cpool_fill + CONST_POOL_ALIGNMENT - 1;
-#else
+#else /* !SLJIT_CONFIG_ARM_V6 */
size = compiler->size;
-#endif
- code = (sljit_uw*)SLJIT_MALLOC_EXEC(size * sizeof(sljit_uw), compiler->exec_allocator_data);
+#endif /* SLJIT_CONFIG_ARM_V6 */
+ code = (sljit_ins*)SLJIT_MALLOC_EXEC(size * sizeof(sljit_ins), compiler->exec_allocator_data);
PTR_FAIL_WITH_EXEC_IF(code);
buf = compiler->buf;
-#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+#if (defined SLJIT_CONFIG_ARM_V6 && SLJIT_CONFIG_ARM_V6)
cpool_size = 0;
cpool_skip_alignment = 0;
cpool_current_index = 0;
cpool_start_address = NULL;
first_patch = NULL;
last_pc_patch = code;
-#endif
+#endif /* SLJIT_CONFIG_ARM_V6 */
code_ptr = code;
word_count = 0;
@@ -714,11 +787,11 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
}
do {
- buf_ptr = (sljit_uw*)buf->memory;
+ buf_ptr = (sljit_ins*)buf->memory;
buf_end = buf_ptr + (buf->used_size >> 2);
do {
word_count++;
-#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+#if (defined SLJIT_CONFIG_ARM_V6 && SLJIT_CONFIG_ARM_V6)
if (cpool_size > 0) {
if (cpool_skip_alignment > 0) {
buf_ptr++;
@@ -737,7 +810,7 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
if (label && label->size == word_count) {
/* Points after the current instruction. */
label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
- label->size = code_ptr - code;
+ label->size = (sljit_uw)(code_ptr - code);
label = label->next;
next_addr = compute_next_addr(label, jump, const_, put_label);
@@ -746,7 +819,7 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
}
}
else if ((*buf_ptr & 0xff000000) != PUSH_POOL) {
-#endif
+#endif /* SLJIT_CONFIG_ARM_V6 */
*code_ptr = *buf_ptr++;
if (next_addr == word_count) {
SLJIT_ASSERT(!label || label->size >= word_count);
@@ -756,29 +829,29 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
/* These structures are ordered by their address. */
if (jump && jump->addr == word_count) {
-#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+#if (defined SLJIT_CONFIG_ARM_V6 && SLJIT_CONFIG_ARM_V6)
if (detect_jump_type(jump, code_ptr, code, executable_offset))
code_ptr--;
jump->addr = (sljit_uw)code_ptr;
-#else
+#else /* !SLJIT_CONFIG_ARM_V6 */
jump->addr = (sljit_uw)(code_ptr - 2);
if (detect_jump_type(jump, code_ptr, code, executable_offset))
code_ptr -= 2;
-#endif
+#endif /* SLJIT_CONFIG_ARM_V6 */
jump = jump->next;
}
if (label && label->size == word_count) {
/* code_ptr can be affected above. */
label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr + 1, executable_offset);
- label->size = (code_ptr + 1) - code;
+ label->size = (sljit_uw)((code_ptr + 1) - code);
label = label->next;
}
if (const_ && const_->addr == word_count) {
-#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+#if (defined SLJIT_CONFIG_ARM_V6 && SLJIT_CONFIG_ARM_V6)
const_->addr = (sljit_uw)code_ptr;
-#else
+#else /* !SLJIT_CONFIG_ARM_V6 */
const_->addr = (sljit_uw)(code_ptr - 1);
-#endif
+#endif /* SLJIT_CONFIG_ARM_V6 */
const_ = const_->next;
}
if (put_label && put_label->addr == word_count) {
@@ -789,9 +862,8 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
next_addr = compute_next_addr(label, jump, const_, put_label);
}
code_ptr++;
-#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
- }
- else {
+#if (defined SLJIT_CONFIG_ARM_V6 && SLJIT_CONFIG_ARM_V6)
+ } else {
/* Fortunately, no need to shift. */
cpool_size = *buf_ptr++ & ~PUSH_POOL;
SLJIT_ASSERT(cpool_size > 0);
@@ -799,14 +871,14 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
cpool_current_index = patch_pc_relative_loads(last_pc_patch, code_ptr, cpool_start_address, cpool_size);
if (cpool_current_index > 0) {
/* Unconditional branch. */
- *code_ptr = B | (((cpool_start_address - code_ptr) + cpool_current_index - 2) & ~PUSH_POOL);
- code_ptr = cpool_start_address + cpool_current_index;
+ *code_ptr = B | (((sljit_ins)(cpool_start_address - code_ptr) + cpool_current_index - 2) & ~PUSH_POOL);
+ code_ptr = (sljit_ins*)(cpool_start_address + cpool_current_index);
}
cpool_skip_alignment = CONST_POOL_ALIGNMENT - 1;
cpool_current_index = 0;
last_pc_patch = code_ptr;
}
-#endif
+#endif /* SLJIT_CONFIG_ARM_V6 */
} while (buf_ptr < buf_end);
buf = buf->next;
} while (buf);
@@ -816,13 +888,13 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
SLJIT_ASSERT(!const_);
SLJIT_ASSERT(!put_label);
-#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+#if (defined SLJIT_CONFIG_ARM_V6 && SLJIT_CONFIG_ARM_V6)
SLJIT_ASSERT(cpool_size == 0);
if (compiler->cpool_fill > 0) {
cpool_start_address = ALIGN_INSTRUCTION(code_ptr);
cpool_current_index = patch_pc_relative_loads(last_pc_patch, code_ptr, cpool_start_address, compiler->cpool_fill);
if (cpool_current_index > 0)
- code_ptr = cpool_start_address + cpool_current_index;
+ code_ptr = (sljit_ins*)(cpool_start_address + cpool_current_index);
buf_ptr = compiler->cpool;
buf_end = buf_ptr + compiler->cpool_fill;
@@ -842,33 +914,32 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
jump = compiler->jumps;
while (jump) {
- buf_ptr = (sljit_uw *)jump->addr;
+ buf_ptr = (sljit_ins*)jump->addr;
if (jump->flags & PATCH_B) {
- addr = (sljit_sw)SLJIT_ADD_EXEC_OFFSET(buf_ptr + 2, executable_offset);
+ addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(buf_ptr + 2, executable_offset);
if (!(jump->flags & JUMP_ADDR)) {
SLJIT_ASSERT(jump->flags & JUMP_LABEL);
- SLJIT_ASSERT(((sljit_sw)jump->u.label->addr - addr) <= 0x01ffffff && ((sljit_sw)jump->u.label->addr - addr) >= -0x02000000);
- *buf_ptr |= (((sljit_sw)jump->u.label->addr - addr) >> 2) & 0x00ffffff;
+ SLJIT_ASSERT((sljit_sw)(jump->u.label->addr - addr) <= 0x01ffffff && (sljit_sw)(jump->u.label->addr - addr) >= -0x02000000);
+ *buf_ptr |= ((jump->u.label->addr - addr) >> 2) & 0x00ffffff;
}
else {
- SLJIT_ASSERT(((sljit_sw)jump->u.target - addr) <= 0x01ffffff && ((sljit_sw)jump->u.target - addr) >= -0x02000000);
- *buf_ptr |= (((sljit_sw)jump->u.target - addr) >> 2) & 0x00ffffff;
+ SLJIT_ASSERT((sljit_sw)(jump->u.target - addr) <= 0x01ffffff && (sljit_sw)(jump->u.target - addr) >= -0x02000000);
+ *buf_ptr |= ((jump->u.target - addr) >> 2) & 0x00ffffff;
}
}
else if (jump->flags & SLJIT_REWRITABLE_JUMP) {
-#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+#if (defined SLJIT_CONFIG_ARM_V6 && SLJIT_CONFIG_ARM_V6)
jump->addr = (sljit_uw)code_ptr;
- code_ptr[0] = (sljit_uw)buf_ptr;
+ code_ptr[0] = (sljit_ins)buf_ptr;
code_ptr[1] = *buf_ptr;
inline_set_jump_addr((sljit_uw)code_ptr, executable_offset, (jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target, 0);
code_ptr += 2;
-#else
+#else /* !SLJIT_CONFIG_ARM_V6 */
inline_set_jump_addr((sljit_uw)buf_ptr, executable_offset, (jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target, 0);
-#endif
- }
- else {
-#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+#endif /* SLJIT_CONFIG_ARM_V6 */
+ } else {
+#if (defined SLJIT_CONFIG_ARM_V6 && SLJIT_CONFIG_ARM_V6)
if (jump->flags & IS_BL)
buf_ptr--;
if (*buf_ptr & (1 << 23))
@@ -876,20 +947,20 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
else
buf_ptr += 1;
*buf_ptr = (jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target;
-#else
+#else /* !SLJIT_CONFIG_ARM_V6 */
inline_set_jump_addr((sljit_uw)buf_ptr, executable_offset, (jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target, 0);
-#endif
+#endif /* SLJIT_CONFIG_ARM_V6 */
}
jump = jump->next;
}
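/* [Illustrative sketch, not part of the patch; patch_branch is a
   hypothetical helper.] The PATCH_B fixups above store a word offset in
   the 24-bit branch field, giving the ARM +/-32MB range; the offset is
   measured from PC = instruction address + 8, hence the buf_ptr + 2: */
#include <assert.h>
#include <stdint.h>

static uint32_t patch_branch(uint32_t inst, long byte_diff)
{
	assert(byte_diff <= 0x01ffffff && byte_diff >= -0x02000000);
	return inst | (((uint32_t)byte_diff >> 2) & 0x00ffffff);
}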
-#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+#if (defined SLJIT_CONFIG_ARM_V6 && SLJIT_CONFIG_ARM_V6)
const_ = compiler->consts;
while (const_) {
- buf_ptr = (sljit_uw*)const_->addr;
+ buf_ptr = (sljit_ins*)const_->addr;
const_->addr = (sljit_uw)code_ptr;
- code_ptr[0] = (sljit_uw)buf_ptr;
+ code_ptr[0] = (sljit_ins)buf_ptr;
code_ptr[1] = *buf_ptr;
if (*buf_ptr & (1 << 23))
buf_ptr += ((*buf_ptr & 0xfff) >> 2) + 2;
@@ -901,21 +972,21 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
const_ = const_->next;
}
-#endif
+#endif /* SLJIT_CONFIG_ARM_V6 */
put_label = compiler->put_labels;
while (put_label) {
addr = put_label->label->addr;
- buf_ptr = (sljit_uw*)put_label->addr;
+ buf_ptr = (sljit_ins*)put_label->addr;
-#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+#if (defined SLJIT_CONFIG_ARM_V6 && SLJIT_CONFIG_ARM_V6)
SLJIT_ASSERT((buf_ptr[0] & 0xffff0000) == 0xe59f0000);
buf_ptr[((buf_ptr[0] & 0xfff) >> 2) + 2] = addr;
-#else
+#else /* !SLJIT_CONFIG_ARM_V6 */
SLJIT_ASSERT((buf_ptr[-1] & 0xfff00000) == MOVW && (buf_ptr[0] & 0xfff00000) == MOVT);
buf_ptr[-1] |= ((addr << 4) & 0xf0000) | (addr & 0xfff);
buf_ptr[0] |= ((addr >> 12) & 0xf0000) | ((addr >> 16) & 0xfff);
-#endif
+#endif /* SLJIT_CONFIG_ARM_V6 */
put_label = put_label->next;
}
@@ -923,10 +994,10 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
compiler->error = SLJIT_ERR_COMPILED;
compiler->executable_offset = executable_offset;
- compiler->executable_size = (code_ptr - code) * sizeof(sljit_uw);
+ compiler->executable_size = (sljit_uw)(code_ptr - code) * sizeof(sljit_uw);
- code = (sljit_uw *)SLJIT_ADD_EXEC_OFFSET(code, executable_offset);
- code_ptr = (sljit_uw *)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
+ code = (sljit_ins*)SLJIT_ADD_EXEC_OFFSET(code, executable_offset);
+ code_ptr = (sljit_ins*)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
SLJIT_CACHE_FLUSH(code, code_ptr);
SLJIT_UPDATE_WX_FLAGS(code, code_ptr, 1);
@@ -937,19 +1008,42 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
{
switch (feature_type) {
case SLJIT_HAS_FPU:
+ case SLJIT_HAS_F64_AS_F32_PAIR:
#ifdef SLJIT_IS_FPU_AVAILABLE
- return SLJIT_IS_FPU_AVAILABLE;
+ return (SLJIT_IS_FPU_AVAILABLE) != 0;
#else
/* Available by default. */
return 1;
-#endif
+#endif /* SLJIT_IS_FPU_AVAILABLE */
+ case SLJIT_HAS_SIMD:
+#if (defined SLJIT_CONFIG_ARM_V6 && SLJIT_CONFIG_ARM_V6)
+ return 0;
+#else
+#ifdef SLJIT_IS_FPU_AVAILABLE
+ return (SLJIT_IS_FPU_AVAILABLE) != 0;
+#else
+ /* Available by default. */
+ return 1;
+#endif /* SLJIT_IS_FPU_AVAILABLE */
+#endif /* SLJIT_CONFIG_ARM_V6 */
+ case SLJIT_SIMD_REGS_ARE_PAIRS:
case SLJIT_HAS_CLZ:
+ case SLJIT_HAS_ROT:
case SLJIT_HAS_CMOV:
-#if (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)
+ case SLJIT_HAS_REV:
case SLJIT_HAS_PREFETCH:
-#endif
+ case SLJIT_HAS_COPY_F32:
+ case SLJIT_HAS_COPY_F64:
+ case SLJIT_HAS_ATOMIC:
+ return 1;
+
+ case SLJIT_HAS_CTZ:
+#if defined(SLJIT_CONFIG_ARM_V6) && SLJIT_CONFIG_ARM_V6
+ return 2;
+#else
return 1;
+#endif /* SLJIT_CONFIG_ARM_V6 */
default:
return 0;
@@ -969,16 +1063,18 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
#define LOAD_DATA 0x08
/* Flag bits for emit_op. */
-#define ALLOW_IMM 0x10
-#define ALLOW_INV_IMM 0x20
-#define ALLOW_ANY_IMM (ALLOW_IMM | ALLOW_INV_IMM)
+#define ALLOW_IMM 0x10
+#define ALLOW_INV_IMM 0x20
+#define ALLOW_ANY_IMM (ALLOW_IMM | ALLOW_INV_IMM)
+#define ALLOW_NEG_IMM 0x40
+#define ALLOW_DOUBLE_IMM 0x80
/* s/l - store/load (1 bit)
u/s - signed/unsigned (1 bit)
w/b/h/N - word/byte/half/NOT allowed (2 bit)
    Storing signed and unsigned values is the same operation. */
-static const sljit_uw data_transfer_insts[16] = {
+static const sljit_ins data_transfer_insts[16] = {
/* s u w */ 0xe5000000 /* str */,
/* s u b */ 0xe5400000 /* strb */,
/* s u h */ 0xe10000b0 /* strh */,
@@ -999,7 +1095,7 @@ static const sljit_uw data_transfer_insts[16] = {
};
#define EMIT_DATA_TRANSFER(type, add, target_reg, base_reg, arg) \
- (data_transfer_insts[(type) & 0xf] | ((add) << 23) | RD(target_reg) | RN(base_reg) | (arg))
+ (data_transfer_insts[(type) & 0xf] | ((add) << 23) | RD(target_reg) | RN(base_reg) | (sljit_ins)(arg))
/* Normal ldr/str instruction.
Type2: ldrsb, ldrh, ldrsh */
@@ -1008,6 +1104,26 @@ static const sljit_uw data_transfer_insts[16] = {
#define TYPE2_TRANSFER_IMM(imm) \
(((imm) & 0xf) | (((imm) & 0xf0) << 4) | (1 << 22))
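/* [Illustrative sketch, not part of the patch; demo_type2_imm is a
   hypothetical helper.] TYPE2_TRANSFER_IMM splits an 8-bit offset into
   two nibbles, imm4H at bits 8-11 and imm4L at bits 0-3, with bit 22
   selecting the immediate form: */
#include <assert.h>
#include <stdint.h>

static void demo_type2_imm(void)
{
	uint32_t imm8 = 0xab;
	uint32_t enc = (imm8 & 0xf) | ((imm8 & 0xf0) << 4) | (1u << 22);
	assert(enc == ((1u << 22) | 0xa00 | 0x00b)); /* 0xa -> bits 8-11, 0xb -> bits 0-3 */
}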
+#define EMIT_FPU_OPERATION(opcode, mode, dst, src1, src2) \
+ ((sljit_ins)(opcode) | (sljit_ins)(mode) | VD(dst) | VM(src1) | VN(src2))
+
+/* Flags for emit_op: */
+ /* Arguments are swapped. */
+#define ARGS_SWAPPED 0x01
+ /* Inverted immediate. */
+#define INV_IMM 0x02
+ /* Source and destination is register. */
+#define MOVE_REG_CONV 0x04
+ /* Unused return value. */
+#define UNUSED_RETURN 0x08
+/* SET_FLAGS must be (1 << 20) as it is also the value of S bit (can be used for optimization). */
+#define SET_FLAGS (1 << 20)
+/* dst: reg
+ src1: reg
+ src2: reg or imm (if allowed)
+ SRC2_IMM must be (1 << 25) as it is also the value of I bit (can be used for optimization). */
+#define SRC2_IMM (1 << 25)
+
static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 inp_flags,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src1, sljit_sw src1w,
@@ -1017,41 +1133,164 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
- sljit_s32 args, size, i, tmp;
- sljit_uw push;
+ sljit_uw imm, offset;
+ sljit_s32 i, tmp, size, word_arg_count;
+ sljit_s32 saved_arg_count = SLJIT_KEPT_SAVEDS_COUNT(options);
+#ifdef __SOFTFP__
+ sljit_u32 float_arg_count;
+#else
+ sljit_u32 old_offset, f32_offset;
+ sljit_u32 remap[3];
+ sljit_u32 *remap_ptr = remap;
+#endif
CHECK_ERROR();
CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
- /* Push saved registers, temporary registers
- stmdb sp!, {..., lr} */
- push = PUSH | (1 << 14);
+ imm = 0;
- tmp = saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - saveds) : SLJIT_FIRST_SAVED_REG;
- for (i = SLJIT_S0; i >= tmp; i--)
- push |= 1 << reg_map[i];
+ tmp = SLJIT_S0 - saveds;
+ for (i = SLJIT_S0 - saved_arg_count; i > tmp; i--)
+ imm |= (sljit_uw)1 << reg_map[i];
for (i = scratches; i >= SLJIT_FIRST_SAVED_REG; i--)
- push |= 1 << reg_map[i];
+ imm |= (sljit_uw)1 << reg_map[i];
+
+ SLJIT_ASSERT(reg_map[TMP_REG2] == 14);
- FAIL_IF(push_inst(compiler, push));
+ /* Push saved and temporary registers
+ multiple registers: stmdb sp!, {..., lr}
+ single register: str reg, [sp, #-4]! */
+ if (imm != 0)
+ FAIL_IF(push_inst(compiler, PUSH | (1 << 14) | imm));
+ else
+ FAIL_IF(push_inst(compiler, 0xe52d0004 | RD(TMP_REG2)));
/* Stack must be aligned to 8 bytes: */
- size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1);
- local_size = ((size + local_size + 7) & ~7) - size;
+ size = GET_SAVED_REGISTERS_SIZE(scratches, saveds - saved_arg_count, 1);
+
+ if (fsaveds > 0 || fscratches >= SLJIT_FIRST_SAVED_FLOAT_REG) {
+ if ((size & SSIZE_OF(sw)) != 0) {
+ FAIL_IF(push_inst(compiler, SUB | RD(SLJIT_SP) | RN(SLJIT_SP) | SRC2_IMM | sizeof(sljit_sw)));
+ size += SSIZE_OF(sw);
+ }
+
+ if (fsaveds + fscratches >= SLJIT_NUMBER_OF_FLOAT_REGISTERS) {
+ FAIL_IF(push_inst(compiler, VPUSH | VD(SLJIT_FS0) | ((sljit_ins)SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS << 1)));
+ } else {
+ if (fsaveds > 0)
+ FAIL_IF(push_inst(compiler, VPUSH | VD(SLJIT_FS0) | ((sljit_ins)fsaveds << 1)));
+ if (fscratches >= SLJIT_FIRST_SAVED_FLOAT_REG)
+ FAIL_IF(push_inst(compiler, VPUSH | VD(fscratches) | ((sljit_ins)(fscratches - (SLJIT_FIRST_SAVED_FLOAT_REG - 1)) << 1)));
+ }
+ }
+
+ local_size = ((size + local_size + 0x7) & ~0x7) - size;
compiler->local_size = local_size;
- if (local_size > 0)
- FAIL_IF(emit_op(compiler, SLJIT_SUB, ALLOW_IMM, SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, local_size));
- args = get_arg_count(arg_types);
+ if (options & SLJIT_ENTER_REG_ARG)
+ arg_types = 0;
+
+ arg_types >>= SLJIT_ARG_SHIFT;
+ word_arg_count = 0;
+ saved_arg_count = 0;
+#ifdef __SOFTFP__
+ SLJIT_COMPILE_ASSERT(SLJIT_FR0 == 1, float_register_index_start);
+
+ offset = 0;
+ float_arg_count = 0;
+
+ while (arg_types) {
+ switch (arg_types & SLJIT_ARG_MASK) {
+ case SLJIT_ARG_TYPE_F64:
+ if (offset & 0x7)
+ offset += sizeof(sljit_sw);
+
+ if (offset < 4 * sizeof(sljit_sw))
+ FAIL_IF(push_inst(compiler, VMOV2 | (offset << 10) | ((offset + sizeof(sljit_sw)) << 14) | float_arg_count));
+ else
+ FAIL_IF(push_inst(compiler, VLDR_F32 | 0x800100 | RN(SLJIT_SP)
+ | (float_arg_count << 12) | ((offset + (sljit_ins)size - 4 * sizeof(sljit_sw)) >> 2)));
+ float_arg_count++;
+ offset += sizeof(sljit_f64) - sizeof(sljit_sw);
+ break;
+ case SLJIT_ARG_TYPE_F32:
+ if (offset < 4 * sizeof(sljit_sw))
+ FAIL_IF(push_inst(compiler, VMOV | (float_arg_count << 16) | (offset << 10)));
+ else
+ FAIL_IF(push_inst(compiler, VLDR_F32 | 0x800000 | RN(SLJIT_SP)
+ | (float_arg_count << 12) | ((offset + (sljit_ins)size - 4 * sizeof(sljit_sw)) >> 2)));
+ float_arg_count++;
+ break;
+ default:
+ word_arg_count++;
+
+ if (!(arg_types & SLJIT_ARG_TYPE_SCRATCH_REG)) {
+ tmp = SLJIT_S0 - saved_arg_count;
+ saved_arg_count++;
+ } else if (word_arg_count - 1 != (sljit_s32)(offset >> 2))
+ tmp = word_arg_count;
+ else
+ break;
+
+ if (offset < 4 * sizeof(sljit_sw))
+ FAIL_IF(push_inst(compiler, MOV | RD(tmp) | (offset >> 2)));
+ else
+ FAIL_IF(push_inst(compiler, LDR | 0x800000 | RN(SLJIT_SP) | RD(tmp) | (offset + (sljit_ins)size - 4 * sizeof(sljit_sw))));
+ break;
+ }
+
+ offset += sizeof(sljit_sw);
+ arg_types >>= SLJIT_ARG_SHIFT;
+ }
+
+ compiler->args_size = offset;
+#else
+ offset = SLJIT_FR0;
+ old_offset = SLJIT_FR0;
+ f32_offset = 0;
+
+ while (arg_types) {
+ switch (arg_types & SLJIT_ARG_MASK) {
+ case SLJIT_ARG_TYPE_F64:
+ if (offset != old_offset)
+ *remap_ptr++ = EMIT_FPU_OPERATION(VMOV_F32, SLJIT_32, offset, old_offset, 0);
+ old_offset++;
+ offset++;
+ break;
+ case SLJIT_ARG_TYPE_F32:
+ if (f32_offset != 0) {
+ *remap_ptr++ = EMIT_FPU_OPERATION(VMOV_F32, 0x20, offset, f32_offset, 0);
+ f32_offset = 0;
+ } else {
+ if (offset != old_offset)
+ *remap_ptr++ = EMIT_FPU_OPERATION(VMOV_F32, 0, offset, old_offset, 0);
+ f32_offset = old_offset;
+ old_offset++;
+ }
+ offset++;
+ break;
+ default:
+ if (!(arg_types & SLJIT_ARG_TYPE_SCRATCH_REG)) {
+ FAIL_IF(push_inst(compiler, MOV | RD(SLJIT_S0 - saved_arg_count) | RM(SLJIT_R0 + word_arg_count)));
+ saved_arg_count++;
+ }
+
+ word_arg_count++;
+ break;
+ }
+ arg_types >>= SLJIT_ARG_SHIFT;
+ }
+
+ SLJIT_ASSERT((sljit_uw)(remap_ptr - remap) <= sizeof(remap));
+
+ while (remap_ptr > remap)
+ FAIL_IF(push_inst(compiler, *(--remap_ptr)));
+#endif
- if (args >= 1)
- FAIL_IF(push_inst(compiler, MOV | RD(SLJIT_S0) | RM(SLJIT_R0)));
- if (args >= 2)
- FAIL_IF(push_inst(compiler, MOV | RD(SLJIT_S1) | RM(SLJIT_R1)));
- if (args >= 3)
- FAIL_IF(push_inst(compiler, MOV | RD(SLJIT_S2) | RM(SLJIT_R2)));
+ if (local_size > 0)
+ FAIL_IF(emit_op(compiler, SLJIT_SUB, ALLOW_IMM | ALLOW_DOUBLE_IMM, SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, local_size));
return SLJIT_SUCCESS;
}
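/* [Illustrative sketch, not part of the patch; aligned_local is a
   hypothetical helper.] The frame rounding used in sljit_emit_enter()
   above keeps size + local_size a multiple of 8 by charging the
   padding to local_size: */
#include <assert.h>

static int aligned_local(int size, int local_size)
{
	return ((size + local_size + 0x7) & ~0x7) - size;
}

static void demo_alignment(void)
{
	assert(aligned_local(20, 9) == 12);  /* 20 + 12 = 32, 8-byte aligned */
	assert(aligned_local(24, 16) == 16); /* already aligned, unchanged */
}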
@@ -1066,77 +1305,203 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *comp
CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
- size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1);
- compiler->local_size = ((size + local_size + 7) & ~7) - size;
+ size = GET_SAVED_REGISTERS_SIZE(scratches, saveds - SLJIT_KEPT_SAVEDS_COUNT(options), 1);
+
+ /* Doubles are saved, so alignment is unaffected. */
+ if ((size & SSIZE_OF(sw)) != 0 && (fsaveds > 0 || fscratches >= SLJIT_FIRST_SAVED_FLOAT_REG))
+ size += SSIZE_OF(sw);
+
+ compiler->local_size = ((size + local_size + 0x7) & ~0x7) - size;
return SLJIT_SUCCESS;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw)
+static sljit_s32 emit_add_sp(struct sljit_compiler *compiler, sljit_uw imm)
{
- sljit_s32 i, tmp;
- sljit_uw pop;
+ sljit_uw imm2 = get_imm(imm);
- CHECK_ERROR();
- CHECK(check_sljit_emit_return(compiler, op, src, srcw));
+ if (imm2 == 0)
+ return emit_op(compiler, SLJIT_ADD, ALLOW_IMM | ALLOW_DOUBLE_IMM, SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, (sljit_sw)imm);
+
+ return push_inst(compiler, ADD | RD(SLJIT_SP) | RN(SLJIT_SP) | imm2);
+}
+
+static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler, sljit_s32 frame_size)
+{
+ sljit_s32 local_size, fscratches, fsaveds, i, tmp;
+ sljit_s32 restored_reg = 0;
+ sljit_s32 lr_dst = TMP_PC;
+ sljit_uw reg_list = 0;
+
+ SLJIT_ASSERT(reg_map[TMP_REG2] == 14 && frame_size <= 128);
+
+ local_size = compiler->local_size;
+ fscratches = compiler->fscratches;
+ fsaveds = compiler->fsaveds;
+
+ if (fsaveds > 0 || fscratches >= SLJIT_FIRST_SAVED_FLOAT_REG) {
+ if (local_size > 0)
+ FAIL_IF(emit_add_sp(compiler, (sljit_uw)local_size));
+
+ if (fsaveds + fscratches >= SLJIT_NUMBER_OF_FLOAT_REGISTERS) {
+ FAIL_IF(push_inst(compiler, VPOP | VD(SLJIT_FS0) | ((sljit_ins)SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS << 1)));
+ } else {
+ if (fscratches >= SLJIT_FIRST_SAVED_FLOAT_REG)
+ FAIL_IF(push_inst(compiler, VPOP | VD(fscratches) | ((sljit_ins)(fscratches - (SLJIT_FIRST_SAVED_FLOAT_REG - 1)) << 1)));
+ if (fsaveds > 0)
+ FAIL_IF(push_inst(compiler, VPOP | VD(SLJIT_FS0) | ((sljit_ins)fsaveds << 1)));
+ }
+
+ local_size = GET_SAVED_REGISTERS_SIZE(compiler->scratches, compiler->saveds, 1) & 0x7;
+ }
+
+ if (frame_size < 0) {
+ lr_dst = TMP_REG2;
+ frame_size = 0;
+ } else if (frame_size > 0) {
+ SLJIT_ASSERT(frame_size == 1 || (frame_size & 0x7) == 0);
+ lr_dst = 0;
+ frame_size &= ~0x7;
+ }
+
+ if (lr_dst != 0)
+ reg_list |= (sljit_uw)1 << reg_map[lr_dst];
+
+ tmp = SLJIT_S0 - compiler->saveds;
+ i = SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options);
+ if (tmp < i) {
+ restored_reg = i;
+ do {
+ reg_list |= (sljit_uw)1 << reg_map[i];
+ } while (--i > tmp);
+ }
+
+ i = compiler->scratches;
+ if (i >= SLJIT_FIRST_SAVED_REG) {
+ restored_reg = i;
+ do {
+ reg_list |= (sljit_uw)1 << reg_map[i];
+ } while (--i >= SLJIT_FIRST_SAVED_REG);
+ }
+
+ if (lr_dst == TMP_REG2 && reg_list == 0) {
+ restored_reg = TMP_REG2;
+ lr_dst = 0;
+ }
+
+ if (lr_dst == 0 && (reg_list & (reg_list - 1)) == 0) {
+ /* The local_size does not include the saved registers. */
+ tmp = 0;
+ if (reg_list != 0) {
+ tmp = 2;
+ if (local_size <= 0xfff) {
+ if (local_size == 0) {
+ SLJIT_ASSERT(restored_reg != TMP_REG2);
+ if (frame_size == 0)
+ return push_inst(compiler, LDR_POST | RN(SLJIT_SP) | RD(restored_reg) | 0x800008);
+ if (frame_size > 2 * SSIZE_OF(sw))
+ return push_inst(compiler, LDR_POST | RN(SLJIT_SP) | RD(restored_reg) | (sljit_ins)(frame_size - (2 * SSIZE_OF(sw))));
+ }
+
+ FAIL_IF(push_inst(compiler, LDR | 0x800000 | RN(SLJIT_SP) | RD(restored_reg) | (sljit_ins)local_size));
+ tmp = 1;
+ } else if (frame_size == 0) {
+ frame_size = (restored_reg == TMP_REG2) ? SSIZE_OF(sw) : 2 * SSIZE_OF(sw);
+ tmp = 3;
+ }
+
+ /* Place for the saved register. */
+ if (restored_reg != TMP_REG2)
+ local_size += SSIZE_OF(sw);
+ }
+
+ /* Place for the lr register. */
+ local_size += SSIZE_OF(sw);
+
+ if (frame_size > local_size)
+ FAIL_IF(push_inst(compiler, SUB | RD(SLJIT_SP) | RN(SLJIT_SP) | (1 << 25) | (sljit_ins)(frame_size - local_size)));
+ else if (frame_size < local_size)
+ FAIL_IF(emit_add_sp(compiler, (sljit_uw)(local_size - frame_size)));
+
+ if (tmp <= 1)
+ return SLJIT_SUCCESS;
+
+ if (tmp == 2) {
+ frame_size -= SSIZE_OF(sw);
+ if (restored_reg != TMP_REG2)
+ frame_size -= SSIZE_OF(sw);
+
+ return push_inst(compiler, LDR | 0x800000 | RN(SLJIT_SP) | RD(restored_reg) | (sljit_ins)frame_size);
+ }
+
+ tmp = (restored_reg == TMP_REG2) ? 0x800004 : 0x800008;
+ return push_inst(compiler, LDR_POST | RN(SLJIT_SP) | RD(restored_reg) | (sljit_ins)tmp);
+ }
+
+ if (local_size > 0)
+ FAIL_IF(emit_add_sp(compiler, (sljit_uw)local_size));
+
+ /* Pop saved and temporary registers
+ multiple registers: ldmia sp!, {...}
+ single register: ldr reg, [sp], #4 */
+ if ((reg_list & (reg_list - 1)) == 0) {
+ SLJIT_ASSERT(lr_dst != 0);
+ SLJIT_ASSERT(reg_list == (sljit_uw)1 << reg_map[lr_dst]);
+
+ return push_inst(compiler, LDR_POST | RN(SLJIT_SP) | RD(lr_dst) | 0x800004);
+ }
+
+ FAIL_IF(push_inst(compiler, POP | reg_list));
+
+ if (frame_size > 0)
+ return push_inst(compiler, SUB | RD(SLJIT_SP) | RN(SLJIT_SP) | (1 << 25) | ((sljit_ins)frame_size - sizeof(sljit_sw)));
- FAIL_IF(emit_mov_before_return(compiler, op, src, srcw));
+ if (lr_dst != 0)
+ return SLJIT_SUCCESS;
+
+ return push_inst(compiler, ADD | RD(SLJIT_SP) | RN(SLJIT_SP) | (1 << 25) | sizeof(sljit_sw));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler *compiler)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_return_void(compiler));
- if (compiler->local_size > 0)
- FAIL_IF(emit_op(compiler, SLJIT_ADD, ALLOW_IMM, SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, compiler->local_size));
+ return emit_stack_frame_release(compiler, 0);
+}
- /* Push saved registers, temporary registers
- ldmia sp!, {..., pc} */
- pop = POP | (1 << 15);
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_to(struct sljit_compiler *compiler,
+ sljit_s32 src, sljit_sw srcw)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_return_to(compiler, src, srcw));
- tmp = compiler->saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - compiler->saveds) : SLJIT_FIRST_SAVED_REG;
- for (i = SLJIT_S0; i >= tmp; i--)
- pop |= 1 << reg_map[i];
+ if (src & SLJIT_MEM) {
+ FAIL_IF(emit_op_mem(compiler, WORD_SIZE | LOAD_DATA, TMP_REG1, src, srcw, TMP_REG1));
+ src = TMP_REG1;
+ srcw = 0;
+ } else if (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) {
+ FAIL_IF(push_inst(compiler, MOV | RD(TMP_REG1) | RM(src)));
+ src = TMP_REG1;
+ srcw = 0;
+ }
- for (i = compiler->scratches; i >= SLJIT_FIRST_SAVED_REG; i--)
- pop |= 1 << reg_map[i];
+ FAIL_IF(emit_stack_frame_release(compiler, 1));
- return push_inst(compiler, pop);
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_ijump(compiler, SLJIT_JUMP, src, srcw);
}
/* --------------------------------------------------------------------- */
/* Operators */
/* --------------------------------------------------------------------- */
-/* flags: */
- /* Arguments are swapped. */
-#define ARGS_SWAPPED 0x01
- /* Inverted immediate. */
-#define INV_IMM 0x02
- /* Source and destination is register. */
-#define MOVE_REG_CONV 0x04
- /* Unused return value. */
-#define UNUSED_RETURN 0x08
-/* SET_FLAGS must be (1 << 20) as it is also the value of S bit (can be used for optimization). */
-#define SET_FLAGS (1 << 20)
-/* dst: reg
- src1: reg
- src2: reg or imm (if allowed)
- SRC2_IMM must be (1 << 25) as it is also the value of I bit (can be used for optimization). */
-#define SRC2_IMM (1 << 25)
-
-#define EMIT_SHIFT_INS_AND_RETURN(opcode) \
- SLJIT_ASSERT(!(flags & INV_IMM) && !(src2 & SRC2_IMM)); \
- if (compiler->shift_imm != 0x20) { \
- SLJIT_ASSERT(src1 == TMP_REG1); \
- SLJIT_ASSERT(!(flags & ARGS_SWAPPED)); \
- \
- if (compiler->shift_imm != 0) \
- return push_inst(compiler, MOV | (flags & SET_FLAGS) | \
- RD(dst) | (compiler->shift_imm << 7) | (opcode << 5) | RM(src2)); \
- return push_inst(compiler, MOV | (flags & SET_FLAGS) | RD(dst) | RM(src2)); \
- } \
- return push_inst(compiler, MOV | (flags & SET_FLAGS) | RD(dst) | \
- (reg_map[(flags & ARGS_SWAPPED) ? src1 : src2] << 8) | (opcode << 5) | 0x10 | RM((flags & ARGS_SWAPPED) ? src2 : src1));
-
static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags,
- sljit_s32 dst, sljit_s32 src1, sljit_s32 src2)
+ sljit_uw dst, sljit_uw src1, sljit_uw src2)
{
- switch (GET_OPCODE(op)) {
+ sljit_s32 is_masked;
+ sljit_uw shift_type;
+
+ switch (op) {
case SLJIT_MOV:
SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & ARGS_SWAPPED));
if (dst != src2) {
@@ -1150,17 +1515,10 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
case SLJIT_MOV_U8:
case SLJIT_MOV_S8:
SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & ARGS_SWAPPED));
- if (flags & MOVE_REG_CONV) {
-#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
- if (op == SLJIT_MOV_U8)
- return push_inst(compiler, AND | RD(dst) | RN(src2) | SRC2_IMM | 0xff);
- FAIL_IF(push_inst(compiler, MOV | RD(dst) | (24 << 7) | RM(src2)));
- return push_inst(compiler, MOV | RD(dst) | (24 << 7) | (op == SLJIT_MOV_U8 ? 0x20 : 0x40) | RM(dst));
-#else
+ if (flags & MOVE_REG_CONV)
return push_inst(compiler, (op == SLJIT_MOV_U8 ? UXTB : SXTB) | RD(dst) | RM(src2));
-#endif
- }
- else if (dst != src2) {
+
+ if (dst != src2) {
SLJIT_ASSERT(src2 & SRC2_IMM);
return push_inst(compiler, ((flags & INV_IMM) ? MVN : MOV) | RD(dst) | src2);
}
@@ -1169,37 +1527,51 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
case SLJIT_MOV_U16:
case SLJIT_MOV_S16:
SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & ARGS_SWAPPED));
- if (flags & MOVE_REG_CONV) {
-#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
- FAIL_IF(push_inst(compiler, MOV | RD(dst) | (16 << 7) | RM(src2)));
- return push_inst(compiler, MOV | RD(dst) | (16 << 7) | (op == SLJIT_MOV_U16 ? 0x20 : 0x40) | RM(dst));
-#else
+ if (flags & MOVE_REG_CONV)
return push_inst(compiler, (op == SLJIT_MOV_U16 ? UXTH : SXTH) | RD(dst) | RM(src2));
-#endif
- }
- else if (dst != src2) {
+
+ if (dst != src2) {
SLJIT_ASSERT(src2 & SRC2_IMM);
return push_inst(compiler, ((flags & INV_IMM) ? MVN : MOV) | RD(dst) | src2);
}
return SLJIT_SUCCESS;
- case SLJIT_NOT:
- if (src2 & SRC2_IMM) {
- return push_inst(compiler, ((flags & INV_IMM) ? MOV : MVN) | (flags & SET_FLAGS) | RD(dst) | src2);
- }
- return push_inst(compiler, MVN | (flags & SET_FLAGS) | RD(dst) | RM(src2));
-
case SLJIT_CLZ:
- SLJIT_ASSERT(!(flags & INV_IMM));
- SLJIT_ASSERT(!(src2 & SRC2_IMM));
+ SLJIT_ASSERT(!(flags & INV_IMM) && !(src2 & SRC2_IMM));
FAIL_IF(push_inst(compiler, CLZ | RD(dst) | RM(src2)));
return SLJIT_SUCCESS;
+ case SLJIT_CTZ:
+ SLJIT_ASSERT(!(flags & INV_IMM) && !(src2 & SRC2_IMM));
+ SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & ARGS_SWAPPED));
+#if (defined SLJIT_CONFIG_ARM_V6 && SLJIT_CONFIG_ARM_V6)
+ FAIL_IF(push_inst(compiler, RSB | SRC2_IMM | RD(TMP_REG1) | RN(src2) | 0));
+ FAIL_IF(push_inst(compiler, AND | RD(TMP_REG2) | RN(src2) | RM(TMP_REG1)));
+ FAIL_IF(push_inst(compiler, CLZ | RD(dst) | RM(TMP_REG2)));
+ FAIL_IF(push_inst(compiler, CMP | SET_FLAGS | SRC2_IMM | RN(dst) | 32));
+ return push_inst(compiler, (EOR ^ 0xf0000000) | SRC2_IMM | RD(dst) | RN(dst) | 0x1f);
+#else /* !SLJIT_CONFIG_ARM_V6 */
+ FAIL_IF(push_inst(compiler, RBIT | RD(dst) | RM(src2)));
+ return push_inst(compiler, CLZ | RD(dst) | RM(dst));
+#endif /* SLJIT_CONFIG_ARM_V6 */
+
+ case SLJIT_REV:
+ case SLJIT_REV_U32:
+ case SLJIT_REV_S32:
+ SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & ARGS_SWAPPED));
+ return push_inst(compiler, REV | RD(dst) | RM(src2));
+
+ case SLJIT_REV_U16:
+ case SLJIT_REV_S16:
+ SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & ARGS_SWAPPED) && src2 != TMP_REG1 && dst != TMP_REG1);
+ FAIL_IF(push_inst(compiler, REV16 | RD(dst) | RM(src2)));
+ if (dst == TMP_REG2 || (src2 == TMP_REG2 && op == SLJIT_REV_U16))
+ return SLJIT_SUCCESS;
+ return push_inst(compiler, (op == SLJIT_REV_U16 ? UXTH : SXTH) | RD(dst) | RM(dst));
case SLJIT_ADD:
SLJIT_ASSERT(!(flags & INV_IMM));
- compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD_SUB;
- if ((flags & (UNUSED_RETURN | SET_FLAGS)) == (UNUSED_RETURN | SET_FLAGS) && !(flags & ARGS_SWAPPED))
+ if ((flags & (UNUSED_RETURN | ARGS_SWAPPED)) == UNUSED_RETURN)
return push_inst(compiler, CMN | SET_FLAGS | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2)));
return push_inst(compiler, ADD | (flags & SET_FLAGS) | RD(dst) | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2)));
@@ -1209,10 +1581,10 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
case SLJIT_SUB:
SLJIT_ASSERT(!(flags & INV_IMM));
- compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD_SUB;
- if ((flags & (UNUSED_RETURN | SET_FLAGS)) == (UNUSED_RETURN | SET_FLAGS) && !(flags & ARGS_SWAPPED))
+ if ((flags & (UNUSED_RETURN | ARGS_SWAPPED)) == UNUSED_RETURN)
return push_inst(compiler, CMP | SET_FLAGS | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2)));
+
return push_inst(compiler, (!(flags & ARGS_SWAPPED) ? SUB : RSB) | (flags & SET_FLAGS)
| RD(dst) | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2)));
@@ -1226,15 +1598,17 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
SLJIT_ASSERT(!(src2 & SRC2_IMM));
compiler->status_flags_state = 0;
- if (!HAS_FLAGS(op))
- return push_inst(compiler, MUL | (reg_map[dst] << 16) | (reg_map[src2] << 8) | reg_map[src1]);
+ if (!(flags & SET_FLAGS))
+ return push_inst(compiler, MUL | RN(dst) | RM8(src2) | RM(src1));
- FAIL_IF(push_inst(compiler, SMULL | (reg_map[TMP_REG1] << 16) | (reg_map[dst] << 12) | (reg_map[src2] << 8) | reg_map[src1]));
+ FAIL_IF(push_inst(compiler, SMULL | RN(TMP_REG1) | RD(dst) | RM8(src2) | RM(src1)));
/* cmp TMP_REG1, dst asr #31. */
return push_inst(compiler, CMP | SET_FLAGS | RN(TMP_REG1) | RM(dst) | 0xfc0);
case SLJIT_AND:
+ if ((flags & (UNUSED_RETURN | INV_IMM)) == UNUSED_RETURN)
+ return push_inst(compiler, TST | SET_FLAGS | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2)));
return push_inst(compiler, (!(flags & INV_IMM) ? AND : BIC) | (flags & SET_FLAGS)
| RD(dst) | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2)));
@@ -1243,21 +1617,68 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
return push_inst(compiler, ORR | (flags & SET_FLAGS) | RD(dst) | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2)));
case SLJIT_XOR:
- SLJIT_ASSERT(!(flags & INV_IMM));
+ if (flags & INV_IMM) {
+ SLJIT_ASSERT(src2 == SRC2_IMM);
+ return push_inst(compiler, MVN | (flags & SET_FLAGS) | RD(dst) | RM(src1));
+ }
return push_inst(compiler, EOR | (flags & SET_FLAGS) | RD(dst) | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2)));
case SLJIT_SHL:
- EMIT_SHIFT_INS_AND_RETURN(0);
+ case SLJIT_MSHL:
+ shift_type = 0;
+ is_masked = op == SLJIT_MSHL;
+ break;
case SLJIT_LSHR:
- EMIT_SHIFT_INS_AND_RETURN(1);
+ case SLJIT_MLSHR:
+ shift_type = 1;
+ is_masked = op == SLJIT_MLSHR;
+ break;
case SLJIT_ASHR:
- EMIT_SHIFT_INS_AND_RETURN(2);
+ case SLJIT_MASHR:
+ shift_type = 2;
+ is_masked = op == SLJIT_MASHR;
+ break;
+
+ case SLJIT_ROTL:
+ if (compiler->shift_imm == 0x20) {
+ FAIL_IF(push_inst(compiler, RSB | SRC2_IMM | RD(TMP_REG2) | RN(src2) | 0));
+ src2 = TMP_REG2;
+ } else
+ compiler->shift_imm = (sljit_uw)(-(sljit_sw)compiler->shift_imm) & 0x1f;
+ /* fallthrough */
+
+ case SLJIT_ROTR:
+ shift_type = 3;
+ is_masked = 0;
+ break;
+
+ default:
+ SLJIT_UNREACHABLE();
+ return SLJIT_SUCCESS;
}
- SLJIT_UNREACHABLE();
- return SLJIT_SUCCESS;
+ SLJIT_ASSERT(!(flags & ARGS_SWAPPED) && !(flags & INV_IMM) && !(src2 & SRC2_IMM));
+
+ if (compiler->shift_imm != 0x20) {
+ SLJIT_ASSERT(src1 == TMP_REG1);
+
+ if (compiler->shift_imm != 0)
+ return push_inst(compiler, MOV | (flags & SET_FLAGS) |
+ RD(dst) | (compiler->shift_imm << 7) | (shift_type << 5) | RM(src2));
+ return push_inst(compiler, MOV | (flags & SET_FLAGS) | RD(dst) | RM(src2));
+ }
+
+ SLJIT_ASSERT(src1 != TMP_REG2);
+
+ if (is_masked) {
+ FAIL_IF(push_inst(compiler, AND | RD(TMP_REG2) | RN(src2) | SRC2_IMM | 0x1f));
+ src2 = TMP_REG2;
+ }
+
+ return push_inst(compiler, MOV | (flags & SET_FLAGS) | RD(dst)
+ | RM8(src2) | (sljit_ins)(shift_type << 5) | 0x10 | RM(src1));
}
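/* [Illustrative sketch, not part of the patch; clz32/ctz32 are
   hypothetical stand-ins.] The ARMv6 SLJIT_CTZ fallback above isolates
   the lowest set bit with x & -x, takes CLZ, and maps the result
   through XOR 0x1f while keeping 32 for a zero input, since for x != 0
   CLZ(x & -x) == 31 - CTZ(x), and v ^ 31 == 31 - v for v in 0..31: */
#include <assert.h>
#include <stdint.h>

static unsigned clz32(uint32_t x) /* models the CLZ instruction */
{
	unsigned n = 0;
	if (x == 0)
		return 32;
	while (!(x & 0x80000000u)) {
		x <<= 1;
		n++;
	}
	return n;
}

static unsigned ctz32(uint32_t x)
{
	unsigned d = clz32(x & (0u - x));
	return (d == 32) ? 32 : (d ^ 31);
}

static void demo_ctz(void)
{
	assert(ctz32(0) == 32 && ctz32(1) == 0 && ctz32(0x28) == 3);
}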
#undef EMIT_SHIFT_INS_AND_RETURN
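/* [Illustrative sketch, not part of the patch; rotr32 is a hypothetical
   stand-in for ARM's rotate-right operand.] SLJIT_ROTL above negates
   the shift amount modulo 32 because the hardware only rotates right,
   and rotl(x, n) == rotr(x, (32 - n) & 31): */
#include <assert.h>
#include <stdint.h>

static uint32_t rotr32(uint32_t x, unsigned n)
{
	n &= 31;
	return n ? ((x >> n) | (x << (32 - n))) : x;
}

static void demo_rotl_as_rotr(void)
{
	uint32_t x = 0x80000001u;
	unsigned n = 5;
	assert(rotr32(x, (32 - n) & 31) == ((x << n) | (x >> (32 - n))));
}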
@@ -1266,7 +1687,7 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
Returns with 0 if not possible. */
static sljit_uw get_imm(sljit_uw imm)
{
- sljit_s32 rol;
+ sljit_u32 rol;
if (imm <= 0xff)
return SRC2_IMM | imm;
@@ -1274,8 +1695,7 @@ static sljit_uw get_imm(sljit_uw imm)
if (!(imm & 0xff000000)) {
imm <<= 8;
rol = 8;
- }
- else {
+ } else {
imm = (imm << 24) | (imm >> 8);
rol = 0;
}
@@ -1297,22 +1717,19 @@ static sljit_uw get_imm(sljit_uw imm)
if (!(imm & 0x00ffffff))
return SRC2_IMM | (imm >> 24) | (rol << 8);
- else
- return 0;
+ return 0;
}
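/* [Illustrative sketch, not part of the patch; fits_operand2 is a
   hypothetical standalone check.] get_imm() above targets the ARM
   Operand2 immediate form: an 8-bit value rotated right by an even
   amount, so a value is encodable iff some even left-rotation of it
   fits in 8 bits: */
#include <stdint.h>

static int fits_operand2(uint32_t imm)
{
	uint32_t rot;
	if (imm <= 0xff)
		return 1;
	for (rot = 2; rot < 32; rot += 2)
		if ((((imm << rot) | (imm >> (32 - rot))) & 0xffffffffu) <= 0xff)
			return 1;
	return 0;
}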
-#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
-static sljit_s32 generate_int(struct sljit_compiler *compiler, sljit_s32 reg, sljit_uw imm, sljit_s32 positive)
+static sljit_uw compute_imm(sljit_uw imm, sljit_uw* imm2)
{
sljit_uw mask;
sljit_uw imm1;
- sljit_uw imm2;
- sljit_s32 rol;
+ sljit_uw rol;
 	/* Step1: Search a zero byte (8 continuous zero bits). */
mask = 0xff000000;
rol = 8;
- while(1) {
+ while (1) {
if (!(imm & mask)) {
/* Rol imm by rol. */
imm = (imm << rol) | (imm >> (32 - rol));
@@ -1320,6 +1737,7 @@ static sljit_s32 generate_int(struct sljit_compiler *compiler, sljit_s32 reg, sl
rol = 4 + (rol >> 1);
break;
}
+
rol += 2;
mask >>= 2;
if (mask & 0x3) {
@@ -1349,9 +1767,8 @@ static sljit_s32 generate_int(struct sljit_compiler *compiler, sljit_s32 reg, sl
if (!(imm & 0xff000000)) {
imm1 = SRC2_IMM | ((imm >> 16) & 0xff) | (((rol + 4) & 0xf) << 8);
- imm2 = SRC2_IMM | ((imm >> 8) & 0xff) | (((rol + 8) & 0xf) << 8);
- }
- else if (imm & 0xc0000000) {
+ *imm2 = SRC2_IMM | ((imm >> 8) & 0xff) | (((rol + 8) & 0xf) << 8);
+ } else if (imm & 0xc0000000) {
imm1 = SRC2_IMM | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
imm <<= 8;
rol += 4;
@@ -1372,11 +1789,10 @@ static sljit_s32 generate_int(struct sljit_compiler *compiler, sljit_s32 reg, sl
}
if (!(imm & 0x00ffffff))
- imm2 = SRC2_IMM | (imm >> 24) | ((rol & 0xf) << 8);
+ *imm2 = SRC2_IMM | (imm >> 24) | ((rol & 0xf) << 8);
else
return 0;
- }
- else {
+ } else {
if (!(imm & 0xf0000000)) {
imm <<= 4;
rol += 2;
@@ -1402,25 +1818,23 @@ static sljit_s32 generate_int(struct sljit_compiler *compiler, sljit_s32 reg, sl
}
if (!(imm & 0x00ffffff))
- imm2 = SRC2_IMM | (imm >> 24) | ((rol & 0xf) << 8);
+ *imm2 = SRC2_IMM | (imm >> 24) | ((rol & 0xf) << 8);
else
return 0;
}
- FAIL_IF(push_inst(compiler, (positive ? MOV : MVN) | RD(reg) | imm1));
- FAIL_IF(push_inst(compiler, (positive ? ORR : BIC) | RD(reg) | RN(reg) | imm2));
- return 1;
+ return imm1;
}
-#endif
static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 reg, sljit_uw imm)
{
sljit_uw tmp;
-
-#if (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)
- if (!(imm & ~0xffff))
+#if (defined SLJIT_CONFIG_ARM_V6 && SLJIT_CONFIG_ARM_V6)
+ sljit_uw imm1, imm2;
+#else /* !SLJIT_CONFIG_ARM_V6 */
+ if (!(imm & ~(sljit_uw)0xffff))
return push_inst(compiler, MOVW | RD(reg) | ((imm << 4) & 0xf0000) | (imm & 0xfff));
-#endif
+#endif /* SLJIT_CONFIG_ARM_V6 */
/* Create imm by 1 inst. */
tmp = get_imm(imm);
@@ -1431,42 +1845,56 @@ static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 reg,
if (tmp)
return push_inst(compiler, MVN | RD(reg) | tmp);
-#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+#if (defined SLJIT_CONFIG_ARM_V6 && SLJIT_CONFIG_ARM_V6)
/* Create imm by 2 inst. */
- FAIL_IF(generate_int(compiler, reg, imm, 1));
- FAIL_IF(generate_int(compiler, reg, ~imm, 0));
+ imm1 = compute_imm(imm, &imm2);
+ if (imm1 != 0) {
+ FAIL_IF(push_inst(compiler, MOV | RD(reg) | imm1));
+ return push_inst(compiler, ORR | RD(reg) | RN(reg) | imm2);
+ }
+
+ imm1 = compute_imm(~imm, &imm2);
+ if (imm1 != 0) {
+ FAIL_IF(push_inst(compiler, MVN | RD(reg) | imm1));
+ return push_inst(compiler, BIC | RD(reg) | RN(reg) | imm2);
+ }
/* Load integer. */
return push_inst_with_literal(compiler, EMIT_DATA_TRANSFER(WORD_SIZE | LOAD_DATA, 1, reg, TMP_PC, 0), imm);
-#else
+#else /* !SLJIT_CONFIG_ARM_V6 */
FAIL_IF(push_inst(compiler, MOVW | RD(reg) | ((imm << 4) & 0xf0000) | (imm & 0xfff)));
if (imm <= 0xffff)
return SLJIT_SUCCESS;
return push_inst(compiler, MOVT | RD(reg) | ((imm >> 12) & 0xf0000) | ((imm >> 16) & 0xfff));
-#endif
+#endif /* SLJIT_CONFIG_ARM_V6 */
}
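/* [Illustrative sketch, not part of the patch; values chosen for
   demonstration.] On ARMv6 the compute_imm()/load_immediate() pair
   above synthesizes a constant from two Operand2 immediates
   (MOV + ORR, or MVN + BIC for the complemented value), falling back
   to a literal-pool load only when neither split succeeds: */
#include <assert.h>
#include <stdint.h>

static void demo_two_piece_constant(void)
{
	uint32_t imm = 0x12340000;  /* not a single Operand2 immediate */
	uint32_t imm1 = 0x12000000; /* 0x12 rotated right by 8: encodable */
	uint32_t imm2 = 0x00340000; /* 0x34 rotated right by 16: encodable */
	assert((imm1 | imm2) == imm); /* mov reg, #imm1 ; orr reg, reg, #imm2 */
}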
-static SLJIT_INLINE sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg,
+static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg,
sljit_s32 arg, sljit_sw argw, sljit_s32 tmp_reg)
{
- sljit_uw imm, offset_reg;
- sljit_uw is_type1_transfer = IS_TYPE1_TRANSFER(flags);
+ sljit_uw imm, offset_reg, tmp;
+ sljit_sw mask = IS_TYPE1_TRANSFER(flags) ? 0xfff : 0xff;
+ sljit_sw sign = IS_TYPE1_TRANSFER(flags) ? 0x1000 : 0x100;
- SLJIT_ASSERT (arg & SLJIT_MEM);
- SLJIT_ASSERT((arg & REG_MASK) != tmp_reg);
+ SLJIT_ASSERT(arg & SLJIT_MEM);
+ SLJIT_ASSERT((arg & REG_MASK) != tmp_reg || (arg == SLJIT_MEM1(tmp_reg) && argw >= -mask && argw <= mask));
- if ((arg & REG_MASK) == SLJIT_UNUSED) {
- if (is_type1_transfer) {
- FAIL_IF(load_immediate(compiler, tmp_reg, argw & ~0xfff));
- argw &= 0xfff;
- }
- else {
- FAIL_IF(load_immediate(compiler, tmp_reg, argw & ~0xff));
- argw &= 0xff;
+ if (SLJIT_UNLIKELY(!(arg & REG_MASK))) {
+ tmp = (sljit_uw)(argw & (sign | mask));
+ tmp = (sljit_uw)((argw + (tmp <= (sljit_uw)sign ? 0 : sign)) & ~mask);
+
+ FAIL_IF(load_immediate(compiler, tmp_reg, tmp));
+
+ argw -= (sljit_sw)tmp;
+ tmp = 1;
+
+ if (argw < 0) {
+ argw = -argw;
+ tmp = 0;
}
- return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, reg, tmp_reg,
- is_type1_transfer ? argw : TYPE2_TRANSFER_IMM(argw)));
+ return push_inst(compiler, EMIT_DATA_TRANSFER(flags, tmp, reg, tmp_reg,
+ (mask == 0xff) ? TYPE2_TRANSFER_IMM(argw) : argw));
}
if (arg & OFFS_REG_MASK) {
@@ -1474,72 +1902,62 @@ static SLJIT_INLINE sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit
arg &= REG_MASK;
argw &= 0x3;
- if (argw != 0 && !is_type1_transfer) {
- FAIL_IF(push_inst(compiler, ADD | RD(tmp_reg) | RN(arg) | RM(offset_reg) | (argw << 7)));
+ if (argw != 0 && (mask == 0xff)) {
+ FAIL_IF(push_inst(compiler, ADD | RD(tmp_reg) | RN(arg) | RM(offset_reg) | ((sljit_ins)argw << 7)));
return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, reg, tmp_reg, TYPE2_TRANSFER_IMM(0)));
}
/* Bit 25: RM is offset. */
return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, reg, arg,
- RM(offset_reg) | (is_type1_transfer ? (1 << 25) : 0) | (argw << 7)));
+ RM(offset_reg) | (mask == 0xff ? 0 : (1 << 25)) | ((sljit_ins)argw << 7)));
}
arg &= REG_MASK;
- if (is_type1_transfer) {
- if (argw > 0xfff) {
- imm = get_imm(argw & ~0xfff);
- if (imm) {
- FAIL_IF(push_inst(compiler, ADD | RD(tmp_reg) | RN(arg) | imm));
- argw = argw & 0xfff;
- arg = tmp_reg;
- }
- }
- else if (argw < -0xfff) {
- imm = get_imm(-argw & ~0xfff);
- if (imm) {
- FAIL_IF(push_inst(compiler, SUB | RD(tmp_reg) | RN(arg) | imm));
- argw = -(-argw & 0xfff);
- arg = tmp_reg;
- }
+ if (argw > mask) {
+ tmp = (sljit_uw)(argw & (sign | mask));
+ tmp = (sljit_uw)((argw + (tmp <= (sljit_uw)sign ? 0 : sign)) & ~mask);
+ imm = get_imm(tmp);
+
+ if (imm) {
+ FAIL_IF(push_inst(compiler, ADD | RD(tmp_reg) | RN(arg) | imm));
+ argw -= (sljit_sw)tmp;
+ arg = tmp_reg;
+
+ SLJIT_ASSERT(argw >= -mask && argw <= mask);
}
+ } else if (argw < -mask) {
+ tmp = (sljit_uw)(-argw & (sign | mask));
+ tmp = (sljit_uw)((-argw + (tmp <= (sljit_uw)sign ? 0 : sign)) & ~mask);
+ imm = get_imm(tmp);
- if (argw >= 0 && argw <= 0xfff)
- return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, reg, arg, argw));
+ if (imm) {
+ FAIL_IF(push_inst(compiler, SUB | RD(tmp_reg) | RN(arg) | imm));
+ argw += (sljit_sw)tmp;
+ arg = tmp_reg;
- if (argw < 0 && argw >= -0xfff)
- return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 0, reg, arg, -argw));
- }
- else {
- if (argw > 0xff) {
- imm = get_imm(argw & ~0xff);
- if (imm) {
- FAIL_IF(push_inst(compiler, ADD | RD(tmp_reg) | RN(arg) | imm));
- argw = argw & 0xff;
- arg = tmp_reg;
- }
+ SLJIT_ASSERT(argw >= -mask && argw <= mask);
}
- else if (argw < -0xff) {
- imm = get_imm(-argw & ~0xff);
- if (imm) {
- FAIL_IF(push_inst(compiler, SUB | RD(tmp_reg) | RN(arg) | imm));
- argw = -(-argw & 0xff);
- arg = tmp_reg;
- }
+ }
+
+ if (argw <= mask && argw >= -mask) {
+ if (argw >= 0) {
+ if (mask == 0xff)
+ argw = TYPE2_TRANSFER_IMM(argw);
+ return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, reg, arg, argw));
}
- if (argw >= 0 && argw <= 0xff)
- return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, reg, arg, TYPE2_TRANSFER_IMM(argw)));
+ argw = -argw;
- if (argw < 0 && argw >= -0xff) {
- argw = -argw;
- return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 0, reg, arg, TYPE2_TRANSFER_IMM(argw)));
- }
+ if (mask == 0xff)
+ argw = TYPE2_TRANSFER_IMM(argw);
+
+ return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 0, reg, arg, argw));
}
- FAIL_IF(load_immediate(compiler, tmp_reg, argw));
+ FAIL_IF(load_immediate(compiler, tmp_reg, (sljit_uw)argw));
return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, reg, arg,
- RM(tmp_reg) | (is_type1_transfer ? (1 << 25) : 0)));
+ RM(tmp_reg) | (mask == 0xff ? 0 : (1 << 25))));
}
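/* [Illustrative sketch, not part of the patch; fold_base is a
   hypothetical helper.] The folding step in emit_op_mem() above picks
   a base near argw (rounded to the next chunk when the remainder would
   overflow) so the leftover displacement fits the instruction's
   +/-mask range, 0xfff for word transfers or 0xff for half transfers: */
#include <assert.h>

static long fold_base(long argw, long mask, long sign)
{
	long low = argw & (sign | mask);
	return (argw + ((low <= sign) ? 0 : sign)) & ~mask;
}

static void demo_fold_base(void)
{
	long argw = 0x1f80, base = fold_base(argw, 0xfff, 0x1000);
	assert(base == 0x2000 && argw - base == -0x80); /* fits +/-0xfff */
}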
static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 inp_flags,
@@ -1553,59 +1971,80 @@ static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s3
	/* We prefer registers and simple constants. */
sljit_s32 dst_reg;
- sljit_s32 src1_reg;
- sljit_s32 src2_reg;
+ sljit_s32 src1_reg = 0;
+ sljit_s32 src2_reg = 0;
sljit_s32 flags = HAS_FLAGS(op) ? SET_FLAGS : 0;
+ sljit_s32 neg_op = 0;
+ sljit_u32 imm2;
- /* Destination check. */
- if (SLJIT_UNLIKELY(dst == SLJIT_UNUSED))
+ op = GET_OPCODE(op);
+
+ if (flags & SET_FLAGS)
+ inp_flags &= ~ALLOW_DOUBLE_IMM;
+
+ if (dst == TMP_REG2)
flags |= UNUSED_RETURN;
SLJIT_ASSERT(!(inp_flags & ALLOW_INV_IMM) || (inp_flags & ALLOW_IMM));
- src2_reg = 0;
+ if (inp_flags & ALLOW_NEG_IMM) {
+ switch (op) {
+ case SLJIT_ADD:
+ compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD;
+ neg_op = SLJIT_SUB;
+ break;
+ case SLJIT_ADDC:
+ compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD;
+ neg_op = SLJIT_SUBC;
+ break;
+ case SLJIT_SUB:
+ compiler->status_flags_state = SLJIT_CURRENT_FLAGS_SUB;
+ neg_op = SLJIT_ADD;
+ break;
+ case SLJIT_SUBC:
+ compiler->status_flags_state = SLJIT_CURRENT_FLAGS_SUB;
+ neg_op = SLJIT_ADDC;
+ break;
+ }
+ }
do {
if (!(inp_flags & ALLOW_IMM))
break;
- if (src2 & SLJIT_IMM) {
- src2_reg = get_imm(src2w);
+ if (src2 == SLJIT_IMM) {
+ src2_reg = (sljit_s32)get_imm((sljit_uw)src2w);
if (src2_reg)
break;
+
if (inp_flags & ALLOW_INV_IMM) {
- src2_reg = get_imm(~src2w);
+ src2_reg = (sljit_s32)get_imm(~(sljit_uw)src2w);
if (src2_reg) {
flags |= INV_IMM;
break;
}
}
- if (GET_OPCODE(op) == SLJIT_ADD) {
- src2_reg = get_imm(-src2w);
- if (src2_reg) {
- op = SLJIT_SUB | GET_ALL_FLAGS(op);
- break;
- }
- }
- if (GET_OPCODE(op) == SLJIT_SUB) {
- src2_reg = get_imm(-src2w);
+
+ if (neg_op != 0) {
+ src2_reg = (sljit_s32)get_imm((neg_op == SLJIT_ADD || neg_op == SLJIT_SUB) ? (sljit_uw)-src2w : ~(sljit_uw)src2w);
if (src2_reg) {
- op = SLJIT_ADD | GET_ALL_FLAGS(op);
+ op = neg_op | GET_ALL_FLAGS(op);
break;
}
}
}
- if (src1 & SLJIT_IMM) {
- src2_reg = get_imm(src1w);
+ if (src1 == SLJIT_IMM) {
+ src2_reg = (sljit_s32)get_imm((sljit_uw)src1w);
if (src2_reg) {
flags |= ARGS_SWAPPED;
src1 = src2;
src1w = src2w;
break;
}
+
if (inp_flags & ALLOW_INV_IMM) {
- src2_reg = get_imm(~src1w);
+ src2_reg = (sljit_s32)get_imm(~(sljit_uw)src1w);
if (src2_reg) {
flags |= ARGS_SWAPPED | INV_IMM;
src1 = src2;
@@ -1613,13 +2052,16 @@ static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s3
break;
}
}
- if (GET_OPCODE(op) == SLJIT_ADD) {
- src2_reg = get_imm(-src1w);
+
+ if (neg_op >= SLJIT_SUB) {
+ /* Note: additive operation (commutative). */
+ SLJIT_ASSERT(op == SLJIT_ADD || op == SLJIT_ADDC);
+
+ src2_reg = (sljit_s32)get_imm((sljit_uw)-src1w);
if (src2_reg) {
- /* Note: add is commutative operation. */
src1 = src2;
src1w = src2w;
- op = SLJIT_SUB | GET_ALL_FLAGS(op);
+ op = neg_op | GET_ALL_FLAGS(op);
break;
}
}
@@ -1632,14 +2074,13 @@ static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s3
else if (src1 & SLJIT_MEM) {
FAIL_IF(emit_op_mem(compiler, inp_flags | LOAD_DATA, TMP_REG1, src1, src1w, TMP_REG1));
src1_reg = TMP_REG1;
- }
- else {
- FAIL_IF(load_immediate(compiler, TMP_REG1, src1w));
+ } else if (!(inp_flags & ALLOW_DOUBLE_IMM) || src2_reg != 0 || op == SLJIT_SUB || op == SLJIT_SUBC) {
+ FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_uw)src1w));
src1_reg = TMP_REG1;
}
/* Destination. */
- dst_reg = SLOW_IS_REG(dst) ? dst : TMP_REG2;
+ dst_reg = FAST_IS_REG(dst) ? dst : TMP_REG2;
if (op <= SLJIT_MOV_P) {
if (dst & SLJIT_MEM) {
@@ -1662,11 +2103,65 @@ static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s3
src2_reg = src2;
else if (src2 & SLJIT_MEM)
FAIL_IF(emit_op_mem(compiler, inp_flags | LOAD_DATA, src2_reg, src2, src2w, TMP_REG2));
- else
- FAIL_IF(load_immediate(compiler, src2_reg, src2w));
+ else if (!(inp_flags & ALLOW_DOUBLE_IMM))
+ FAIL_IF(load_immediate(compiler, src2_reg, (sljit_uw)src2w));
+ else {
+ SLJIT_ASSERT(!(flags & SET_FLAGS));
+
+ if (src1_reg == 0) {
+ FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_uw)src1w));
+ src1_reg = TMP_REG1;
+ }
+
+ src2_reg = (sljit_s32)compute_imm((sljit_uw)src2w, &imm2);
+
+ if (src2_reg == 0 && neg_op != 0) {
+ src2_reg = (sljit_s32)compute_imm((sljit_uw)-src2w, &imm2);
+ if (src2_reg != 0)
+ op = neg_op;
+ }
+
+ if (src2_reg == 0) {
+ FAIL_IF(load_immediate(compiler, TMP_REG2, (sljit_uw)src2w));
+ src2_reg = TMP_REG2;
+ } else {
+ FAIL_IF(emit_single_op(compiler, op, flags, (sljit_uw)dst_reg, (sljit_uw)src1_reg, (sljit_uw)src2_reg));
+ src1_reg = dst_reg;
+ src2_reg = (sljit_s32)imm2;
+
+ if (op == SLJIT_ADDC)
+ op = SLJIT_ADD;
+ else if (op == SLJIT_SUBC)
+ op = SLJIT_SUB;
+ }
+ }
+ }
+
+ if (src1_reg == 0) {
+ SLJIT_ASSERT((inp_flags & ALLOW_DOUBLE_IMM) && !(flags & SET_FLAGS));
+
+ src1_reg = (sljit_s32)compute_imm((sljit_uw)src1w, &imm2);
+
+ if (src1_reg == 0 && neg_op != 0) {
+ src1_reg = (sljit_s32)compute_imm((sljit_uw)-src1w, &imm2);
+ if (src1_reg != 0)
+ op = neg_op;
+ }
+
+ if (src1_reg == 0) {
+ FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_uw)src1w));
+ src1_reg = TMP_REG1;
+ } else {
+ FAIL_IF(emit_single_op(compiler, op, flags, (sljit_uw)dst_reg, (sljit_uw)src2_reg, (sljit_uw)src1_reg));
+ src1_reg = dst_reg;
+ src2_reg = (sljit_s32)imm2;
+
+ if (op == SLJIT_ADDC)
+ op = SLJIT_ADD;
+ }
}
- FAIL_IF(emit_single_op(compiler, op, flags, dst_reg, src1_reg, src2_reg));
+ FAIL_IF(emit_single_op(compiler, op, flags, (sljit_uw)dst_reg, (sljit_uw)src1_reg, (sljit_uw)src2_reg));
if (!(dst & SLJIT_MEM))
return SLJIT_SUCCESS;
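
The new ALLOW_DOUBLE_IMM path is what the compute_imm/imm2 pair above serves: a constant that is not a single ARM rotated immediate may still split into two, so the operation becomes two data-processing instructions instead of a constant load. Note also the ADDC to ADD and SUBC to SUB downgrade, so the carry is consumed by the first instruction only. A sketch of the splitting idea, under the assumption of a greedy low-window search (split_two is hypothetical; the real compute_imm also tries inverted and negated forms):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t rotl32(uint32_t v, unsigned r)
    {
        return (v << (r & 31)) | (v >> ((32 - r) & 31));
    }

    /* ARM "operand2" immediate: an 8-bit value rotated right by an
       even amount. */
    static int is_rot_imm(uint32_t v)
    {
        for (unsigned rot = 0; rot < 32; rot += 2)
            if ((rotl32(v, rot) & ~(uint32_t)0xff) == 0)
                return 1;
        return 0;
    }

    /* Cover the low bits with one 8-bit rotated window and require
       the remainder to be a rotated immediate on its own. */
    static int split_two(uint32_t v, uint32_t *a, uint32_t *b)
    {
        unsigned low = 0;

        if (v == 0 || is_rot_imm(v))
            return 0;                   /* one instruction is enough */

        while (!(v & (1u << low)))
            low++;
        low &= ~1u;                     /* rotations are even */

        *a = v & rotl32(0xff, low);     /* always a valid rotated imm */
        *b = v & ~*a;
        return is_rot_imm(*b);
    }

    int main(void)
    {
        uint32_t a, b;
        if (split_two(0x12340, &a, &b))   /* 0x340 | 0x12000 */
            printf("ADD rd, rn, #%#x ; ADD rd, rd, #%#x\n", a, b);
        return 0;
    }
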
@@ -1691,7 +2186,7 @@ extern int __aeabi_idivmod(int numerator, int denominator);
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op)
{
- sljit_sw saved_reg_list[3];
+ sljit_uw saved_reg_list[3];
sljit_sw saved_reg_count;
CHECK_ERROR();
@@ -1708,10 +2203,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
case SLJIT_LMUL_UW:
case SLJIT_LMUL_SW:
return push_inst(compiler, (op == SLJIT_LMUL_UW ? UMULL : SMULL)
- | (reg_map[SLJIT_R1] << 16)
- | (reg_map[SLJIT_R0] << 12)
- | (reg_map[SLJIT_R0] << 8)
- | reg_map[SLJIT_R1]);
+ | RN(SLJIT_R1) | RD(SLJIT_R0) | RM8(SLJIT_R0) | RM(SLJIT_R1));
case SLJIT_DIVMOD_UW:
case SLJIT_DIVMOD_SW:
case SLJIT_DIV_UW:
@@ -1728,21 +2220,21 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
saved_reg_list[saved_reg_count++] = 1;
if (saved_reg_count > 0) {
- FAIL_IF(push_inst(compiler, 0xe52d0000 | (saved_reg_count >= 3 ? 16 : 8)
+ FAIL_IF(push_inst(compiler, STR | 0x2d0000 | (saved_reg_count >= 3 ? 16 : 8)
| (saved_reg_list[0] << 12) /* str rX, [sp, #-8/-16]! */));
if (saved_reg_count >= 2) {
SLJIT_ASSERT(saved_reg_list[1] < 8);
- FAIL_IF(push_inst(compiler, 0xe58d0004 | (saved_reg_list[1] << 12) /* str rX, [sp, #4] */));
+ FAIL_IF(push_inst(compiler, STR | 0x8d0004 | (saved_reg_list[1] << 12) /* str rX, [sp, #4] */));
}
if (saved_reg_count >= 3) {
SLJIT_ASSERT(saved_reg_list[2] < 8);
- FAIL_IF(push_inst(compiler, 0xe58d0008 | (saved_reg_list[2] << 12) /* str rX, [sp, #8] */));
+ FAIL_IF(push_inst(compiler, STR | 0x8d0008 | (saved_reg_list[2] << 12) /* str rX, [sp, #8] */));
}
}
#if defined(__GNUC__)
FAIL_IF(sljit_emit_ijump(compiler, SLJIT_FAST_CALL, SLJIT_IMM,
- ((op | 0x2) == SLJIT_DIV_UW ? SLJIT_FUNC_OFFSET(__aeabi_uidivmod) : SLJIT_FUNC_OFFSET(__aeabi_idivmod))));
+ ((op | 0x2) == SLJIT_DIV_UW ? SLJIT_FUNC_ADDR(__aeabi_uidivmod) : SLJIT_FUNC_ADDR(__aeabi_idivmod))));
#else
#error "Software divmod functions are needed"
#endif
@@ -1750,13 +2242,13 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
if (saved_reg_count > 0) {
if (saved_reg_count >= 3) {
SLJIT_ASSERT(saved_reg_list[2] < 8);
- FAIL_IF(push_inst(compiler, 0xe59d0008 | (saved_reg_list[2] << 12) /* ldr rX, [sp, #8] */));
+ FAIL_IF(push_inst(compiler, LDR | 0x8d0008 | (saved_reg_list[2] << 12) /* ldr rX, [sp, #8] */));
}
if (saved_reg_count >= 2) {
SLJIT_ASSERT(saved_reg_list[1] < 8);
- FAIL_IF(push_inst(compiler, 0xe59d0004 | (saved_reg_list[1] << 12) /* ldr rX, [sp, #4] */));
+ FAIL_IF(push_inst(compiler, LDR | 0x8d0004 | (saved_reg_list[1] << 12) /* ldr rX, [sp, #4] */));
}
- return push_inst(compiler, 0xe49d0000 | (saved_reg_count >= 3 ? 16 : 8)
+ return push_inst(compiler, (LDR ^ (1 << 24)) | 0x8d0000 | (sljit_ins)(saved_reg_count >= 3 ? 16 : 8)
| (saved_reg_list[0] << 12) /* ldr rX, [sp], #8/16 */);
}
return SLJIT_SUCCESS;
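
The divmod path above relies on the ARM EABI helper contract: __aeabi_idivmod and __aeabi_uidivmod return the quotient in r0 and the remainder in r1, which is why a single call serves both the SLJIT_DIV_* and SLJIT_DIVMOD_* opcodes. In C terms (a sketch of the helper's observable behavior, not its implementation):

    #include <stdio.h>

    typedef struct { int quot; int rem; } idivmod_t;   /* r0, r1 */

    static idivmod_t idivmod(int n, int d)
    {
        idivmod_t r = { n / d, n % d };
        return r;
    }

    int main(void)
    {
        idivmod_t r = idivmod(-7, 3);
        printf("%d %d\n", r.quot, r.rem);   /* -2 -1: truncated division */
        return 0;
    }
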
@@ -1781,33 +2273,32 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
case SLJIT_MOV:
case SLJIT_MOV_U32:
case SLJIT_MOV_S32:
+ case SLJIT_MOV32:
case SLJIT_MOV_P:
return emit_op(compiler, SLJIT_MOV, ALLOW_ANY_IMM, dst, dstw, TMP_REG1, 0, src, srcw);
case SLJIT_MOV_U8:
- return emit_op(compiler, SLJIT_MOV_U8, ALLOW_ANY_IMM | BYTE_SIZE, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u8)srcw : srcw);
+ return emit_op(compiler, SLJIT_MOV_U8, ALLOW_ANY_IMM | BYTE_SIZE, dst, dstw, TMP_REG1, 0, src, (src == SLJIT_IMM) ? (sljit_u8)srcw : srcw);
case SLJIT_MOV_S8:
- return emit_op(compiler, SLJIT_MOV_S8, ALLOW_ANY_IMM | SIGNED | BYTE_SIZE, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s8)srcw : srcw);
+ return emit_op(compiler, SLJIT_MOV_S8, ALLOW_ANY_IMM | SIGNED | BYTE_SIZE, dst, dstw, TMP_REG1, 0, src, (src == SLJIT_IMM) ? (sljit_s8)srcw : srcw);
case SLJIT_MOV_U16:
- return emit_op(compiler, SLJIT_MOV_U16, ALLOW_ANY_IMM | HALF_SIZE, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u16)srcw : srcw);
+ return emit_op(compiler, SLJIT_MOV_U16, ALLOW_ANY_IMM | HALF_SIZE, dst, dstw, TMP_REG1, 0, src, (src == SLJIT_IMM) ? (sljit_u16)srcw : srcw);
case SLJIT_MOV_S16:
- return emit_op(compiler, SLJIT_MOV_S16, ALLOW_ANY_IMM | SIGNED | HALF_SIZE, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s16)srcw : srcw);
-
- case SLJIT_NOT:
- return emit_op(compiler, op, ALLOW_ANY_IMM, dst, dstw, TMP_REG1, 0, src, srcw);
-
- case SLJIT_NEG:
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
- || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->skip_checks = 1;
-#endif
- return sljit_emit_op2(compiler, SLJIT_SUB | GET_ALL_FLAGS(op), dst, dstw, SLJIT_IMM, 0, src, srcw);
+ return emit_op(compiler, SLJIT_MOV_S16, ALLOW_ANY_IMM | SIGNED | HALF_SIZE, dst, dstw, TMP_REG1, 0, src, (src == SLJIT_IMM) ? (sljit_s16)srcw : srcw);
case SLJIT_CLZ:
+ case SLJIT_CTZ:
+ case SLJIT_REV:
+ case SLJIT_REV_U32:
+ case SLJIT_REV_S32:
return emit_op(compiler, op, 0, dst, dstw, TMP_REG1, 0, src, srcw);
+
+ case SLJIT_REV_U16:
+ case SLJIT_REV_S16:
+ return emit_op(compiler, op, HALF_SIZE, dst, dstw, TMP_REG1, 0, src, srcw);
}
return SLJIT_SUCCESS;
@@ -1818,23 +2309,30 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w)
{
+ sljit_s32 inp_flags;
+
CHECK_ERROR();
- CHECK(check_sljit_emit_op2(compiler, op, dst, dstw, src1, src1w, src2, src2w));
+ CHECK(check_sljit_emit_op2(compiler, op, 0, dst, dstw, src1, src1w, src2, src2w));
ADJUST_LOCAL_OFFSET(dst, dstw);
ADJUST_LOCAL_OFFSET(src1, src1w);
ADJUST_LOCAL_OFFSET(src2, src2w);
- if (dst == SLJIT_UNUSED && !HAS_FLAGS(op))
- return SLJIT_SUCCESS;
-
switch (GET_OPCODE(op)) {
case SLJIT_ADD:
case SLJIT_ADDC:
case SLJIT_SUB:
case SLJIT_SUBC:
+ return emit_op(compiler, op, ALLOW_IMM | ALLOW_NEG_IMM | ALLOW_DOUBLE_IMM, dst, dstw, src1, src1w, src2, src2w);
+
case SLJIT_OR:
+ return emit_op(compiler, op, ALLOW_IMM | ALLOW_DOUBLE_IMM, dst, dstw, src1, src1w, src2, src2w);
+
case SLJIT_XOR:
- return emit_op(compiler, op, ALLOW_IMM, dst, dstw, src1, src1w, src2, src2w);
+ inp_flags = ALLOW_IMM | ALLOW_DOUBLE_IMM;
+ if ((src1 == SLJIT_IMM && src1w == -1) || (src2 == SLJIT_IMM && src2w == -1)) {
+ inp_flags |= ALLOW_INV_IMM;
+ }
+ return emit_op(compiler, op, inp_flags, dst, dstw, src1, src1w, src2, src2w);
case SLJIT_MUL:
return emit_op(compiler, op, 0, dst, dstw, src1, src1w, src2, src2w);
@@ -1843,13 +2341,17 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
return emit_op(compiler, op, ALLOW_ANY_IMM, dst, dstw, src1, src1w, src2, src2w);
case SLJIT_SHL:
+ case SLJIT_MSHL:
case SLJIT_LSHR:
+ case SLJIT_MLSHR:
case SLJIT_ASHR:
- if (src2 & SLJIT_IMM) {
+ case SLJIT_MASHR:
+ case SLJIT_ROTL:
+ case SLJIT_ROTR:
+ if (src2 == SLJIT_IMM) {
compiler->shift_imm = src2w & 0x1f;
return emit_op(compiler, op, 0, dst, dstw, TMP_REG1, 0, src1, src1w);
- }
- else {
+ } else {
compiler->shift_imm = 0x20;
return emit_op(compiler, op, 0, dst, dstw, src1, src1w, src2, src2w);
}
@@ -1858,6 +2360,66 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
return SLJIT_SUCCESS;
}
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2u(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2, sljit_sw src2w)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_op2(compiler, op, 1, 0, 0, src1, src1w, src2, src2w));
+
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_op2(compiler, op, TMP_REG2, 0, src1, src1w, src2, src2w);
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_shift_into(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst_reg,
+ sljit_s32 src1_reg,
+ sljit_s32 src2_reg,
+ sljit_s32 src3, sljit_sw src3w)
+{
+ sljit_s32 is_left;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_shift_into(compiler, op, dst_reg, src1_reg, src2_reg, src3, src3w));
+
+ op = GET_OPCODE(op);
+ is_left = (op == SLJIT_SHL || op == SLJIT_MSHL);
+
+ if (src1_reg == src2_reg) {
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_op2(compiler, is_left ? SLJIT_ROTL : SLJIT_ROTR, dst_reg, 0, src1_reg, 0, src3, src3w);
+ }
+
+ ADJUST_LOCAL_OFFSET(src3, src3w);
+
+ /* Shift type of ROR is 3. */
+ if (src3 == SLJIT_IMM) {
+ src3w &= 0x1f;
+
+ if (src3w == 0)
+ return SLJIT_SUCCESS;
+
+ FAIL_IF(push_inst(compiler, MOV | RD(dst_reg) | RM(src1_reg) | ((sljit_ins)(is_left ? 0 : 1) << 5) | ((sljit_ins)src3w << 7)));
+ src3w = (src3w ^ 0x1f) + 1;
+ return push_inst(compiler, ORR | RD(dst_reg) | RN(dst_reg) | RM(src2_reg) | ((sljit_ins)(is_left ? 1 : 0) << 5) | ((sljit_ins)src3w << 7));
+ }
+
+ if (src3 & SLJIT_MEM) {
+ FAIL_IF(emit_op_mem(compiler, WORD_SIZE | LOAD_DATA, TMP_REG2, src3, src3w, TMP_REG2));
+ src3 = TMP_REG2;
+ }
+
+ if (op == SLJIT_MSHL || op == SLJIT_MLSHR || dst_reg == src3) {
+ FAIL_IF(push_inst(compiler, AND | SRC2_IMM | RD(TMP_REG2) | RN(src3) | 0x1f));
+ src3 = TMP_REG2;
+ }
+
+ FAIL_IF(push_inst(compiler, MOV | RD(dst_reg) | RM8(src3) | ((sljit_ins)(is_left ? 0 : 1) << 5) | 0x10 | RM(src1_reg)));
+ FAIL_IF(push_inst(compiler, MOV | RD(TMP_REG1) | RM(src2_reg) | ((sljit_ins)(is_left ? 1 : 0) << 5) | (1 << 7)));
+ FAIL_IF(push_inst(compiler, EOR | SRC2_IMM | RD(TMP_REG2) | RN(src3) | 0x1f));
+ return push_inst(compiler, ORR | RD(dst_reg) | RN(dst_reg) | RM8(TMP_REG2) | ((sljit_ins)(is_left ? 1 : 0) << 5) | 0x10 | RM(TMP_REG1));
+}
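
sljit_emit_shift_into implements a funnel shift: the bits shifted out of src1 are filled in from src2. In the immediate path, (src3w ^ 0x1f) + 1 is just 32 - n computed without a SUB, and the register path computes src2 >> 1 >> (n ^ 31), which equals src2 >> (32 - n) but never needs a shift amount of 32. A standalone sketch checking that the two forms agree:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t shift_into_left(uint32_t src1, uint32_t src2, unsigned n)
    {
        if ((n &= 31) == 0)
            return src1;
        return (src1 << n) | (src2 >> (32 - n));        /* immediate path */
    }

    static uint32_t shift_into_left_reg(uint32_t src1, uint32_t src2, unsigned n)
    {
        n &= 31;
        return (src1 << n) | ((src2 >> 1) >> (n ^ 31)); /* register path */
    }

    int main(void)
    {
        for (unsigned n = 0; n < 32; n++)
            if (shift_into_left(0x12345678, 0x9abcdef0, n)
                    != shift_into_left_reg(0x12345678, 0x9abcdef0, n)) {
                puts("mismatch");
                return 1;
            }
        puts("ok");
        return 0;
    }
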
+
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 src, sljit_sw srcw)
{
@@ -1881,59 +2443,97 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *comp
case SLJIT_PREFETCH_L2:
case SLJIT_PREFETCH_L3:
case SLJIT_PREFETCH_ONCE:
-#if (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)
SLJIT_ASSERT(src & SLJIT_MEM);
return emit_op_mem(compiler, PRELOAD | LOAD_DATA, TMP_PC, src, srcw, TMP_REG1);
-#else /* !SLJIT_CONFIG_ARM_V7 */
- return SLJIT_SUCCESS;
-#endif /* SLJIT_CONFIG_ARM_V7 */
}
return SLJIT_SUCCESS;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_dst(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw)
{
- CHECK_REG_INDEX(check_sljit_get_register_index(reg));
- return reg_map[reg];
+ sljit_s32 size, dst_r;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_op_dst(compiler, op, dst, dstw));
+ ADJUST_LOCAL_OFFSET(dst, dstw);
+
+ switch (op) {
+ case SLJIT_FAST_ENTER:
+ SLJIT_ASSERT(reg_map[TMP_REG2] == 14);
+
+ if (FAST_IS_REG(dst))
+ return push_inst(compiler, MOV | RD(dst) | RM(TMP_REG2));
+ break;
+ case SLJIT_GET_RETURN_ADDRESS:
+ size = GET_SAVED_REGISTERS_SIZE(compiler->scratches, compiler->saveds - SLJIT_KEPT_SAVEDS_COUNT(compiler->options), 0);
+
+ if (compiler->fsaveds > 0 || compiler->fscratches >= SLJIT_FIRST_SAVED_FLOAT_REG) {
+ /* The size of pc is not added above. */
+ if ((size & SSIZE_OF(sw)) == 0)
+ size += SSIZE_OF(sw);
+
+ size += GET_SAVED_FLOAT_REGISTERS_SIZE(compiler->fscratches, compiler->fsaveds, f64);
+ }
+
+ SLJIT_ASSERT(((compiler->local_size + size + SSIZE_OF(sw)) & 0x7) == 0);
+
+ dst_r = FAST_IS_REG(dst) ? dst : TMP_REG2;
+ FAIL_IF(emit_op_mem(compiler, WORD_SIZE | LOAD_DATA, dst_r, SLJIT_MEM1(SLJIT_SP), compiler->local_size + size, TMP_REG1));
+ break;
+ }
+
+ if (dst & SLJIT_MEM)
+ return emit_op_mem(compiler, WORD_SIZE, TMP_REG2, dst, dstw, TMP_REG1);
+
+ return SLJIT_SUCCESS;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 type, sljit_s32 reg)
{
- CHECK_REG_INDEX(check_sljit_get_float_register_index(reg));
- return (freg_map[reg] << 1);
+ CHECK_REG_INDEX(check_sljit_get_register_index(type, reg));
+
+ if (type == SLJIT_GP_REGISTER)
+ return reg_map[reg];
+
+ if (type == SLJIT_FLOAT_REGISTER || type == SLJIT_SIMD_REG_64)
+ return freg_map[reg];
+
+ if (type != SLJIT_SIMD_REG_128)
+ return freg_map[reg] & ~0x1;
+
+ return -1;
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler,
- void *instruction, sljit_s32 size)
+ void *instruction, sljit_u32 size)
{
+ SLJIT_UNUSED_ARG(size);
CHECK_ERROR();
CHECK(check_sljit_emit_op_custom(compiler, instruction, size));
- return push_inst(compiler, *(sljit_uw*)instruction);
+ return push_inst(compiler, *(sljit_ins*)instruction);
}
/* --------------------------------------------------------------------- */
/* Floating point operators */
/* --------------------------------------------------------------------- */
-
#define FPU_LOAD (1 << 20)
#define EMIT_FPU_DATA_TRANSFER(inst, add, base, freg, offs) \
- ((inst) | ((add) << 23) | (reg_map[base] << 16) | (freg_map[freg] << 12) | (offs))
-#define EMIT_FPU_OPERATION(opcode, mode, dst, src1, src2) \
- ((opcode) | (mode) | (freg_map[dst] << 12) | freg_map[src1] | (freg_map[src2] << 16))
+ ((inst) | (sljit_ins)((add) << 23) | RN(base) | VD(freg) | (sljit_ins)(offs))
static sljit_s32 emit_fop_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw)
{
sljit_uw imm;
- sljit_sw inst = VSTR_F32 | (flags & (SLJIT_F32_OP | FPU_LOAD));
+ sljit_ins inst = VSTR_F32 | (flags & (SLJIT_32 | FPU_LOAD));
SLJIT_ASSERT(arg & SLJIT_MEM);
arg &= ~SLJIT_MEM;
if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) {
- FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG2) | RN(arg & REG_MASK) | RM(OFFS_REG(arg)) | ((argw & 0x3) << 7)));
+ FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG2) | RN(arg & REG_MASK) | RM(OFFS_REG(arg)) | (((sljit_ins)argw & 0x3) << 7)));
arg = TMP_REG2;
argw = 0;
}
@@ -1945,12 +2545,12 @@ static sljit_s32 emit_fop_mem(struct sljit_compiler *compiler, sljit_s32 flags,
if (!(-argw & ~0x3fc))
return push_inst(compiler, EMIT_FPU_DATA_TRANSFER(inst, 0, arg & REG_MASK, reg, (-argw) >> 2));
- imm = get_imm(argw & ~0x3fc);
+ imm = get_imm((sljit_uw)argw & ~(sljit_uw)0x3fc);
if (imm) {
FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG2) | RN(arg & REG_MASK) | imm));
return push_inst(compiler, EMIT_FPU_DATA_TRANSFER(inst, 1, TMP_REG2, reg, (argw & 0x3fc) >> 2));
}
- imm = get_imm(-argw & ~0x3fc);
+ imm = get_imm((sljit_uw)-argw & ~(sljit_uw)0x3fc);
if (imm) {
argw = -argw;
FAIL_IF(push_inst(compiler, SUB | RD(TMP_REG2) | RN(arg & REG_MASK) | imm));
@@ -1959,11 +2559,11 @@ static sljit_s32 emit_fop_mem(struct sljit_compiler *compiler, sljit_s32 flags,
}
if (arg) {
- FAIL_IF(load_immediate(compiler, TMP_REG2, argw));
+ FAIL_IF(load_immediate(compiler, TMP_REG2, (sljit_uw)argw));
FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG2) | RN(arg & REG_MASK) | RM(TMP_REG2)));
}
else
- FAIL_IF(load_immediate(compiler, TMP_REG2, argw));
+ FAIL_IF(load_immediate(compiler, TMP_REG2, (sljit_uw)argw));
return push_inst(compiler, EMIT_FPU_DATA_TRANSFER(inst, 1, TMP_REG2, reg, 0));
}
@@ -1972,66 +2572,83 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_comp
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
{
- op ^= SLJIT_F32_OP;
+ op ^= SLJIT_32;
if (src & SLJIT_MEM) {
- FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG1, src, srcw));
+ FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_32) | FPU_LOAD, TMP_FREG1, src, srcw));
src = TMP_FREG1;
}
- FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCVT_S32_F32, op & SLJIT_F32_OP, TMP_FREG1, src, 0)));
+ FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCVT_S32_F32, op & SLJIT_32, TMP_FREG1, src, 0)));
if (FAST_IS_REG(dst))
- return push_inst(compiler, VMOV | (1 << 20) | RD(dst) | (freg_map[TMP_FREG1] << 16));
+ return push_inst(compiler, VMOV | (1 << 20) | RD(dst) | VN(TMP_FREG1));
/* Store the integer value from a VFP register. */
return emit_fop_mem(compiler, 0, TMP_FREG1, dst, dstw);
}
-static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
+static sljit_s32 sljit_emit_fop1_conv_f64_from_w(struct sljit_compiler *compiler, sljit_ins ins,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
{
sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
- op ^= SLJIT_F32_OP;
-
if (FAST_IS_REG(src))
- FAIL_IF(push_inst(compiler, VMOV | RD(src) | (freg_map[TMP_FREG1] << 16)));
+ FAIL_IF(push_inst(compiler, VMOV | RD(src) | VN(TMP_FREG1)));
else if (src & SLJIT_MEM) {
/* Load the integer value into a VFP register. */
FAIL_IF(emit_fop_mem(compiler, FPU_LOAD, TMP_FREG1, src, srcw));
}
else {
- FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
- FAIL_IF(push_inst(compiler, VMOV | RD(TMP_REG1) | (freg_map[TMP_FREG1] << 16)));
+ FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_uw)srcw));
+ FAIL_IF(push_inst(compiler, VMOV | RD(TMP_REG1) | VN(TMP_FREG1)));
}
- FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCVT_F32_S32, op & SLJIT_F32_OP, dst_r, TMP_FREG1, 0)));
+ FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(ins, ins & SLJIT_32, dst_r, TMP_FREG1, 0)));
if (dst & SLJIT_MEM)
- return emit_fop_mem(compiler, (op & SLJIT_F32_OP), TMP_FREG1, dst, dstw);
+ return emit_fop_mem(compiler, (ins & SLJIT_32), TMP_FREG1, dst, dstw);
return SLJIT_SUCCESS;
}
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 src, sljit_sw srcw)
+{
+ return sljit_emit_fop1_conv_f64_from_w(compiler, VCVT_F32_S32 | (~op & SLJIT_32), dst, dstw, src, srcw);
+}
+
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_uw(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 src, sljit_sw srcw)
+{
+ return sljit_emit_fop1_conv_f64_from_w(compiler, VCVT_F32_U32 | (~op & SLJIT_32), dst, dstw, src, srcw);
+}
+
static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w)
{
- op ^= SLJIT_F32_OP;
+ op ^= SLJIT_32;
if (src1 & SLJIT_MEM) {
- FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG1, src1, src1w));
+ FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_32) | FPU_LOAD, TMP_FREG1, src1, src1w));
src1 = TMP_FREG1;
}
if (src2 & SLJIT_MEM) {
- FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG2, src2, src2w));
+ FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_32) | FPU_LOAD, TMP_FREG2, src2, src2w));
src2 = TMP_FREG2;
}
- FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCMP_F32, op & SLJIT_F32_OP, src1, src2, 0)));
- return push_inst(compiler, VMRS);
+ FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCMP_F32, op & SLJIT_32, src1, src2, 0)));
+ FAIL_IF(push_inst(compiler, VMRS));
+
+ if (GET_FLAG_TYPE(op) != SLJIT_UNORDERED_OR_EQUAL)
+ return SLJIT_SUCCESS;
+
+ return push_inst(compiler, (CMP - CONDITIONAL) | (0x60000000 /* VS */) | SET_FLAGS | RN(TMP_REG1) | RM(TMP_REG1));
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op,
@@ -2042,16 +2659,16 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
CHECK_ERROR();
- SLJIT_COMPILE_ASSERT((SLJIT_F32_OP == 0x100), float_transfer_bit_error);
+ SLJIT_COMPILE_ASSERT((SLJIT_32 == 0x100), float_transfer_bit_error);
SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw);
dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
if (GET_OPCODE(op) != SLJIT_CONV_F64_FROM_F32)
- op ^= SLJIT_F32_OP;
+ op ^= SLJIT_32;
if (src & SLJIT_MEM) {
- FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, dst_r, src, srcw));
+ FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_32) | FPU_LOAD, dst_r, src, srcw));
src = dst_r;
}
@@ -2059,25 +2676,25 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
case SLJIT_MOV_F64:
if (src != dst_r) {
if (dst_r != TMP_FREG1)
- FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VMOV_F32, op & SLJIT_F32_OP, dst_r, src, 0)));
+ FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VMOV_F32, op & SLJIT_32, dst_r, src, 0)));
else
dst_r = src;
}
break;
case SLJIT_NEG_F64:
- FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VNEG_F32, op & SLJIT_F32_OP, dst_r, src, 0)));
+ FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VNEG_F32, op & SLJIT_32, dst_r, src, 0)));
break;
case SLJIT_ABS_F64:
- FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VABS_F32, op & SLJIT_F32_OP, dst_r, src, 0)));
+ FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VABS_F32, op & SLJIT_32, dst_r, src, 0)));
break;
case SLJIT_CONV_F64_FROM_F32:
- FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCVT_F64_F32, op & SLJIT_F32_OP, dst_r, src, 0)));
- op ^= SLJIT_F32_OP;
+ FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCVT_F64_F32, op & SLJIT_32, dst_r, src, 0)));
+ op ^= SLJIT_32;
break;
}
if (dst & SLJIT_MEM)
- return emit_fop_mem(compiler, (op & SLJIT_F32_OP), dst_r, dst, dstw);
+ return emit_fop_mem(compiler, (op & SLJIT_32), dst_r, dst, dstw);
return SLJIT_SUCCESS;
}
@@ -2094,125 +2711,232 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compil
ADJUST_LOCAL_OFFSET(src1, src1w);
ADJUST_LOCAL_OFFSET(src2, src2w);
- op ^= SLJIT_F32_OP;
+ op ^= SLJIT_32;
dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
if (src2 & SLJIT_MEM) {
- FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG2, src2, src2w));
+ FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_32) | FPU_LOAD, TMP_FREG2, src2, src2w));
src2 = TMP_FREG2;
}
if (src1 & SLJIT_MEM) {
- FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG1, src1, src1w));
+ FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_32) | FPU_LOAD, TMP_FREG1, src1, src1w));
src1 = TMP_FREG1;
}
switch (GET_OPCODE(op)) {
case SLJIT_ADD_F64:
- FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VADD_F32, op & SLJIT_F32_OP, dst_r, src2, src1)));
+ FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VADD_F32, op & SLJIT_32, dst_r, src2, src1)));
break;
-
case SLJIT_SUB_F64:
- FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VSUB_F32, op & SLJIT_F32_OP, dst_r, src2, src1)));
+ FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VSUB_F32, op & SLJIT_32, dst_r, src2, src1)));
break;
-
case SLJIT_MUL_F64:
- FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VMUL_F32, op & SLJIT_F32_OP, dst_r, src2, src1)));
+ FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VMUL_F32, op & SLJIT_32, dst_r, src2, src1)));
break;
-
case SLJIT_DIV_F64:
- FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VDIV_F32, op & SLJIT_F32_OP, dst_r, src2, src1)));
+ FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VDIV_F32, op & SLJIT_32, dst_r, src2, src1)));
break;
+ case SLJIT_COPYSIGN_F64:
+ FAIL_IF(push_inst(compiler, VMOV | (1 << 20) | VN(src2) | RD(TMP_REG1) | ((op & SLJIT_32) ? (1 << 7) : 0)));
+ FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VABS_F32, op & SLJIT_32, dst_r, src1, 0)));
+ FAIL_IF(push_inst(compiler, CMP | SET_FLAGS | RN(TMP_REG1) | SRC2_IMM | 0));
+ return push_inst(compiler, EMIT_FPU_OPERATION((VNEG_F32 & ~COND_MASK) | 0xb0000000, op & SLJIT_32, dst_r, dst_r, 0));
}
if (dst_r == TMP_FREG1)
- FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP), TMP_FREG1, dst, dstw));
+ FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_32), TMP_FREG1, dst, dstw));
return SLJIT_SUCCESS;
}
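
The SLJIT_COPYSIGN_F64 sequence above moves the word holding src2's sign bit into a core register, takes the absolute value of src1, and executes a predicated VNEG when that word is negative (after the CMP with 0, the 0xb condition is signed-less-than, and V is always clear). A C model of the data flow (the memcpy offset assumes little-endian doubles):

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static double copysign_sketch(double mag, double sgn)
    {
        int32_t hi;                        /* word VMOV reads from src2 */
        memcpy(&hi, (const char *)&sgn + 4, sizeof(hi));

        double d = fabs(mag);              /* VABS */
        if (hi < 0)                        /* CMP hi, #0 ; VNEG if "LT" */
            d = -d;
        return d;
    }

    int main(void)
    {
        printf("%g %g\n", copysign_sketch(2.5, -0.0),
                          copysign_sketch(-2.5, 1.0));
        return 0;                          /* -2.5 2.5, like copysign() */
    }
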
-#undef FPU_LOAD
#undef EMIT_FPU_DATA_TRANSFER
-/* --------------------------------------------------------------------- */
-/* Other instructions */
-/* --------------------------------------------------------------------- */
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fset32(struct sljit_compiler *compiler,
+ sljit_s32 freg, sljit_f32 value)
+{
+#if defined(__ARM_NEON) && __ARM_NEON
+ sljit_u32 exp;
+ sljit_ins ins;
+#endif /* NEON */
+ union {
+ sljit_u32 imm;
+ sljit_f32 value;
+ } u;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fset32(compiler, freg, value));
+
+ u.value = value;
+
+#if defined(__ARM_NEON) && __ARM_NEON
+ if ((u.imm << (32 - 19)) == 0) {
+ exp = (u.imm >> (23 + 2)) & 0x3f;
+
+ if (exp == 0x20 || exp == 0x1f) {
+ ins = ((u.imm >> 24) & 0x80) | ((u.imm >> 19) & 0x7f);
+ return push_inst(compiler, (VMOV_F32 ^ (1 << 6)) | ((ins & 0xf0) << 12) | VD(freg) | (ins & 0xf));
+ }
+ }
+#endif /* NEON */
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
+ FAIL_IF(load_immediate(compiler, TMP_REG1, u.imm));
+ return push_inst(compiler, VMOV | VN(freg) | RD(TMP_REG1));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fset64(struct sljit_compiler *compiler,
+ sljit_s32 freg, sljit_f64 value)
{
+#if defined(__ARM_NEON) && __ARM_NEON
+ sljit_u32 exp;
+ sljit_ins ins;
+#endif /* NEON */
+ union {
+ sljit_u32 imm[2];
+ sljit_f64 value;
+ } u;
+
CHECK_ERROR();
- CHECK(check_sljit_emit_fast_enter(compiler, dst, dstw));
- ADJUST_LOCAL_OFFSET(dst, dstw);
+ CHECK(check_sljit_emit_fset64(compiler, freg, value));
- SLJIT_ASSERT(reg_map[TMP_REG2] == 14);
+ u.value = value;
- if (FAST_IS_REG(dst))
- return push_inst(compiler, MOV | RD(dst) | RM(TMP_REG2));
+#if defined(__ARM_NEON) && __ARM_NEON
+ if (u.imm[0] == 0 && (u.imm[1] << (64 - 48)) == 0) {
+ exp = (u.imm[1] >> ((52 - 32) + 2)) & 0x1ff;
+
+ if (exp == 0x100 || exp == 0xff) {
+ ins = ((u.imm[1] >> (56 - 32)) & 0x80) | ((u.imm[1] >> (48 - 32)) & 0x7f);
+ return push_inst(compiler, (VMOV_F32 ^ (1 << 6)) | (1 << 8) | ((ins & 0xf0) << 12) | VD(freg) | (ins & 0xf));
+ }
+ }
+#endif /* NEON */
+
+ FAIL_IF(load_immediate(compiler, TMP_REG1, u.imm[0]));
+ if (u.imm[0] == u.imm[1])
+ return push_inst(compiler, VMOV2 | RN(TMP_REG1) | RD(TMP_REG1) | VM(freg));
+
+ FAIL_IF(load_immediate(compiler, TMP_REG2, u.imm[1]));
+ return push_inst(compiler, VMOV2 | RN(TMP_REG2) | RD(TMP_REG1) | VM(freg));
+}
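
The NEON fast path in sljit_emit_fset32/fset64 uses VMOV (immediate), which can encode only floats of the form +/-(16..31)/16 * 2^e with e in [-3, 4]. That is exactly what the low-mantissa-zero test and the exponent test against 0x20/0x1f check. A sketch of the same test for the f32 case (vfp_imm8 is an illustrative helper):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static int vfp_imm8(float value, uint8_t *imm8)
    {
        uint32_t imm;
        memcpy(&imm, &value, sizeof(imm));

        if ((imm << (32 - 19)) != 0)
            return 0;                      /* low mantissa bits in use */

        uint32_t exp = (imm >> 25) & 0x3f; /* exponent bits 30..25 */
        if (exp != 0x20 && exp != 0x1f)
            return 0;                      /* exponent out of range */

        *imm8 = (uint8_t)(((imm >> 24) & 0x80) | ((imm >> 19) & 0x7f));
        return 1;
    }

    int main(void)
    {
        uint8_t imm8 = 0;
        int ok = vfp_imm8(1.0f, &imm8);
        printf("1.0f: ok=%d imm8=%#x\n", ok, imm8);   /* ok=1 imm8=0x70 */
        ok = vfp_imm8(0.1f, &imm8);
        printf("0.1f: ok=%d\n", ok);                  /* not encodable */
        return 0;
    }
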
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fcopy(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 freg, sljit_s32 reg)
+{
+ sljit_s32 reg2;
+ sljit_ins inst;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fcopy(compiler, op, freg, reg));
- /* Memory. */
- return emit_op_mem(compiler, WORD_SIZE, TMP_REG2, dst, dstw, TMP_REG1);
+ if (reg & REG_PAIR_MASK) {
+ reg2 = REG_PAIR_SECOND(reg);
+ reg = REG_PAIR_FIRST(reg);
+
+ inst = VMOV2 | RN(reg) | RD(reg2) | VM(freg);
+ } else {
+ inst = VMOV | VN(freg) | RD(reg);
+
+ if (!(op & SLJIT_32))
+ inst |= 1 << 7;
+ }
+
+ if (GET_OPCODE(op) == SLJIT_COPY_FROM_F64)
+ inst |= 1 << 20;
+
+ return push_inst(compiler, inst);
}
/* --------------------------------------------------------------------- */
/* Conditional instructions */
/* --------------------------------------------------------------------- */
-static sljit_uw get_cc(struct sljit_compiler *compiler, sljit_s32 type)
+static sljit_ins get_cc(struct sljit_compiler *compiler, sljit_s32 type)
{
switch (type) {
case SLJIT_EQUAL:
- case SLJIT_EQUAL_F64:
+ case SLJIT_ATOMIC_STORED:
+ case SLJIT_F_EQUAL:
+ case SLJIT_ORDERED_EQUAL:
+ case SLJIT_UNORDERED_OR_EQUAL:
return 0x00000000;
case SLJIT_NOT_EQUAL:
- case SLJIT_NOT_EQUAL_F64:
+ case SLJIT_ATOMIC_NOT_STORED:
+ case SLJIT_F_NOT_EQUAL:
+ case SLJIT_UNORDERED_OR_NOT_EQUAL:
+ case SLJIT_ORDERED_NOT_EQUAL:
return 0x10000000;
+ case SLJIT_CARRY:
+ if (compiler->status_flags_state & SLJIT_CURRENT_FLAGS_ADD)
+ return 0x20000000;
+ /* fallthrough */
+
case SLJIT_LESS:
- case SLJIT_LESS_F64:
return 0x30000000;
+ case SLJIT_NOT_CARRY:
+ if (compiler->status_flags_state & SLJIT_CURRENT_FLAGS_ADD)
+ return 0x30000000;
+ /* fallthrough */
+
case SLJIT_GREATER_EQUAL:
- case SLJIT_GREATER_EQUAL_F64:
return 0x20000000;
case SLJIT_GREATER:
- case SLJIT_GREATER_F64:
+ case SLJIT_UNORDERED_OR_GREATER:
return 0x80000000;
case SLJIT_LESS_EQUAL:
- case SLJIT_LESS_EQUAL_F64:
+ case SLJIT_F_LESS_EQUAL:
+ case SLJIT_ORDERED_LESS_EQUAL:
return 0x90000000;
case SLJIT_SIG_LESS:
+ case SLJIT_UNORDERED_OR_LESS:
return 0xb0000000;
case SLJIT_SIG_GREATER_EQUAL:
+ case SLJIT_F_GREATER_EQUAL:
+ case SLJIT_ORDERED_GREATER_EQUAL:
return 0xa0000000;
case SLJIT_SIG_GREATER:
+ case SLJIT_F_GREATER:
+ case SLJIT_ORDERED_GREATER:
return 0xc0000000;
case SLJIT_SIG_LESS_EQUAL:
+ case SLJIT_UNORDERED_OR_LESS_EQUAL:
return 0xd0000000;
case SLJIT_OVERFLOW:
- if (!(compiler->status_flags_state & SLJIT_CURRENT_FLAGS_ADD_SUB))
+ if (!(compiler->status_flags_state & (SLJIT_CURRENT_FLAGS_ADD | SLJIT_CURRENT_FLAGS_SUB)))
return 0x10000000;
+ /* fallthrough */
- case SLJIT_UNORDERED_F64:
+ case SLJIT_UNORDERED:
return 0x60000000;
case SLJIT_NOT_OVERFLOW:
- if (!(compiler->status_flags_state & SLJIT_CURRENT_FLAGS_ADD_SUB))
+ if (!(compiler->status_flags_state & (SLJIT_CURRENT_FLAGS_ADD | SLJIT_CURRENT_FLAGS_SUB)))
return 0x00000000;
+ /* fallthrough */
- case SLJIT_ORDERED_F64:
+ case SLJIT_ORDERED:
return 0x70000000;
+ case SLJIT_F_LESS:
+ case SLJIT_ORDERED_LESS:
+ return 0x40000000;
+
+ case SLJIT_UNORDERED_OR_GREATER_EQUAL:
+ return 0x50000000;
+
default:
- SLJIT_ASSERT(type >= SLJIT_JUMP && type <= SLJIT_CALL_CDECL);
+ SLJIT_ASSERT(type >= SLJIT_JUMP && type <= SLJIT_CALL_REG_ARG);
return 0xe0000000;
}
}
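
For reference, the constants get_cc returns are the standard ARM condition field, bits 31..28. The mapping used above, as a C enum (architectural background, not part of the patch):

    enum arm_cond {                      /* value << 28 = condition field */
        COND_EQ = 0x0, COND_NE = 0x1,    /* equal / not equal             */
        COND_HS = 0x2, COND_LO = 0x3,    /* unsigned >= / <               */
        COND_MI = 0x4, COND_PL = 0x5,    /* negative / non-negative       */
        COND_VS = 0x6, COND_VC = 0x7,    /* overflow / no overflow        */
        COND_HI = 0x8, COND_LS = 0x9,    /* unsigned > / <=               */
        COND_GE = 0xa, COND_LT = 0xb,    /* signed >= / <                 */
        COND_GT = 0xc, COND_LE = 0xd,    /* signed > / <=                 */
        COND_AL = 0xe                    /* always                        */
    };
    /* e.g. get_cc(compiler, SLJIT_SIG_LESS) == (sljit_ins)COND_LT << 28 */
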
@@ -2247,7 +2971,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile
SLJIT_ASSERT(reg_map[TMP_REG1] != 14);
-#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+#if (defined SLJIT_CONFIG_ARM_V6 && SLJIT_CONFIG_ARM_V6)
if (type >= SLJIT_FAST_CALL)
PTR_FAIL_IF(prepare_blx(compiler));
PTR_FAIL_IF(push_inst_with_unique_literal(compiler, ((EMIT_DATA_TRANSFER(WORD_SIZE | LOAD_DATA, 1,
@@ -2265,123 +2989,136 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile
if (!(jump->flags & SLJIT_REWRITABLE_JUMP))
jump->addr = compiler->size;
-#else
+#else /* !SLJIT_CONFIG_ARM_V6 */
if (type >= SLJIT_FAST_CALL)
jump->flags |= IS_BL;
PTR_FAIL_IF(emit_imm(compiler, TMP_REG1, 0));
PTR_FAIL_IF(push_inst(compiler, (((type <= SLJIT_JUMP ? BX : BLX) | RM(TMP_REG1)) & ~COND_MASK) | get_cc(compiler, type)));
jump->addr = compiler->size;
-#endif
+#endif /* SLJIT_CONFIG_ARM_V6 */
return jump;
}
#ifdef __SOFTFP__
-static sljit_s32 softfloat_call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *src)
+static sljit_s32 softfloat_call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *src, sljit_u32 *extra_space)
{
- sljit_s32 stack_offset = 0;
- sljit_s32 arg_count = 0;
- sljit_s32 word_arg_offset = 0;
- sljit_s32 float_arg_count = 0;
+ sljit_u32 is_tail_call = *extra_space & SLJIT_CALL_RETURN;
+ sljit_u32 offset = 0;
+ sljit_u32 word_arg_offset = 0;
+ sljit_u32 src_offset = 4 * sizeof(sljit_sw);
+ sljit_u32 float_arg_count = 0;
sljit_s32 types = 0;
- sljit_s32 src_offset = 4 * sizeof(sljit_sw);
sljit_u8 offsets[4];
+ sljit_u8 *offset_ptr = offsets;
if (src && FAST_IS_REG(*src))
- src_offset = reg_map[*src] * sizeof(sljit_sw);
+ src_offset = (sljit_u32)reg_map[*src] * sizeof(sljit_sw);
- arg_types >>= SLJIT_DEF_SHIFT;
+ arg_types >>= SLJIT_ARG_SHIFT;
while (arg_types) {
- types = (types << SLJIT_DEF_SHIFT) | (arg_types & SLJIT_DEF_MASK);
+ types = (types << SLJIT_ARG_SHIFT) | (arg_types & SLJIT_ARG_MASK);
- switch (arg_types & SLJIT_DEF_MASK) {
- case SLJIT_ARG_TYPE_F32:
- offsets[arg_count] = (sljit_u8)stack_offset;
- stack_offset += sizeof(sljit_f32);
- arg_count++;
+ switch (arg_types & SLJIT_ARG_MASK) {
+ case SLJIT_ARG_TYPE_F64:
+ if (offset & 0x7)
+ offset += sizeof(sljit_sw);
+ *offset_ptr++ = (sljit_u8)offset;
+ offset += sizeof(sljit_f64);
float_arg_count++;
break;
- case SLJIT_ARG_TYPE_F64:
- if (stack_offset & 0x7)
- stack_offset += sizeof(sljit_sw);
- offsets[arg_count] = (sljit_u8)stack_offset;
- stack_offset += sizeof(sljit_f64);
- arg_count++;
+ case SLJIT_ARG_TYPE_F32:
+ *offset_ptr++ = (sljit_u8)offset;
+ offset += sizeof(sljit_f32);
float_arg_count++;
break;
default:
- offsets[arg_count] = (sljit_u8)stack_offset;
- stack_offset += sizeof(sljit_sw);
- arg_count++;
+ *offset_ptr++ = (sljit_u8)offset;
+ offset += sizeof(sljit_sw);
word_arg_offset += sizeof(sljit_sw);
break;
}
- arg_types >>= SLJIT_DEF_SHIFT;
+ arg_types >>= SLJIT_ARG_SHIFT;
}
- if (stack_offset > 16)
- FAIL_IF(push_inst(compiler, SUB | RD(SLJIT_SP) | RN(SLJIT_SP) | SRC2_IMM | (((stack_offset - 16) + 0x7) & ~0x7)));
+ if (offset > 4 * sizeof(sljit_sw) && (!is_tail_call || offset > compiler->args_size)) {
+ /* Keep lr register on the stack. */
+ if (is_tail_call)
+ offset += sizeof(sljit_sw);
+
+ offset = ((offset - 4 * sizeof(sljit_sw)) + 0x7) & ~(sljit_u32)0x7;
+
+ *extra_space = offset;
+
+ if (is_tail_call)
+ FAIL_IF(emit_stack_frame_release(compiler, (sljit_s32)offset));
+ else
+ FAIL_IF(push_inst(compiler, SUB | RD(SLJIT_SP) | RN(SLJIT_SP) | SRC2_IMM | offset));
+ } else {
+ if (is_tail_call)
+ FAIL_IF(emit_stack_frame_release(compiler, -1));
+ *extra_space = 0;
+ }
/* Process arguments in reversed direction. */
while (types) {
- switch (types & SLJIT_DEF_MASK) {
- case SLJIT_ARG_TYPE_F32:
- arg_count--;
+ switch (types & SLJIT_ARG_MASK) {
+ case SLJIT_ARG_TYPE_F64:
float_arg_count--;
- stack_offset = offsets[arg_count];
+ offset = *(--offset_ptr);
+
+ SLJIT_ASSERT((offset & 0x7) == 0);
- if (stack_offset < 16) {
- if (src_offset == stack_offset) {
+ if (offset < 4 * sizeof(sljit_sw)) {
+ if (src_offset == offset || src_offset == offset + sizeof(sljit_sw)) {
FAIL_IF(push_inst(compiler, MOV | RD(TMP_REG1) | (src_offset >> 2)));
*src = TMP_REG1;
}
- FAIL_IF(push_inst(compiler, VMOV | 0x100000 | (float_arg_count << 16) | (stack_offset << 10)));
+ FAIL_IF(push_inst(compiler, VMOV2 | 0x100000 | (offset << 10) | ((offset + sizeof(sljit_sw)) << 14) | float_arg_count));
} else
- FAIL_IF(push_inst(compiler, VSTR_F32 | 0x800000 | RN(SLJIT_SP) | (float_arg_count << 12) | ((stack_offset - 16) >> 2)));
+ FAIL_IF(push_inst(compiler, VSTR_F32 | 0x800100 | RN(SLJIT_SP)
+ | (float_arg_count << 12) | ((offset - 4 * sizeof(sljit_sw)) >> 2)));
break;
- case SLJIT_ARG_TYPE_F64:
- arg_count--;
+ case SLJIT_ARG_TYPE_F32:
float_arg_count--;
- stack_offset = offsets[arg_count];
+ offset = *(--offset_ptr);
- SLJIT_ASSERT((stack_offset & 0x7) == 0);
-
- if (stack_offset < 16) {
- if (src_offset == stack_offset || src_offset == stack_offset + sizeof(sljit_sw)) {
+ if (offset < 4 * sizeof(sljit_sw)) {
+ if (src_offset == offset) {
FAIL_IF(push_inst(compiler, MOV | RD(TMP_REG1) | (src_offset >> 2)));
*src = TMP_REG1;
}
- FAIL_IF(push_inst(compiler, VMOV2 | 0x100000 | (stack_offset << 10) | ((stack_offset + sizeof(sljit_sw)) << 14) | float_arg_count));
+ FAIL_IF(push_inst(compiler, VMOV | 0x100000 | (float_arg_count << 16) | (offset << 10)));
} else
- FAIL_IF(push_inst(compiler, VSTR_F32 | 0x800100 | RN(SLJIT_SP) | (float_arg_count << 12) | ((stack_offset - 16) >> 2)));
+ FAIL_IF(push_inst(compiler, VSTR_F32 | 0x800000 | RN(SLJIT_SP)
+ | (float_arg_count << 12) | ((offset - 4 * sizeof(sljit_sw)) >> 2)));
break;
default:
- arg_count--;
word_arg_offset -= sizeof(sljit_sw);
- stack_offset = offsets[arg_count];
+ offset = *(--offset_ptr);
- SLJIT_ASSERT(stack_offset >= word_arg_offset);
+ SLJIT_ASSERT(offset >= word_arg_offset);
- if (stack_offset != word_arg_offset) {
- if (stack_offset < 16) {
- if (src_offset == stack_offset) {
+ if (offset != word_arg_offset) {
+ if (offset < 4 * sizeof(sljit_sw)) {
+ if (src_offset == offset) {
FAIL_IF(push_inst(compiler, MOV | RD(TMP_REG1) | (src_offset >> 2)));
*src = TMP_REG1;
}
else if (src_offset == word_arg_offset) {
- *src = 1 + (stack_offset >> 2);
- src_offset = stack_offset;
+ *src = (sljit_s32)(SLJIT_R0 + (offset >> 2));
+ src_offset = offset;
}
- FAIL_IF(push_inst(compiler, MOV | (stack_offset << 10) | (word_arg_offset >> 2)));
+ FAIL_IF(push_inst(compiler, MOV | (offset << 10) | (word_arg_offset >> 2)));
} else
- FAIL_IF(push_inst(compiler, data_transfer_insts[WORD_SIZE] | 0x800000 | RN(SLJIT_SP) | (word_arg_offset << 10) | (stack_offset - 16)));
+ FAIL_IF(push_inst(compiler, STR | 0x800000 | RN(SLJIT_SP) | (word_arg_offset << 10) | (offset - 4 * sizeof(sljit_sw))));
}
break;
}
- types >>= SLJIT_DEF_SHIFT;
+ types >>= SLJIT_ARG_SHIFT;
}
return SLJIT_SUCCESS;
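
softfloat_call_with_args lays arguments out as if written to memory: doubles are 8-byte aligned, the first 16 bytes map to r0-r3, and anything beyond needs stack space (the SUB on SLJIT_SP above). A sketch of the offset computation under those assumptions:

    #include <stdio.h>

    enum arg_kind { ARG_WORD, ARG_F32, ARG_F64 };

    static unsigned layout(const enum arg_kind *args, int n, unsigned *offs)
    {
        unsigned offset = 0;

        for (int i = 0; i < n; i++) {
            if (args[i] == ARG_F64 && (offset & 0x7))
                offset += 4;                 /* 8-byte align doubles */
            offs[i] = offset;
            offset += (args[i] == ARG_F64) ? 8 : 4;
        }
        return offset;
    }

    int main(void)
    {
        enum arg_kind sig[] = { ARG_WORD, ARG_F64, ARG_F32, ARG_WORD };
        unsigned offs[4];
        unsigned total = layout(sig, 4, offs);

        for (int i = 0; i < 4; i++)          /* offsets 0, 8, 16, 20 */
            printf("arg %d: offset %u (%s)\n", i, offs[i],
                   offs[i] < 16 ? "r0-r3" : "stack");
        printf("stack bytes: %u\n", total > 16 ? total - 16 : 0);   /* 8 */
        return 0;
    }
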
@@ -2389,83 +3126,51 @@ static sljit_s32 softfloat_call_with_args(struct sljit_compiler *compiler, sljit
static sljit_s32 softfloat_post_call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types)
{
- sljit_s32 stack_size = 0;
-
- if ((arg_types & SLJIT_DEF_MASK) == SLJIT_ARG_TYPE_F32)
- FAIL_IF(push_inst(compiler, VMOV | (0 << 16) | (0 << 12)));
- if ((arg_types & SLJIT_DEF_MASK) == SLJIT_ARG_TYPE_F64)
+ if ((arg_types & SLJIT_ARG_MASK) == SLJIT_ARG_TYPE_F64)
FAIL_IF(push_inst(compiler, VMOV2 | (1 << 16) | (0 << 12) | 0));
+ if ((arg_types & SLJIT_ARG_MASK) == SLJIT_ARG_TYPE_F32)
+ FAIL_IF(push_inst(compiler, VMOV | (0 << 16) | (0 << 12)));
- arg_types >>= SLJIT_DEF_SHIFT;
-
- while (arg_types) {
- switch (arg_types & SLJIT_DEF_MASK) {
- case SLJIT_ARG_TYPE_F32:
- stack_size += sizeof(sljit_f32);
- break;
- case SLJIT_ARG_TYPE_F64:
- if (stack_size & 0x7)
- stack_size += sizeof(sljit_sw);
- stack_size += sizeof(sljit_f64);
- break;
- default:
- stack_size += sizeof(sljit_sw);
- break;
- }
-
- arg_types >>= SLJIT_DEF_SHIFT;
- }
-
- if (stack_size <= 16)
- return SLJIT_SUCCESS;
-
- return push_inst(compiler, ADD | RD(SLJIT_SP) | RN(SLJIT_SP) | SRC2_IMM | (((stack_size - 16) + 0x7) & ~0x7));
+ return SLJIT_SUCCESS;
}
#else /* !__SOFTFP__ */
static sljit_s32 hardfloat_call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types)
{
- sljit_u32 remap = 0;
- sljit_u32 offset = 0;
- sljit_u32 new_offset, mask;
+ sljit_u32 offset = SLJIT_FR0;
+ sljit_u32 new_offset = SLJIT_FR0;
+ sljit_u32 f32_offset = 0;
/* Remove return value. */
- arg_types >>= SLJIT_DEF_SHIFT;
+ arg_types >>= SLJIT_ARG_SHIFT;
while (arg_types) {
- if ((arg_types & SLJIT_DEF_MASK) == SLJIT_ARG_TYPE_F32) {
- new_offset = 0;
- mask = 1;
-
- while (remap & mask) {
- new_offset++;
- mask <<= 1;
- }
- remap |= mask;
-
+ switch (arg_types & SLJIT_ARG_MASK) {
+ case SLJIT_ARG_TYPE_F64:
if (offset != new_offset)
FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VMOV_F32,
- 0, (new_offset >> 1) + 1, (offset >> 1) + 1, 0) | ((new_offset & 0x1) ? 0x400000 : 0)));
-
- offset += 2;
- }
- else if ((arg_types & SLJIT_DEF_MASK) == SLJIT_ARG_TYPE_F64) {
- new_offset = 0;
- mask = 3;
+ SLJIT_32, new_offset, offset, 0)));
- while (remap & mask) {
- new_offset += 2;
- mask <<= 2;
+ new_offset++;
+ offset++;
+ break;
+ case SLJIT_ARG_TYPE_F32:
+ if (f32_offset != 0) {
+ FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VMOV_F32,
+ 0x400000, f32_offset, offset, 0)));
+ f32_offset = 0;
+ } else {
+ if (offset != new_offset)
+ FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VMOV_F32,
+ 0, new_offset, offset, 0)));
+ f32_offset = new_offset;
+ new_offset++;
}
- remap |= mask;
-
- if (offset != new_offset)
- FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VMOV_F32, SLJIT_F32_OP, (new_offset >> 1) + 1, (offset >> 1) + 1, 0)));
-
- offset += 2;
+ offset++;
+ break;
}
- arg_types >>= SLJIT_DEF_SHIFT;
+ arg_types >>= SLJIT_ARG_SHIFT;
}
return SLJIT_SUCCESS;
@@ -2473,41 +3178,60 @@ static sljit_s32 hardfloat_call_with_args(struct sljit_compiler *compiler, sljit
#endif /* __SOFTFP__ */
-#undef EMIT_FPU_OPERATION
-
SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type,
sljit_s32 arg_types)
{
#ifdef __SOFTFP__
struct sljit_jump *jump;
+ sljit_u32 extra_space = (sljit_u32)type;
#endif
CHECK_ERROR_PTR();
CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types));
#ifdef __SOFTFP__
- PTR_FAIL_IF(softfloat_call_with_args(compiler, arg_types, NULL));
+ if ((type & 0xff) != SLJIT_CALL_REG_ARG) {
+ PTR_FAIL_IF(softfloat_call_with_args(compiler, arg_types, NULL, &extra_space));
+ SLJIT_ASSERT((extra_space & 0x7) == 0);
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
- || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->skip_checks = 1;
-#endif
+ if ((type & SLJIT_CALL_RETURN) && extra_space == 0)
+ type = SLJIT_JUMP | (type & SLJIT_REWRITABLE_JUMP);
- jump = sljit_emit_jump(compiler, type);
- PTR_FAIL_IF(jump == NULL);
+ SLJIT_SKIP_CHECKS(compiler);
+ jump = sljit_emit_jump(compiler, type);
+ PTR_FAIL_IF(jump == NULL);
- PTR_FAIL_IF(softfloat_post_call_with_args(compiler, arg_types));
- return jump;
-#else /* !__SOFTFP__ */
- PTR_FAIL_IF(hardfloat_call_with_args(compiler, arg_types));
+ if (extra_space > 0) {
+ if (type & SLJIT_CALL_RETURN)
+ PTR_FAIL_IF(push_inst(compiler, EMIT_DATA_TRANSFER(WORD_SIZE | LOAD_DATA, 1,
+ TMP_REG2, SLJIT_SP, extra_space - sizeof(sljit_sw))));
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
- || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->skip_checks = 1;
-#endif
+ PTR_FAIL_IF(push_inst(compiler, ADD | RD(SLJIT_SP) | RN(SLJIT_SP) | SRC2_IMM | extra_space));
- return sljit_emit_jump(compiler, type);
+ if (type & SLJIT_CALL_RETURN) {
+ PTR_FAIL_IF(push_inst(compiler, BX | RM(TMP_REG2)));
+ return jump;
+ }
+ }
+
+ SLJIT_ASSERT(!(type & SLJIT_CALL_RETURN));
+ PTR_FAIL_IF(softfloat_post_call_with_args(compiler, arg_types));
+ return jump;
+ }
#endif /* __SOFTFP__ */
+
+ if (type & SLJIT_CALL_RETURN) {
+ PTR_FAIL_IF(emit_stack_frame_release(compiler, -1));
+ type = SLJIT_JUMP | (type & SLJIT_REWRITABLE_JUMP);
+ }
+
+#ifndef __SOFTFP__
+ if ((type & 0xff) != SLJIT_CALL_REG_ARG)
+ PTR_FAIL_IF(hardfloat_call_with_args(compiler, arg_types));
+#endif /* !__SOFTFP__ */
+
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_jump(compiler, type);
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw)
@@ -2520,7 +3244,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi
SLJIT_ASSERT(reg_map[TMP_REG1] != 14);
- if (!(src & SLJIT_IMM)) {
+ if (src != SLJIT_IMM) {
if (FAST_IS_REG(src)) {
SLJIT_ASSERT(reg_map[src] != 14);
return push_inst(compiler, (type <= SLJIT_JUMP ? BX : BLX) | RM(src));
@@ -2535,18 +3259,18 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi
jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
FAIL_IF(!jump);
set_jump(jump, compiler, JUMP_ADDR | ((type >= SLJIT_FAST_CALL) ? IS_BL : 0));
- jump->u.target = srcw;
+ jump->u.target = (sljit_uw)srcw;
-#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+#if (defined SLJIT_CONFIG_ARM_V6 && SLJIT_CONFIG_ARM_V6)
if (type >= SLJIT_FAST_CALL)
FAIL_IF(prepare_blx(compiler));
FAIL_IF(push_inst_with_unique_literal(compiler, EMIT_DATA_TRANSFER(WORD_SIZE | LOAD_DATA, 1, type <= SLJIT_JUMP ? TMP_PC : TMP_REG1, TMP_PC, 0), 0));
if (type >= SLJIT_FAST_CALL)
FAIL_IF(emit_blx(compiler));
-#else
+#else /* !SLJIT_CONFIG_ARM_V6 */
FAIL_IF(emit_imm(compiler, TMP_REG1, 0));
FAIL_IF(push_inst(compiler, (type <= SLJIT_JUMP ? BX : BLX) | RM(TMP_REG1)));
-#endif
+#endif /* SLJIT_CONFIG_ARM_V6 */
jump->addr = compiler->size;
return SLJIT_SUCCESS;
}
@@ -2555,50 +3279,104 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compi
sljit_s32 arg_types,
sljit_s32 src, sljit_sw srcw)
{
+#ifdef __SOFTFP__
+ sljit_u32 extra_space = (sljit_u32)type;
+#endif
+
CHECK_ERROR();
CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw));
-#ifdef __SOFTFP__
if (src & SLJIT_MEM) {
FAIL_IF(emit_op_mem(compiler, WORD_SIZE | LOAD_DATA, TMP_REG1, src, srcw, TMP_REG1));
src = TMP_REG1;
}
- FAIL_IF(softfloat_call_with_args(compiler, arg_types, &src));
+ if ((type & SLJIT_CALL_RETURN) && (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options)))) {
+ FAIL_IF(push_inst(compiler, MOV | RD(TMP_REG1) | RM(src)));
+ src = TMP_REG1;
+ }
+
+#ifdef __SOFTFP__
+ if ((type & 0xff) != SLJIT_CALL_REG_ARG) {
+ FAIL_IF(softfloat_call_with_args(compiler, arg_types, &src, &extra_space));
+ SLJIT_ASSERT((extra_space & 0x7) == 0);
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
- || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->skip_checks = 1;
-#endif
+ if ((type & SLJIT_CALL_RETURN) && extra_space == 0)
+ type = SLJIT_JUMP;
- FAIL_IF(sljit_emit_ijump(compiler, type, src, srcw));
+ SLJIT_SKIP_CHECKS(compiler);
+ FAIL_IF(sljit_emit_ijump(compiler, type, src, srcw));
- return softfloat_post_call_with_args(compiler, arg_types);
-#else /* !__SOFTFP__ */
- FAIL_IF(hardfloat_call_with_args(compiler, arg_types));
+ if (extra_space > 0) {
+ if (type & SLJIT_CALL_RETURN)
+ FAIL_IF(push_inst(compiler, EMIT_DATA_TRANSFER(WORD_SIZE | LOAD_DATA, 1,
+ TMP_REG2, SLJIT_SP, extra_space - sizeof(sljit_sw))));
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
- || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->skip_checks = 1;
-#endif
+ FAIL_IF(push_inst(compiler, ADD | RD(SLJIT_SP) | RN(SLJIT_SP) | SRC2_IMM | extra_space));
- return sljit_emit_ijump(compiler, type, src, srcw);
+ if (type & SLJIT_CALL_RETURN)
+ return push_inst(compiler, BX | RM(TMP_REG2));
+ }
+
+ SLJIT_ASSERT(!(type & SLJIT_CALL_RETURN));
+ return softfloat_post_call_with_args(compiler, arg_types);
+ }
#endif /* __SOFTFP__ */
+
+ if (type & SLJIT_CALL_RETURN) {
+ FAIL_IF(emit_stack_frame_release(compiler, -1));
+ type = SLJIT_JUMP;
+ }
+
+#ifndef __SOFTFP__
+ if ((type & 0xff) != SLJIT_CALL_REG_ARG)
+ FAIL_IF(hardfloat_call_with_args(compiler, arg_types));
+#endif /* !__SOFTFP__ */
+
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_ijump(compiler, type, src, srcw);
+}
+
+#ifdef __SOFTFP__
+
+static SLJIT_INLINE sljit_s32 emit_fmov_before_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw)
+{
+ if (compiler->options & SLJIT_ENTER_REG_ARG) {
+ if (src == SLJIT_FR0)
+ return SLJIT_SUCCESS;
+
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_fop1(compiler, op, SLJIT_RETURN_FREG, 0, src, srcw);
+ }
+
+ if (FAST_IS_REG(src)) {
+ if (op & SLJIT_32)
+ return push_inst(compiler, VMOV | (1 << 20) | RD(SLJIT_R0) | VN(src));
+ return push_inst(compiler, VMOV2 | (1 << 20) | RD(SLJIT_R0) | RN(SLJIT_R1) | VM(src));
+ }
+
+ SLJIT_SKIP_CHECKS(compiler);
+
+ if (op & SLJIT_32)
+ return sljit_emit_op1(compiler, SLJIT_MOV, SLJIT_R0, 0, src, srcw);
+ return sljit_emit_mem(compiler, SLJIT_MOV, SLJIT_REG_PAIR(SLJIT_R0, SLJIT_R1), src, srcw);
}
+#endif /* __SOFTFP__ */
+
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 type)
{
sljit_s32 dst_reg, flags = GET_ALL_FLAGS(op);
- sljit_uw cc, ins;
+ sljit_ins cc, ins;
CHECK_ERROR();
CHECK(check_sljit_emit_op_flags(compiler, op, dst, dstw, type));
ADJUST_LOCAL_OFFSET(dst, dstw);
op = GET_OPCODE(op);
- cc = get_cc(compiler, type & 0xff);
+ cc = get_cc(compiler, type);
dst_reg = FAST_IS_REG(dst) ? dst : TMP_REG1;
if (op < SLJIT_ADD) {
@@ -2627,41 +3405,159 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
return SLJIT_SUCCESS;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compiler, sljit_s32 type,
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_select(struct sljit_compiler *compiler, sljit_s32 type,
sljit_s32 dst_reg,
- sljit_s32 src, sljit_sw srcw)
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2_reg)
{
- sljit_uw cc, tmp;
+ sljit_ins cc, tmp;
CHECK_ERROR();
- CHECK(check_sljit_emit_cmov(compiler, type, dst_reg, src, srcw));
+ CHECK(check_sljit_emit_select(compiler, type, dst_reg, src1, src1w, src2_reg));
+
+ ADJUST_LOCAL_OFFSET(src1, src1w);
+
+ if (src2_reg != dst_reg && src1 == dst_reg) {
+ src1 = src2_reg;
+ src1w = 0;
+ src2_reg = dst_reg;
+ type ^= 0x1;
+ }
+
+ if (src1 & SLJIT_MEM) {
+ FAIL_IF(emit_op_mem(compiler, WORD_SIZE | LOAD_DATA, (src2_reg != dst_reg) ? dst_reg : TMP_REG1, src1, src1w, TMP_REG2));
- dst_reg &= ~SLJIT_I32_OP;
+ if (src2_reg != dst_reg) {
+ src1 = src2_reg;
+ src1w = 0;
+ type ^= 0x1;
+ } else {
+ src1 = TMP_REG1;
+ src1w = 0;
+ }
+ } else if (dst_reg != src2_reg)
+ FAIL_IF(push_inst(compiler, MOV | RD(dst_reg) | RM(src2_reg)));
- cc = get_cc(compiler, type & 0xff);
+ cc = get_cc(compiler, type & ~SLJIT_32);
- if (SLJIT_UNLIKELY(src & SLJIT_IMM)) {
- tmp = get_imm(srcw);
+ if (SLJIT_UNLIKELY(src1 == SLJIT_IMM)) {
+ tmp = get_imm((sljit_uw)src1w);
if (tmp)
return push_inst(compiler, ((MOV | RD(dst_reg) | tmp) & ~COND_MASK) | cc);
- tmp = get_imm(~srcw);
+ tmp = get_imm(~(sljit_uw)src1w);
if (tmp)
return push_inst(compiler, ((MVN | RD(dst_reg) | tmp) & ~COND_MASK) | cc);
#if (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)
- tmp = (sljit_uw) srcw;
+ tmp = (sljit_ins)src1w;
FAIL_IF(push_inst(compiler, (MOVW & ~COND_MASK) | cc | RD(dst_reg) | ((tmp << 4) & 0xf0000) | (tmp & 0xfff)));
if (tmp <= 0xffff)
return SLJIT_SUCCESS;
return push_inst(compiler, (MOVT & ~COND_MASK) | cc | RD(dst_reg) | ((tmp >> 12) & 0xf0000) | ((tmp >> 16) & 0xfff));
-#else
- FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
- src = TMP_REG1;
-#endif
+#else /* !SLJIT_CONFIG_ARM_V7 */
+ FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_uw)src1w));
+ src1 = TMP_REG1;
+#endif /* SLJIT_CONFIG_ARM_V7 */
+ }
+
+ return push_inst(compiler, ((MOV | RD(dst_reg) | RM(src1)) & ~COND_MASK) | cc);
+}
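
sljit_emit_select moves src2_reg into dst_reg unconditionally and src1 under the condition, so when dst_reg already holds src1 the operands are swapped and the condition inverted; SLJIT condition codes come in even/odd complement pairs, so type ^ 0x1 is the inverse test (e.g. EQUAL and NOT_EQUAL). A sketch of the computed result:

    #include <stdio.h>

    static int select_sketch(int cond_true, int src1, int src2)
    {
        int dst = src2;          /* MOV   dst, src2  (unconditional) */
        if (cond_true)
            dst = src1;          /* MOVcc dst, src1  (predicated)    */
        return dst;
    }

    int main(void)
    {
        printf("%d %d\n", select_sketch(1, 10, 20),
                          select_sketch(0, 10, 20));   /* 10 20 */
        return 0;
    }
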
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fselect(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 dst_freg,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2_freg)
+{
+ sljit_ins cc;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fselect(compiler, type, dst_freg, src1, src1w, src2_freg));
+
+ ADJUST_LOCAL_OFFSET(src1, src1w);
+
+ type ^= SLJIT_32;
+
+ if (dst_freg != src2_freg) {
+ if (dst_freg == src1) {
+ src1 = src2_freg;
+ src1w = 0;
+ type ^= 0x1;
+ } else
+ FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VMOV_F32, (type & SLJIT_32), dst_freg, src2_freg, 0)));
+ }
+
+ if (src1 & SLJIT_MEM) {
+ FAIL_IF(emit_fop_mem(compiler, (type & SLJIT_32) | FPU_LOAD, TMP_FREG1, src1, src1w));
+ src1 = TMP_FREG1;
+ }
+
+ cc = get_cc(compiler, type & ~SLJIT_32);
+ return push_inst(compiler, EMIT_FPU_OPERATION((VMOV_F32 & ~COND_MASK) | cc, (type & SLJIT_32), dst_freg, src1, 0));
+}
+
+#undef EMIT_FPU_OPERATION
+
+static sljit_s32 update_mem_addr(struct sljit_compiler *compiler, sljit_s32 *mem, sljit_sw *memw, sljit_s32 max_offset)
+{
+ sljit_s32 arg = *mem;
+ sljit_sw argw = *memw;
+ sljit_uw imm, tmp;
+ sljit_sw mask = 0xfff;
+ sljit_sw sign = 0x1000;
+
+ SLJIT_ASSERT(max_offset >= 0xf00);
+
+ *mem = TMP_REG1;
+
+ if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) {
+ *memw = 0;
+ return push_inst(compiler, ADD | RD(TMP_REG1) | RN(arg & REG_MASK) | RM(OFFS_REG(arg)) | ((sljit_ins)(argw & 0x3) << 7));
+ }
+
+ arg &= REG_MASK;
+
+ if (arg) {
+ if (argw <= max_offset && argw >= -mask) {
+ *mem = arg;
+ return SLJIT_SUCCESS;
+ }
+
+ if (argw >= 0) {
+ tmp = (sljit_uw)(argw & (sign | mask));
+ tmp = (sljit_uw)((argw + ((tmp <= (sljit_uw)max_offset || tmp == (sljit_uw)sign) ? 0 : sign)) & ~mask);
+ imm = get_imm(tmp);
+
+ if (imm) {
+ *memw = argw - (sljit_sw)tmp;
+ SLJIT_ASSERT(*memw >= -mask && *memw <= max_offset);
+
+ return push_inst(compiler, ADD | RD(TMP_REG1) | RN(arg) | imm);
+ }
+ } else {
+ tmp = (sljit_uw)(-argw & (sign | mask));
+ tmp = (sljit_uw)((-argw + ((tmp <= (sljit_uw)((sign << 1) - max_offset - 1)) ? 0 : sign)) & ~mask);
+ imm = get_imm(tmp);
+
+ if (imm) {
+ *memw = argw + (sljit_sw)tmp;
+ SLJIT_ASSERT(*memw >= -mask && *memw <= max_offset);
+
+ return push_inst(compiler, SUB | RD(TMP_REG1) | RN(arg) | imm);
+ }
+ }
}
- return push_inst(compiler, ((MOV | RD(dst_reg) | RM(src)) & ~COND_MASK) | cc);
+ tmp = (sljit_uw)(argw & (sign | mask));
+ tmp = (sljit_uw)((argw + ((tmp <= (sljit_uw)max_offset || tmp == (sljit_uw)sign) ? 0 : sign)) & ~mask);
+ *memw = argw - (sljit_sw)tmp;
+
+ FAIL_IF(load_immediate(compiler, TMP_REG1, tmp));
+
+ if (arg == 0)
+ return SLJIT_SUCCESS;
+
+ return push_inst(compiler, ADD | RD(TMP_REG1) | RN(TMP_REG1) | RM(arg));
}
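
A standalone sketch (not part of the patch) of the offset split performed above for a positive argw: the base adjustment is rounded to a 4 KB multiple, with an optional sign nudge, so the residual stays within [-0xfff, max_offset]; the sample value is illustrative:

#include <stdio.h>

int main(void)
{
	long argw = 0x12345;
	long mask = 0xfff, sign = 0x1000, max_offset = 0xfff - 4;
	long tmp;

	tmp = argw & (sign | mask);
	tmp = (argw + ((tmp <= max_offset || tmp == sign) ? 0 : sign)) & ~mask;

	/* base += 0x12000, residual = 0x345, within [-0xfff, 0xffb] */
	printf("base += %#lx, residual = %#lx\n", tmp, argw - tmp);
	return 0;
}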
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type,
@@ -2669,17 +3565,49 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compile
sljit_s32 mem, sljit_sw memw)
{
sljit_s32 flags;
- sljit_uw is_type1_transfer, inst;
CHECK_ERROR();
CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw));
+ if (!(reg & REG_PAIR_MASK))
+ return sljit_emit_mem_unaligned(compiler, type, reg, mem, memw);
+
+ ADJUST_LOCAL_OFFSET(mem, memw);
+
+ FAIL_IF(update_mem_addr(compiler, &mem, &memw, 0xfff - 4));
+
+ flags = WORD_SIZE;
+
+ if (!(type & SLJIT_MEM_STORE)) {
+ if (REG_PAIR_FIRST(reg) == (mem & REG_MASK)) {
+ FAIL_IF(emit_op_mem(compiler, WORD_SIZE | LOAD_DATA, REG_PAIR_SECOND(reg), SLJIT_MEM1(mem), memw + SSIZE_OF(sw), TMP_REG1));
+ return emit_op_mem(compiler, WORD_SIZE | LOAD_DATA, REG_PAIR_FIRST(reg), SLJIT_MEM1(mem), memw, TMP_REG1);
+ }
+
+ flags = WORD_SIZE | LOAD_DATA;
+ }
+
+ FAIL_IF(emit_op_mem(compiler, flags, REG_PAIR_FIRST(reg), SLJIT_MEM1(mem), memw, TMP_REG1));
+ return emit_op_mem(compiler, flags, REG_PAIR_SECOND(reg), SLJIT_MEM1(mem), memw + SSIZE_OF(sw), TMP_REG1);
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem_update(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 reg,
+ sljit_s32 mem, sljit_sw memw)
+{
+ sljit_s32 flags;
+ sljit_ins is_type1_transfer, inst;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_mem_update(compiler, type, reg, mem, memw));
+
is_type1_transfer = 1;
switch (type & 0xff) {
case SLJIT_MOV:
case SLJIT_MOV_U32:
case SLJIT_MOV_S32:
+ case SLJIT_MOV32:
case SLJIT_MOV_P:
flags = WORD_SIZE;
break;
@@ -2713,16 +3641,12 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compile
if (SLJIT_UNLIKELY(mem & OFFS_REG_MASK)) {
if (!is_type1_transfer && memw != 0)
return SLJIT_ERR_UNSUPPORTED;
- }
- else {
+ } else {
if (is_type1_transfer) {
if (memw > 4095 || memw < -4095)
return SLJIT_ERR_UNSUPPORTED;
- }
- else {
- if (memw > 255 || memw < -255)
- return SLJIT_ERR_UNSUPPORTED;
- }
+ } else if (memw > 255 || memw < -255)
+ return SLJIT_ERR_UNSUPPORTED;
}
if (type & SLJIT_MEM_SUPP)
@@ -2731,25 +3655,25 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compile
if (SLJIT_UNLIKELY(mem & OFFS_REG_MASK)) {
memw &= 0x3;
- inst = EMIT_DATA_TRANSFER(flags, 1, reg, mem & REG_MASK, RM(OFFS_REG(mem)) | (memw << 7));
+ inst = EMIT_DATA_TRANSFER(flags, 1, reg, mem & REG_MASK, RM(OFFS_REG(mem)) | ((sljit_ins)memw << 7));
if (is_type1_transfer)
inst |= (1 << 25);
- if (type & SLJIT_MEM_PRE)
- inst |= (1 << 21);
- else
+ if (type & SLJIT_MEM_POST)
inst ^= (1 << 24);
+ else
+ inst |= (1 << 21);
return push_inst(compiler, inst);
}
inst = EMIT_DATA_TRANSFER(flags, 0, reg, mem & REG_MASK, 0);
- if (type & SLJIT_MEM_PRE)
- inst |= (1 << 21);
- else
+ if (type & SLJIT_MEM_POST)
inst ^= (1 << 24);
+ else
+ inst |= (1 << 21);
if (is_type1_transfer) {
if (memw >= 0)
@@ -2757,7 +3681,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compile
else
memw = -memw;
- return push_inst(compiler, inst | memw);
+ return push_inst(compiler, inst | (sljit_ins)memw);
}
if (memw >= 0)
@@ -2765,7 +3689,750 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compile
else
memw = -memw;
- return push_inst(compiler, inst | TYPE2_TRANSFER_IMM(memw));
+ return push_inst(compiler, inst | TYPE2_TRANSFER_IMM((sljit_ins)memw));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 mem, sljit_sw memw)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fmem(compiler, type, freg, mem, memw));
+
+ if (type & SLJIT_MEM_ALIGNED_32)
+ return emit_fop_mem(compiler, ((type ^ SLJIT_32) & SLJIT_32) | ((type & SLJIT_MEM_STORE) ? 0 : FPU_LOAD), freg, mem, memw);
+
+ if (type & SLJIT_MEM_STORE) {
+ FAIL_IF(push_inst(compiler, VMOV | (1 << 20) | VN(freg) | RD(TMP_REG2)));
+
+ if (type & SLJIT_32)
+ return emit_op_mem(compiler, WORD_SIZE, TMP_REG2, mem, memw, TMP_REG1);
+
+ FAIL_IF(update_mem_addr(compiler, &mem, &memw, 0xfff - 4));
+ mem |= SLJIT_MEM;
+
+ FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG2, mem, memw, TMP_REG1));
+ FAIL_IF(push_inst(compiler, VMOV | (1 << 20) | VN(freg) | 0x80 | RD(TMP_REG2)));
+ return emit_op_mem(compiler, WORD_SIZE, TMP_REG2, mem, memw + 4, TMP_REG1);
+ }
+
+ if (type & SLJIT_32) {
+ FAIL_IF(emit_op_mem(compiler, WORD_SIZE | LOAD_DATA, TMP_REG2, mem, memw, TMP_REG1));
+ return push_inst(compiler, VMOV | VN(freg) | RD(TMP_REG2));
+ }
+
+ FAIL_IF(update_mem_addr(compiler, &mem, &memw, 0xfff - 4));
+ mem |= SLJIT_MEM;
+
+ FAIL_IF(emit_op_mem(compiler, WORD_SIZE | LOAD_DATA, TMP_REG2, mem, memw, TMP_REG1));
+ FAIL_IF(emit_op_mem(compiler, WORD_SIZE | LOAD_DATA, TMP_REG1, mem, memw + 4, TMP_REG1));
+ return push_inst(compiler, VMOV2 | VM(freg) | RD(TMP_REG2) | RN(TMP_REG1));
+}
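
For the unaligned f64 path above, a host-side sketch (not part of the patch) of the two 32-bit halves that travel through the integer registers before VMOV2; little-endian ordering assumed:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	double d = 3.5;
	uint32_t half[2];

	memcpy(half, &d, sizeof(half)); /* half[0] is the low word on little-endian */
	printf("lo = 0x%08x, hi = 0x%08x\n", half[0], half[1]);
	return 0;
}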
+
+static sljit_s32 sljit_emit_simd_mem_offset(struct sljit_compiler *compiler, sljit_s32 *mem_ptr, sljit_sw memw)
+{
+ sljit_s32 mem = *mem_ptr;
+ sljit_uw imm;
+
+ if (SLJIT_UNLIKELY(mem & OFFS_REG_MASK)) {
+ *mem_ptr = TMP_REG1;
+ return push_inst(compiler, ADD | RD(TMP_REG1) | RN(mem & REG_MASK) | RM(OFFS_REG(mem)) | ((sljit_ins)(memw & 0x3) << 7));
+ }
+
+ if (SLJIT_UNLIKELY(!(mem & REG_MASK))) {
+ *mem_ptr = TMP_REG1;
+ return load_immediate(compiler, TMP_REG1, (sljit_uw)memw);
+ }
+
+ mem &= REG_MASK;
+
+ if (memw == 0) {
+ *mem_ptr = mem;
+ return SLJIT_SUCCESS;
+ }
+
+ *mem_ptr = TMP_REG1;
+ imm = get_imm((sljit_uw)(memw < 0 ? -memw : memw));
+
+ if (imm != 0)
+ return push_inst(compiler, ((memw < 0) ? SUB : ADD) | RD(TMP_REG1) | RN(mem) | imm);
+
+ FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_uw)memw));
+ return push_inst(compiler, ADD | RD(TMP_REG1) | RN(TMP_REG1) | RM(mem));
+}
+
+static SLJIT_INLINE sljit_s32 simd_get_quad_reg_index(sljit_s32 freg)
+{
+ freg += freg & 0x1;
+
+ SLJIT_ASSERT((freg_map[freg] & 0x1) == (freg <= SLJIT_NUMBER_OF_SCRATCH_FLOAT_REGISTERS));
+
+ if (freg <= SLJIT_NUMBER_OF_SCRATCH_FLOAT_REGISTERS)
+ freg--;
+
+ return freg;
+}
+
+#define SLJIT_QUAD_OTHER_HALF(freg) ((((freg) & 0x1) << 1) - 1)
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_mov(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 srcdst, sljit_sw srcdstw)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_s32 alignment = SLJIT_SIMD_GET_ELEM2_SIZE(type);
+ sljit_ins ins;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_mov(compiler, type, freg, srcdst, srcdstw));
+
+ ADJUST_LOCAL_OFFSET(srcdst, srcdstw);
+
+ if (reg_size != 3 && reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if ((type & SLJIT_SIMD_FLOAT) && (elem_size < 2 || elem_size > 3))
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ if (reg_size == 4)
+ freg = simd_get_quad_reg_index(freg);
+
+ if (!(srcdst & SLJIT_MEM)) {
+ if (reg_size == 4)
+ srcdst = simd_get_quad_reg_index(srcdst);
+
+ if (type & SLJIT_SIMD_STORE)
+ ins = VD(srcdst) | VN(freg) | VM(freg);
+ else
+ ins = VD(freg) | VN(srcdst) | VM(srcdst);
+
+ if (reg_size == 4)
+ ins |= (sljit_ins)1 << 6;
+
+ return push_inst(compiler, VORR | ins);
+ }
+
+ FAIL_IF(sljit_emit_simd_mem_offset(compiler, &srcdst, srcdstw));
+
+ if (elem_size > 3)
+ elem_size = 3;
+
+ ins = ((type & SLJIT_SIMD_STORE) ? VST1 : VLD1) | VD(freg)
+ | (sljit_ins)((reg_size == 3) ? (0x7 << 8) : (0xa << 8));
+
+ SLJIT_ASSERT(reg_size >= alignment);
+
+ if (alignment == 3)
+ ins |= 0x10;
+ else if (alignment >= 3)
+ ins |= 0x20;
+
+ return push_inst(compiler, ins | RN(srcdst) | ((sljit_ins)elem_size) << 6 | 0xf);
+}
+
+static sljit_ins simd_get_imm(sljit_s32 elem_size, sljit_uw value)
+{
+ sljit_ins result;
+
+ if (elem_size > 1 && (sljit_u16)value == (value >> 16)) {
+ elem_size = 1;
+ value = (sljit_u16)value;
+ }
+
+ if (elem_size == 1 && (sljit_u8)value == (value >> 8)) {
+ elem_size = 0;
+ value = (sljit_u8)value;
+ }
+
+ switch (elem_size) {
+ case 0:
+ SLJIT_ASSERT(value <= 0xff);
+ result = 0xe00;
+ break;
+ case 1:
+ SLJIT_ASSERT(value <= 0xffff);
+ result = 0;
+
+ while (1) {
+ if (value <= 0xff) {
+ result |= 0x800;
+ break;
+ }
+
+ if ((value & 0xff) == 0) {
+ value >>= 8;
+ result |= 0xa00;
+ break;
+ }
+
+ if (result != 0)
+ return ~(sljit_ins)0;
+
+ value ^= (sljit_uw)0xffff;
+ result = (1 << 5);
+ }
+ break;
+ default:
+ SLJIT_ASSERT(value <= 0xffffffff);
+ result = 0;
+
+ while (1) {
+ if (value <= 0xff) {
+ result |= 0x000;
+ break;
+ }
+
+ if ((value & ~(sljit_uw)0xff00) == 0) {
+ value >>= 8;
+ result |= 0x200;
+ break;
+ }
+
+ if ((value & ~(sljit_uw)0xff0000) == 0) {
+ value >>= 16;
+ result |= 0x400;
+ break;
+ }
+
+ if ((value & ~(sljit_uw)0xff000000) == 0) {
+ value >>= 24;
+ result |= 0x600;
+ break;
+ }
+
+ if ((value & (sljit_uw)0xff) == 0xff && (value >> 16) == 0) {
+ value >>= 8;
+ result |= 0xc00;
+ break;
+ }
+
+ if ((value & (sljit_uw)0xffff) == 0xffff && (value >> 24) == 0) {
+ value >>= 16;
+ result |= 0xd00;
+ break;
+ }
+
+ if (result != 0)
+ return ~(sljit_ins)0;
+
+ value = ~value;
+ result = (1 << 5);
+ }
+ break;
+ }
+
+ return ((sljit_ins)value & 0xf) | (((sljit_ins)value & 0x70) << 12) | (((sljit_ins)value & 0x80) << 17) | result;
+}
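
A standalone sketch (not part of the patch) of the 32-bit classification simd_get_imm performs, mapped to the result codes used above; only the non-inverted byte-shift cases are shown:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t v = 0x00ff0000;

	if (v <= 0xff)
		puts("byte at bits 7..0, result 0x000");
	else if ((v & ~(uint32_t)0xff00) == 0)
		puts("byte shifted by 8, result 0x200");
	else if ((v & ~(uint32_t)0xff0000) == 0)
		puts("byte shifted by 16, result 0x400"); /* taken for 0x00ff0000 */
	else if ((v & ~(uint32_t)0xff000000) == 0)
		puts("byte shifted by 24, result 0x600");
	else
		puts("try the 0xff-filled or inverted forms, else unencodable");
	return 0;
}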
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_replicate(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 src, sljit_sw srcw)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_ins ins, imm;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_replicate(compiler, type, freg, src, srcw));
+
+ ADJUST_LOCAL_OFFSET(src, srcw);
+
+ if (reg_size != 3 && reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if ((type & SLJIT_SIMD_FLOAT) ? (elem_size < 2 || elem_size > 3) : (elem_size > 2))
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ if (reg_size == 4)
+ freg = simd_get_quad_reg_index(freg);
+
+ if (src == SLJIT_IMM && srcw == 0)
+ return push_inst(compiler, VMOV_i | ((reg_size == 4) ? (1 << 6) : 0) | VD(freg));
+
+ if (SLJIT_UNLIKELY(elem_size == 3)) {
+ SLJIT_ASSERT(type & SLJIT_SIMD_FLOAT);
+
+ if (src & SLJIT_MEM) {
+ FAIL_IF(emit_fop_mem(compiler, FPU_LOAD | SLJIT_32, freg, src, srcw));
+ src = freg;
+ } else if (freg != src)
+ FAIL_IF(push_inst(compiler, VORR | VD(freg) | VN(src) | VM(src)));
+
+ freg += SLJIT_QUAD_OTHER_HALF(freg);
+
+ if (freg != src)
+ return push_inst(compiler, VORR | VD(freg) | VN(src) | VM(src));
+ return SLJIT_SUCCESS;
+ }
+
+ if (src & SLJIT_MEM) {
+ FAIL_IF(sljit_emit_simd_mem_offset(compiler, &src, srcw));
+
+ ins = (sljit_ins)(elem_size << 6);
+
+ if (reg_size == 4)
+ ins |= (sljit_ins)1 << 5;
+
+ return push_inst(compiler, VLD1_r | ins | VD(freg) | RN(src) | 0xf);
+ }
+
+ if (type & SLJIT_SIMD_FLOAT) {
+ SLJIT_ASSERT(elem_size == 2);
+ ins = ((sljit_ins)freg_ebit_map[src] << (16 + 2 + 1)) | ((sljit_ins)1 << (16 + 2));
+
+ if (reg_size == 4)
+ ins |= (sljit_ins)1 << 6;
+
+ return push_inst(compiler, VDUP_s | ins | VD(freg) | (sljit_ins)freg_map[src]);
+ }
+
+ if (src == SLJIT_IMM) {
+ if (elem_size < 2)
+ srcw &= ((sljit_sw)1 << (((sljit_sw)1 << elem_size) << 3)) - 1;
+
+ imm = simd_get_imm(elem_size, (sljit_uw)srcw);
+
+ if (imm != ~(sljit_ins)0) {
+ if (reg_size == 4)
+ imm |= (sljit_ins)1 << 6;
+
+ return push_inst(compiler, VMOV_i | imm | VD(freg));
+ }
+
+ FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_uw)srcw));
+ src = TMP_REG1;
+ }
+
+ switch (elem_size) {
+ case 0:
+ ins = 1 << 22;
+ break;
+ case 1:
+ ins = 1 << 5;
+ break;
+ default:
+ ins = 0;
+ break;
+ }
+
+ if (reg_size == 4)
+ ins |= (sljit_ins)1 << 21;
+
+ return push_inst(compiler, VDUP | ins | VN(freg) | RD(src));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_lane_mov(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg, sljit_s32 lane_index,
+ sljit_s32 srcdst, sljit_sw srcdstw)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_ins ins;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_lane_mov(compiler, type, freg, lane_index, srcdst, srcdstw));
+
+ ADJUST_LOCAL_OFFSET(srcdst, srcdstw);
+
+ if (reg_size != 3 && reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if ((type & SLJIT_SIMD_FLOAT) ? (elem_size < 2 || elem_size > 3) : (elem_size > 2))
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ if (reg_size == 4)
+ freg = simd_get_quad_reg_index(freg);
+
+ if (type & SLJIT_SIMD_LANE_ZERO) {
+ ins = (reg_size == 3) ? 0 : ((sljit_ins)1 << 6);
+
+ if (type & SLJIT_SIMD_FLOAT) {
+ if (elem_size == 3 && !(srcdst & SLJIT_MEM)) {
+ if (lane_index == 1)
+ freg += SLJIT_QUAD_OTHER_HALF(freg);
+
+ if (srcdst != freg)
+ FAIL_IF(push_inst(compiler, VORR | VD(freg) | VN(srcdst) | VM(srcdst)));
+
+ freg += SLJIT_QUAD_OTHER_HALF(freg);
+ return push_inst(compiler, VMOV_i | VD(freg));
+ }
+
+ if (srcdst == freg || (elem_size == 3 && srcdst == (freg + SLJIT_QUAD_OTHER_HALF(freg)))) {
+ FAIL_IF(push_inst(compiler, VORR | ins | VD(TMP_FREG2) | VN(freg) | VM(freg)));
+ srcdst = TMP_FREG2;
+ srcdstw = 0;
+ }
+ }
+
+ FAIL_IF(push_inst(compiler, VMOV_i | ins | VD(freg)));
+ }
+
+ if (reg_size == 4 && lane_index >= (0x8 >> elem_size)) {
+ lane_index -= (0x8 >> elem_size);
+ freg += SLJIT_QUAD_OTHER_HALF(freg);
+ }
+
+ if (srcdst & SLJIT_MEM) {
+ if (elem_size == 3)
+ return emit_fop_mem(compiler, ((type & SLJIT_SIMD_STORE) ? 0 : FPU_LOAD) | SLJIT_32, freg, srcdst, srcdstw);
+
+ FAIL_IF(sljit_emit_simd_mem_offset(compiler, &srcdst, srcdstw));
+
+ lane_index = lane_index << elem_size;
+ ins = (sljit_ins)((elem_size << 10) | (lane_index << 5));
+ return push_inst(compiler, ((type & SLJIT_SIMD_STORE) ? VST1_s : VLD1_s) | ins | VD(freg) | RN(srcdst) | 0xf);
+ }
+
+ if (type & SLJIT_SIMD_FLOAT) {
+ if (elem_size == 3) {
+ if (type & SLJIT_SIMD_STORE)
+ return push_inst(compiler, VORR | VD(srcdst) | VN(freg) | VM(freg));
+ return push_inst(compiler, VMOV_F32 | SLJIT_32 | VD(freg) | VM(srcdst));
+ }
+
+ if (type & SLJIT_SIMD_STORE) {
+ if (freg_ebit_map[freg] == 0) {
+ if (lane_index == 1)
+ freg = SLJIT_F64_SECOND(freg);
+
+ return push_inst(compiler, VMOV_F32 | VD(srcdst) | VM(freg));
+ }
+
+ FAIL_IF(push_inst(compiler, VMOV_s | (1 << 20) | ((sljit_ins)lane_index << 21) | VN(freg) | RD(TMP_REG1)));
+ return push_inst(compiler, VMOV | VN(srcdst) | RD(TMP_REG1));
+ }
+
+ FAIL_IF(push_inst(compiler, VMOV | (1 << 20) | VN(srcdst) | RD(TMP_REG1)));
+ return push_inst(compiler, VMOV_s | ((sljit_ins)lane_index << 21) | VN(freg) | RD(TMP_REG1));
+ }
+
+ if (srcdst == SLJIT_IMM) {
+ if (elem_size < 2)
+ srcdstw &= ((sljit_sw)1 << (((sljit_sw)1 << elem_size) << 3)) - 1;
+
+ FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_uw)srcdstw));
+ srcdst = TMP_REG1;
+ }
+
+ if (elem_size == 0)
+ ins = 0x400000;
+ else if (elem_size == 1)
+ ins = 0x20;
+ else
+ ins = 0;
+
+ lane_index = lane_index << elem_size;
+ ins |= (sljit_ins)(((lane_index & 0x4) << 19) | ((lane_index & 0x3) << 5));
+
+ if (type & SLJIT_SIMD_STORE) {
+ ins |= (1 << 20);
+
+ if (elem_size < 2 && !(type & SLJIT_SIMD_LANE_SIGNED))
+ ins |= (1 << 23);
+ }
+
+ return push_inst(compiler, VMOV_s | ins | VN(freg) | RD(srcdst));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_lane_replicate(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 src, sljit_s32 src_lane_index)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_ins ins;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_lane_replicate(compiler, type, freg, src, src_lane_index));
+
+ if (reg_size != 3 && reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if ((type & SLJIT_SIMD_FLOAT) && (elem_size < 2 || elem_size > 3))
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ if (reg_size == 4) {
+ freg = simd_get_quad_reg_index(freg);
+ src = simd_get_quad_reg_index(src);
+
+ if (src_lane_index >= (0x8 >> elem_size)) {
+ src_lane_index -= (0x8 >> elem_size);
+ src += SLJIT_QUAD_OTHER_HALF(src);
+ }
+ }
+
+ if (elem_size == 3) {
+ if (freg != src)
+ FAIL_IF(push_inst(compiler, VORR | VD(freg) | VN(src) | VM(src)));
+
+ freg += SLJIT_QUAD_OTHER_HALF(freg);
+
+ if (freg != src)
+ return push_inst(compiler, VORR | VD(freg) | VN(src) | VM(src));
+ return SLJIT_SUCCESS;
+ }
+
+ ins = ((((sljit_ins)src_lane_index << 1) | 1) << (16 + elem_size));
+
+ if (reg_size == 4)
+ ins |= (sljit_ins)1 << 6;
+
+ return push_inst(compiler, VDUP_s | ins | VD(freg) | VM(src));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_extend(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 src, sljit_sw srcw)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_s32 elem2_size = SLJIT_SIMD_GET_ELEM2_SIZE(type);
+ sljit_s32 dst_reg;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_extend(compiler, type, freg, src, srcw));
+
+ ADJUST_LOCAL_OFFSET(src, srcw);
+
+ if (reg_size != 3 && reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if ((type & SLJIT_SIMD_FLOAT) && (elem_size != 2 || elem2_size != 3))
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ if (reg_size == 4)
+ freg = simd_get_quad_reg_index(freg);
+
+ if (src & SLJIT_MEM) {
+ FAIL_IF(sljit_emit_simd_mem_offset(compiler, &src, srcw));
+ if (reg_size == 4 && elem2_size - elem_size == 1)
+ FAIL_IF(push_inst(compiler, VLD1 | (0x7 << 8) | VD(freg) | RN(src) | 0xf));
+ else
+ FAIL_IF(push_inst(compiler, VLD1_s | (sljit_ins)((reg_size - elem2_size + elem_size) << 10) | VD(freg) | RN(src) | 0xf));
+ src = freg;
+ } else if (reg_size == 4)
+ src = simd_get_quad_reg_index(src);
+
+ if (!(type & SLJIT_SIMD_FLOAT)) {
+ dst_reg = (reg_size == 4) ? freg : TMP_FREG2;
+
+ do {
+ FAIL_IF(push_inst(compiler, VSHLL | ((type & SLJIT_SIMD_EXTEND_SIGNED) ? 0 : (1 << 24))
+ | ((sljit_ins)1 << (19 + elem_size)) | VD(dst_reg) | VM(src)));
+ src = dst_reg;
+ } while (++elem_size < elem2_size);
+
+ if (dst_reg == TMP_FREG2)
+ return push_inst(compiler, VORR | VD(freg) | VN(TMP_FREG2) | VM(TMP_FREG2));
+ return SLJIT_SUCCESS;
+ }
+
+ /* No SIMD variant, must use VFP instead. */
+ SLJIT_ASSERT(reg_size == 4);
+
+ if (freg == src) {
+ freg += SLJIT_QUAD_OTHER_HALF(freg);
+ FAIL_IF(push_inst(compiler, VCVT_F64_F32 | VD(freg) | VM(src) | 0x20));
+ freg += SLJIT_QUAD_OTHER_HALF(freg);
+ return push_inst(compiler, VCVT_F64_F32 | VD(freg) | VM(src));
+ }
+
+ FAIL_IF(push_inst(compiler, VCVT_F64_F32 | VD(freg) | VM(src)));
+ freg += SLJIT_QUAD_OTHER_HALF(freg);
+ return push_inst(compiler, VCVT_F64_F32 | VD(freg) | VM(src) | 0x20);
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_sign(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 dst, sljit_sw dstw)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_ins ins, imms;
+ sljit_s32 dst_r;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_sign(compiler, type, freg, dst, dstw));
+
+ ADJUST_LOCAL_OFFSET(dst, dstw);
+
+ if (reg_size != 3 && reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if ((type & SLJIT_SIMD_FLOAT) && (elem_size < 2 || elem_size > 3))
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ switch (elem_size) {
+ case 0:
+ imms = 0x243219;
+ ins = VSHR | (1 << 24) | (0x9 << 16);
+ break;
+ case 1:
+ imms = (reg_size == 4) ? 0x243219 : 0x2231;
+ ins = VSHR | (1 << 24) | (0x11 << 16);
+ break;
+ case 2:
+ imms = (reg_size == 4) ? 0x2231 : 0x21;
+ ins = VSHR | (1 << 24) | (0x21 << 16);
+ break;
+ default:
+ imms = 0x21;
+ ins = VSHR | (1 << 24) | (0x1 << 16) | (1 << 7);
+ break;
+ }
+
+ if (reg_size == 4) {
+ freg = simd_get_quad_reg_index(freg);
+ ins |= (sljit_ins)1 << 6;
+ }
+
+ SLJIT_ASSERT((freg_map[TMP_FREG2] & 0x1) == 0);
+ FAIL_IF(push_inst(compiler, ins | VD(TMP_FREG2) | VM(freg)));
+
+ if (reg_size == 4 && elem_size > 0)
+ FAIL_IF(push_inst(compiler, VMOVN | ((sljit_ins)(elem_size - 1) << 18) | VD(TMP_FREG2) | VM(TMP_FREG2)));
+
+ ins = (reg_size == 4 && elem_size == 0) ? (1 << 6) : 0;
+
+ while (imms >= 0x100) {
+ FAIL_IF(push_inst(compiler, VSRA | (1 << 24) | ins | ((imms & 0xff) << 16) | VD(TMP_FREG2) | VM(TMP_FREG2)));
+ imms >>= 8;
+ }
+
+ FAIL_IF(push_inst(compiler, VSRA | (1 << 24) | ins | (1 << 7) | (imms << 16) | VD(TMP_FREG2) | VM(TMP_FREG2)));
+
+ dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
+ FAIL_IF(push_inst(compiler, VMOV_s | (1 << 20) | (1 << 23) | (0x2 << 21) | RD(dst_r) | VN(TMP_FREG2)));
+
+ if (reg_size == 4 && elem_size == 0) {
+ SLJIT_ASSERT(freg_map[TMP_FREG2] + 1 == freg_map[TMP_FREG1]);
+ FAIL_IF(push_inst(compiler, VMOV_s | (1 << 20) | (1 << 23) | (0x2 << 21) | RD(TMP_REG2) | VN(TMP_FREG1)));
+ FAIL_IF(push_inst(compiler, ORR | RD(dst_r) | RN(dst_r) | RM(TMP_REG2) | (0x8 << 7)));
+ }
+
+ if (dst_r == TMP_REG1)
+ return emit_op_mem(compiler, WORD_SIZE, TMP_REG1, dst, dstw, TMP_REG2);
+
+ return SLJIT_SUCCESS;
+}
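
A scalar sketch (not part of the patch) of what the VSHR/VSRA chain above computes: a movemask, gathering one sign bit per lane into an integer register; shown here for 8 byte lanes:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t v[8] = { 0x80, 0x01, 0xff, 0x7f, 0x80, 0x00, 0xc0, 0x3f };
	unsigned int mask = 0;
	int i;

	for (i = 0; i < 8; i++)
		mask |= (unsigned int)(v[i] >> 7) << i; /* collect each msb */

	printf("mask = 0x%02x\n", mask); /* 0x55 for this input */
	return 0;
}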
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_op2(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 dst_freg, sljit_s32 src1_freg, sljit_s32 src2_freg)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_ins ins = 0;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_op2(compiler, type, dst_freg, src1_freg, src2_freg));
+
+ if (reg_size != 3 && reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if ((type & SLJIT_SIMD_FLOAT) && (elem_size < 2 || elem_size > 3))
+ return SLJIT_ERR_UNSUPPORTED;
+
+ switch (SLJIT_SIMD_GET_OPCODE(type)) {
+ case SLJIT_SIMD_OP2_AND:
+ ins = VAND;
+ break;
+ case SLJIT_SIMD_OP2_OR:
+ ins = VORR;
+ break;
+ case SLJIT_SIMD_OP2_XOR:
+ ins = VEOR;
+ break;
+ }
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ if (reg_size == 4) {
+ dst_freg = simd_get_quad_reg_index(dst_freg);
+ src1_freg = simd_get_quad_reg_index(src1_freg);
+ src2_freg = simd_get_quad_reg_index(src2_freg);
+ ins |= (sljit_ins)1 << 6;
+ }
+
+ return push_inst(compiler, ins | VD(dst_freg) | VN(src1_freg) | VM(src2_freg));
+}
+
+#undef FPU_LOAD
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_atomic_load(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst_reg,
+ sljit_s32 mem_reg)
+{
+ sljit_u32 ins;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_atomic_load(compiler, op, dst_reg, mem_reg));
+
+ switch (GET_OPCODE(op)) {
+ case SLJIT_MOV_U8:
+ ins = LDREXB;
+ break;
+ case SLJIT_MOV_U16:
+ ins = LDREXH;
+ break;
+ default:
+ ins = LDREX;
+ break;
+ }
+
+ return push_inst(compiler, ins | RN(mem_reg) | RD(dst_reg));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_atomic_store(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 src_reg,
+ sljit_s32 mem_reg,
+ sljit_s32 temp_reg)
+{
+ sljit_u32 ins;
+
+	/* temp_reg == mem_reg is undefined, so use another temp register */
+ SLJIT_UNUSED_ARG(temp_reg);
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_atomic_store(compiler, op, src_reg, mem_reg, temp_reg));
+
+ switch (GET_OPCODE(op)) {
+ case SLJIT_MOV_U8:
+ ins = STREXB;
+ break;
+ case SLJIT_MOV_U16:
+ ins = STREXH;
+ break;
+ default:
+ ins = STREX;
+ break;
+ }
+
+ FAIL_IF(push_inst(compiler, ins | RN(mem_reg) | RD(TMP_REG1) | RM(src_reg)));
+ if (op & SLJIT_SET_ATOMIC_STORED)
+ return push_inst(compiler, CMP | SET_FLAGS | SRC2_IMM | RN(TMP_REG1));
+
+ return SLJIT_SUCCESS;
}
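
A sketch (not part of the patch) of the retry loop a front end typically builds on these two primitives; the SLJIT_* names follow the public sljitLir.h API of this release, and error checking is elided:

static void emit_atomic_increment(struct sljit_compiler *compiler)
{
	/* R1 holds the address; R0 receives the old value, R2 the new one. */
	struct sljit_label *retry = sljit_emit_label(compiler);

	sljit_emit_atomic_load(compiler, SLJIT_MOV, SLJIT_R0, SLJIT_R1);
	sljit_emit_op2(compiler, SLJIT_ADD, SLJIT_R2, 0, SLJIT_R0, 0, SLJIT_IMM, 1);
	sljit_emit_atomic_store(compiler, SLJIT_MOV | SLJIT_SET_ATOMIC_STORED,
		SLJIT_R2, SLJIT_R1, SLJIT_R3);

	/* Loop while the exclusive store was interrupted. */
	sljit_set_label(sljit_emit_jump(compiler, SLJIT_ATOMIC_NOT_STORED), retry);
}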
SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw init_value)
@@ -2777,14 +4444,15 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compi
CHECK_PTR(check_sljit_emit_const(compiler, dst, dstw, init_value));
ADJUST_LOCAL_OFFSET(dst, dstw);
- dst_r = SLOW_IS_REG(dst) ? dst : TMP_REG2;
+ dst_r = FAST_IS_REG(dst) ? dst : TMP_REG2;
-#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
- PTR_FAIL_IF(push_inst_with_unique_literal(compiler, EMIT_DATA_TRANSFER(WORD_SIZE | LOAD_DATA, 1, dst_r, TMP_PC, 0), init_value));
+#if (defined SLJIT_CONFIG_ARM_V6 && SLJIT_CONFIG_ARM_V6)
+ PTR_FAIL_IF(push_inst_with_unique_literal(compiler,
+ EMIT_DATA_TRANSFER(WORD_SIZE | LOAD_DATA, 1, dst_r, TMP_PC, 0), (sljit_ins)init_value));
compiler->patches++;
-#else
+#else /* !SLJIT_CONFIG_ARM_V6 */
PTR_FAIL_IF(emit_imm(compiler, dst_r, init_value));
-#endif
+#endif /* SLJIT_CONFIG_ARM_V6 */
const_ = (struct sljit_const*)ensure_abuf(compiler, sizeof(struct sljit_const));
PTR_FAIL_IF(!const_);
@@ -2804,14 +4472,14 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_put_label* sljit_emit_put_label(struct slj
CHECK_PTR(check_sljit_emit_put_label(compiler, dst, dstw));
ADJUST_LOCAL_OFFSET(dst, dstw);
- dst_r = SLOW_IS_REG(dst) ? dst : TMP_REG2;
+ dst_r = FAST_IS_REG(dst) ? dst : TMP_REG2;
-#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+#if (defined SLJIT_CONFIG_ARM_V6 && SLJIT_CONFIG_ARM_V6)
PTR_FAIL_IF(push_inst_with_unique_literal(compiler, EMIT_DATA_TRANSFER(WORD_SIZE | LOAD_DATA, 1, dst_r, TMP_PC, 0), 0));
compiler->patches++;
-#else
+#else /* !SLJIT_CONFIG_ARM_V6 */
PTR_FAIL_IF(emit_imm(compiler, dst_r, 0));
-#endif
+#endif /* SLJIT_CONFIG_ARM_V6 */
put_label = (struct sljit_put_label*)ensure_abuf(compiler, sizeof(struct sljit_put_label));
PTR_FAIL_IF(!put_label);
@@ -2829,5 +4497,5 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_ta
SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset)
{
- inline_set_const(addr, executable_offset, new_constant, 1);
+ inline_set_const(addr, executable_offset, (sljit_uw)new_constant, 1);
}
diff --git a/src/3rdparty/pcre2/src/sljit/sljitNativeARM_64.c b/src/3rdparty/pcre2/src/sljit/sljitNativeARM_64.c
index 3f0f5fcc30..b268582f42 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitNativeARM_64.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitNativeARM_64.c
@@ -48,92 +48,143 @@ static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 8] = {
};
static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
- 0, 0, 1, 2, 3, 4, 5, 6, 7
+ 0, 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 15, 14, 13, 12, 11, 10, 9, 8, 30, 31
};
-#define W_OP (1u << 31)
-#define RD(rd) (reg_map[rd])
-#define RT(rt) (reg_map[rt])
-#define RN(rn) (reg_map[rn] << 5)
-#define RT2(rt2) (reg_map[rt2] << 10)
-#define RM(rm) (reg_map[rm] << 16)
-#define VD(vd) (freg_map[vd])
-#define VT(vt) (freg_map[vt])
-#define VN(vn) (freg_map[vn] << 5)
-#define VM(vm) (freg_map[vm] << 16)
+#define W_OP ((sljit_ins)1 << 31)
+#define RD(rd) ((sljit_ins)reg_map[rd])
+#define RT(rt) ((sljit_ins)reg_map[rt])
+#define RN(rn) ((sljit_ins)reg_map[rn] << 5)
+#define RT2(rt2) ((sljit_ins)reg_map[rt2] << 10)
+#define RM(rm) ((sljit_ins)reg_map[rm] << 16)
+#define VD(vd) ((sljit_ins)freg_map[vd])
+#define VT(vt) ((sljit_ins)freg_map[vt])
+#define VT2(vt) ((sljit_ins)freg_map[vt] << 10)
+#define VN(vn) ((sljit_ins)freg_map[vn] << 5)
+#define VM(vm) ((sljit_ins)freg_map[vm] << 16)
/* --------------------------------------------------------------------- */
/* Instruction forms */
/* --------------------------------------------------------------------- */
-#define ADC 0x9a000000
-#define ADD 0x8b000000
-#define ADDE 0x8b200000
-#define ADDI 0x91000000
-#define AND 0x8a000000
-#define ANDI 0x92000000
-#define ASRV 0x9ac02800
-#define B 0x14000000
-#define B_CC 0x54000000
-#define BL 0x94000000
-#define BLR 0xd63f0000
-#define BR 0xd61f0000
-#define BRK 0xd4200000
-#define CBZ 0xb4000000
-#define CLZ 0xdac01000
-#define CSEL 0x9a800000
-#define CSINC 0x9a800400
-#define EOR 0xca000000
-#define EORI 0xd2000000
-#define FABS 0x1e60c000
-#define FADD 0x1e602800
-#define FCMP 0x1e602000
-#define FCVT 0x1e224000
-#define FCVTZS 0x9e780000
-#define FDIV 0x1e601800
-#define FMOV 0x1e604000
-#define FMUL 0x1e600800
-#define FNEG 0x1e614000
-#define FSUB 0x1e603800
-#define LDRI 0xf9400000
-#define LDP 0xa9400000
-#define LDP_PRE 0xa9c00000
-#define LDR_PRE 0xf8400c00
-#define LSLV 0x9ac02000
-#define LSRV 0x9ac02400
-#define MADD 0x9b000000
-#define MOVK 0xf2800000
-#define MOVN 0x92800000
-#define MOVZ 0xd2800000
-#define NOP 0xd503201f
-#define ORN 0xaa200000
-#define ORR 0xaa000000
-#define ORRI 0xb2000000
-#define RET 0xd65f0000
-#define SBC 0xda000000
-#define SBFM 0x93000000
-#define SCVTF 0x9e620000
-#define SDIV 0x9ac00c00
-#define SMADDL 0x9b200000
-#define SMULH 0x9b403c00
-#define STP 0xa9000000
-#define STP_PRE 0xa9800000
-#define STRB 0x38206800
-#define STRBI 0x39000000
-#define STRI 0xf9000000
-#define STR_FI 0x3d000000
-#define STR_FR 0x3c206800
-#define STUR_FI 0x3c000000
-#define STURBI 0x38000000
-#define SUB 0xcb000000
-#define SUBI 0xd1000000
-#define SUBS 0xeb000000
-#define UBFM 0xd3000000
-#define UDIV 0x9ac00800
-#define UMULH 0x9bc03c00
-
-/* dest_reg is the absolute name of the register
- Useful for reordering instructions in the delay slot. */
+#define ADC 0x9a000000
+#define ADD 0x8b000000
+#define ADDE 0x8b200000
+#define ADDI 0x91000000
+#define AND 0x8a000000
+#define ANDI 0x92000000
+#define AND_v 0x0e201c00
+#define ASRV 0x9ac02800
+#define B 0x14000000
+#define B_CC 0x54000000
+#define BL 0x94000000
+#define BLR 0xd63f0000
+#define BR 0xd61f0000
+#define BRK 0xd4200000
+#define CAS 0xc8a07c00
+#define CASB 0x08a07c00
+#define CASH 0x48a07c00
+#define CBZ 0xb4000000
+#define CCMPI 0xfa400800
+#define CLZ 0xdac01000
+#define CSEL 0x9a800000
+#define CSINC 0x9a800400
+#define DUP_e 0x0e000400
+#define DUP_g 0x0e000c00
+#define EOR 0xca000000
+#define EOR_v 0x2e201c00
+#define EORI 0xd2000000
+#define EXTR 0x93c00000
+#define FABS 0x1e60c000
+#define FADD 0x1e602800
+#define FCMP 0x1e602000
+#define FCSEL 0x1e600c00
+#define FCVT 0x1e224000
+#define FCVTL 0x0e217800
+#define FCVTZS 0x9e780000
+#define FDIV 0x1e601800
+#define FMOV 0x1e604000
+#define FMOV_R 0x9e660000
+#define FMOV_I 0x1e601000
+#define FMUL 0x1e600800
+#define FNEG 0x1e614000
+#define FSUB 0x1e603800
+#define INS 0x4e001c00
+#define INS_e 0x6e000400
+#define LD1 0x0c407000
+#define LD1_s 0x0d400000
+#define LD1R 0x0d40c000
+#define LDRI 0xf9400000
+#define LDRI_F64 0xfd400000
+#define LDRI_POST 0xf8400400
+#define LDP 0xa9400000
+#define LDP_F64 0x6d400000
+#define LDP_POST 0xa8c00000
+#define LDR_PRE 0xf8400c00
+#define LDXR 0xc85f7c00
+#define LDXRB 0x085f7c00
+#define LDXRH 0x485f7c00
+#define LSLV 0x9ac02000
+#define LSRV 0x9ac02400
+#define MADD 0x9b000000
+#define MOVI 0x0f000400
+#define MOVK 0xf2800000
+#define MOVN 0x92800000
+#define MOVZ 0xd2800000
+#define NOP 0xd503201f
+#define ORN 0xaa200000
+#define ORR 0xaa000000
+#define ORR_v 0x0ea01c00
+#define ORRI 0xb2000000
+#define RBIT 0xdac00000
+#define RET 0xd65f0000
+#define REV 0xdac00c00
+#define REV16 0xdac00400
+#define RORV 0x9ac02c00
+#define SBC 0xda000000
+#define SBFM 0x93400000
+#define SCVTF 0x9e620000
+#define SDIV 0x9ac00c00
+#define SMADDL 0x9b200000
+#define SMOV 0x0e002c00
+#define SMULH 0x9b403c00
+#define SSHLL 0x0f00a400
+#define ST1 0x0c007000
+#define ST1_s 0x0d000000
+#define STP 0xa9000000
+#define STP_F64 0x6d000000
+#define STP_PRE 0xa9800000
+#define STRB 0x38206800
+#define STRBI 0x39000000
+#define STRI 0xf9000000
+#define STRI_F64 0xfd000000
+#define STR_FI 0x3d000000
+#define STR_FR 0x3c206800
+#define STUR_FI 0x3c000000
+#define STURBI 0x38000000
+#define STXR 0xc8007c00
+#define STXRB 0x8007c00
+#define STXRH 0x48007c00
+#define SUB 0xcb000000
+#define SUBI 0xd1000000
+#define SUBS 0xeb000000
+#define TBZ 0x36000000
+#define UBFM 0xd3400000
+#define UCVTF 0x9e630000
+#define UDIV 0x9ac00800
+#define UMOV 0x0e003c00
+#define UMULH 0x9bc03c00
+#define USHLL 0x2f00a400
+#define USHR 0x2f000400
+#define USRA 0x2f001400
+#define XTN 0x0e212800
+
+#define CSET (CSINC | RM(TMP_ZERO) | RN(TMP_ZERO))
+#define LDR (STRI | (1 << 22))
+#define LDRB (STRBI | (1 << 22))
+#define LDRH (LDRB | (1 << 30))
+#define MOV (ORR | RN(TMP_ZERO))
+
static sljit_s32 push_inst(struct sljit_compiler *compiler, sljit_ins ins)
{
sljit_ins *ptr = (sljit_ins*)ensure_buf(compiler, sizeof(sljit_ins));
@@ -145,10 +196,10 @@ static sljit_s32 push_inst(struct sljit_compiler *compiler, sljit_ins ins)
static SLJIT_INLINE sljit_s32 emit_imm64_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_uw imm)
{
- FAIL_IF(push_inst(compiler, MOVZ | RD(dst) | ((imm & 0xffff) << 5)));
- FAIL_IF(push_inst(compiler, MOVK | RD(dst) | (((imm >> 16) & 0xffff) << 5) | (1 << 21)));
- FAIL_IF(push_inst(compiler, MOVK | RD(dst) | (((imm >> 32) & 0xffff) << 5) | (2 << 21)));
- return push_inst(compiler, MOVK | RD(dst) | ((imm >> 48) << 5) | (3 << 21));
+ FAIL_IF(push_inst(compiler, MOVZ | RD(dst) | ((sljit_ins)(imm & 0xffff) << 5)));
+ FAIL_IF(push_inst(compiler, MOVK | RD(dst) | (((sljit_ins)(imm >> 16) & 0xffff) << 5) | (1 << 21)));
+ FAIL_IF(push_inst(compiler, MOVK | RD(dst) | (((sljit_ins)(imm >> 32) & 0xffff) << 5) | (2 << 21)));
+ return push_inst(compiler, MOVK | RD(dst) | ((sljit_ins)(imm >> 48) << 5) | (3 << 21));
}
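
A standalone sketch (not part of the patch) of the four 16-bit chunks this MOVZ/MOVK sequence materializes, printed for one sample constant:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t imm = 0x1122334455667788ull;
	int i;

	printf("MOVZ #0x%04x, lsl #0\n", (unsigned int)(imm & 0xffff));
	for (i = 1; i < 4; i++)
		printf("MOVK #0x%04x, lsl #%d\n",
			(unsigned int)((imm >> (16 * i)) & 0xffff), 16 * i);
	return 0;
}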
static SLJIT_INLINE sljit_sw detect_jump_type(struct sljit_jump *jump, sljit_ins *code_ptr, sljit_ins *code, sljit_sw executable_offset)
@@ -168,17 +219,17 @@ static SLJIT_INLINE sljit_sw detect_jump_type(struct sljit_jump *jump, sljit_ins
target_addr = (sljit_uw)(code + jump->u.label->size) + (sljit_uw)executable_offset;
}
- diff = (sljit_sw)target_addr - (sljit_sw)(code_ptr + 4) - executable_offset;
+ diff = (sljit_sw)target_addr - (sljit_sw)(code_ptr - 4) - executable_offset;
if (jump->flags & IS_COND) {
- diff += sizeof(sljit_ins);
+ diff += SSIZE_OF(ins);
if (diff <= 0xfffff && diff >= -0x100000) {
code_ptr[-5] ^= (jump->flags & IS_CBZ) ? (0x1 << 24) : 0x1;
jump->addr -= sizeof(sljit_ins);
jump->flags |= PATCH_COND;
return 5;
}
- diff -= sizeof(sljit_ins);
+ diff -= SSIZE_OF(ins);
}
if (diff <= 0x7ffffff && diff >= -0x8000000) {
@@ -231,8 +282,8 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
sljit_uw word_count;
sljit_uw next_addr;
sljit_sw executable_offset;
- sljit_uw addr;
- sljit_s32 dst;
+ sljit_sw addr;
+ sljit_u32 dst;
struct sljit_label *label;
struct sljit_jump *jump;
@@ -271,7 +322,7 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
/* These structures are ordered by their address. */
if (label && label->size == word_count) {
label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
- label->size = code_ptr - code;
+ label->size = (sljit_uw)(code_ptr - code);
label = label->next;
}
if (jump && jump->addr == word_count) {
@@ -291,8 +342,8 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
}
next_addr = compute_next_addr(label, jump, const_, put_label);
}
- code_ptr ++;
- word_count ++;
+ code_ptr++;
+ word_count++;
} while (buf_ptr < buf_end);
buf = buf->next;
@@ -300,7 +351,7 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
if (label && label->size == word_count) {
label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
- label->size = code_ptr - code;
+ label->size = (sljit_uw)(code_ptr - code);
label = label->next;
}
@@ -313,58 +364,58 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
jump = compiler->jumps;
while (jump) {
do {
- addr = (jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target;
+ addr = (sljit_sw)((jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target);
buf_ptr = (sljit_ins *)jump->addr;
if (jump->flags & PATCH_B) {
- addr = (sljit_sw)(addr - (sljit_uw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset)) >> 2;
- SLJIT_ASSERT((sljit_sw)addr <= 0x1ffffff && (sljit_sw)addr >= -0x2000000);
- buf_ptr[0] = ((jump->flags & IS_BL) ? BL : B) | (addr & 0x3ffffff);
+ addr = (addr - (sljit_sw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset)) >> 2;
+ SLJIT_ASSERT(addr <= 0x1ffffff && addr >= -0x2000000);
+ buf_ptr[0] = ((jump->flags & IS_BL) ? BL : B) | (sljit_ins)(addr & 0x3ffffff);
if (jump->flags & IS_COND)
buf_ptr[-1] -= (4 << 5);
break;
}
if (jump->flags & PATCH_COND) {
- addr = (sljit_sw)(addr - (sljit_uw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset)) >> 2;
- SLJIT_ASSERT((sljit_sw)addr <= 0x3ffff && (sljit_sw)addr >= -0x40000);
- buf_ptr[0] = (buf_ptr[0] & ~0xffffe0) | ((addr & 0x7ffff) << 5);
+ addr = (addr - (sljit_sw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset)) >> 2;
+ SLJIT_ASSERT(addr <= 0x3ffff && addr >= -0x40000);
+ buf_ptr[0] = (buf_ptr[0] & ~(sljit_ins)0xffffe0) | (sljit_ins)((addr & 0x7ffff) << 5);
break;
}
- SLJIT_ASSERT((jump->flags & (PATCH_ABS48 | PATCH_ABS64)) || addr <= 0xffffffffl);
- SLJIT_ASSERT((jump->flags & PATCH_ABS64) || addr <= 0xffffffffffffl);
+ SLJIT_ASSERT((jump->flags & (PATCH_ABS48 | PATCH_ABS64)) || (sljit_uw)addr <= (sljit_uw)0xffffffff);
+ SLJIT_ASSERT((jump->flags & PATCH_ABS64) || (sljit_uw)addr <= (sljit_uw)0xffffffffffff);
dst = buf_ptr[0] & 0x1f;
- buf_ptr[0] = MOVZ | dst | ((addr & 0xffff) << 5);
- buf_ptr[1] = MOVK | dst | (((addr >> 16) & 0xffff) << 5) | (1 << 21);
+ buf_ptr[0] = MOVZ | dst | (((sljit_ins)addr & 0xffff) << 5);
+ buf_ptr[1] = MOVK | dst | (((sljit_ins)(addr >> 16) & 0xffff) << 5) | (1 << 21);
if (jump->flags & (PATCH_ABS48 | PATCH_ABS64))
- buf_ptr[2] = MOVK | dst | (((addr >> 32) & 0xffff) << 5) | (2 << 21);
+ buf_ptr[2] = MOVK | dst | (((sljit_ins)(addr >> 32) & 0xffff) << 5) | (2 << 21);
if (jump->flags & PATCH_ABS64)
- buf_ptr[3] = MOVK | dst | (((addr >> 48) & 0xffff) << 5) | (3 << 21);
+ buf_ptr[3] = MOVK | dst | ((sljit_ins)(addr >> 48) << 5) | (3 << 21);
} while (0);
jump = jump->next;
}
put_label = compiler->put_labels;
while (put_label) {
- addr = put_label->label->addr;
- buf_ptr = (sljit_ins *)put_label->addr;
+ addr = (sljit_sw)put_label->label->addr;
+ buf_ptr = (sljit_ins*)put_label->addr;
- buf_ptr[0] |= (addr & 0xffff) << 5;
- buf_ptr[1] |= ((addr >> 16) & 0xffff) << 5;
+ buf_ptr[0] |= ((sljit_ins)addr & 0xffff) << 5;
+ buf_ptr[1] |= ((sljit_ins)(addr >> 16) & 0xffff) << 5;
if (put_label->flags >= 1)
- buf_ptr[2] |= ((addr >> 32) & 0xffff) << 5;
+ buf_ptr[2] |= ((sljit_ins)(addr >> 32) & 0xffff) << 5;
if (put_label->flags >= 2)
- buf_ptr[3] |= ((addr >> 48) & 0xffff) << 5;
+ buf_ptr[3] |= (sljit_ins)(addr >> 48) << 5;
put_label = put_label->next;
}
compiler->error = SLJIT_ERR_COMPILED;
compiler->executable_offset = executable_offset;
- compiler->executable_size = (code_ptr - code) * sizeof(sljit_ins);
+ compiler->executable_size = (sljit_uw)(code_ptr - code) * sizeof(sljit_ins);
code = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(code, executable_offset);
code_ptr = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
@@ -378,16 +429,23 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
{
switch (feature_type) {
case SLJIT_HAS_FPU:
+ case SLJIT_HAS_SIMD:
#ifdef SLJIT_IS_FPU_AVAILABLE
- return SLJIT_IS_FPU_AVAILABLE;
+ return (SLJIT_IS_FPU_AVAILABLE) != 0;
#else
/* Available by default. */
return 1;
#endif
case SLJIT_HAS_CLZ:
+ case SLJIT_HAS_CTZ:
+ case SLJIT_HAS_REV:
+ case SLJIT_HAS_ROT:
case SLJIT_HAS_CMOV:
case SLJIT_HAS_PREFETCH:
+ case SLJIT_HAS_COPY_F32:
+ case SLJIT_HAS_COPY_F64:
+ case SLJIT_HAS_ATOMIC:
return 1;
default:
@@ -395,6 +453,17 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
}
}
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_cmp_info(sljit_s32 type)
+{
+ switch (type) {
+ case SLJIT_UNORDERED_OR_EQUAL:
+ case SLJIT_ORDERED_NOT_EQUAL:
+ return 2;
+ }
+
+ return 0;
+}
+
/* --------------------------------------------------------------------- */
/* Core code generator functions. */
/* --------------------------------------------------------------------- */
@@ -426,11 +495,12 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
value >>= 1; \
}
-#define LOGICAL_IMM_CHECK 0x100
+#define LOGICAL_IMM_CHECK (sljit_ins)0x100
-static sljit_ins logical_imm(sljit_sw imm, sljit_s32 len)
+static sljit_ins logical_imm(sljit_sw imm, sljit_u32 len)
{
- sljit_s32 negated, ones, right;
+ sljit_s32 negated;
+ sljit_u32 ones, right;
sljit_uw mask, uimm;
sljit_ins ins;
@@ -497,30 +567,30 @@ static sljit_ins logical_imm(sljit_sw imm, sljit_s32 len)
static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw simm)
{
sljit_uw imm = (sljit_uw)simm;
- sljit_s32 i, zeros, ones, first;
+ sljit_u32 i, zeros, ones, first;
sljit_ins bitmask;
/* Handling simple immediates first. */
if (imm <= 0xffff)
- return push_inst(compiler, MOVZ | RD(dst) | (imm << 5));
+ return push_inst(compiler, MOVZ | RD(dst) | ((sljit_ins)imm << 5));
if (simm < 0 && simm >= -0x10000)
- return push_inst(compiler, MOVN | RD(dst) | ((~imm & 0xffff) << 5));
+ return push_inst(compiler, MOVN | RD(dst) | (((sljit_ins)~imm & 0xffff) << 5));
if (imm <= 0xffffffffl) {
if ((imm & 0xffff) == 0)
- return push_inst(compiler, MOVZ | RD(dst) | ((imm >> 16) << 5) | (1 << 21));
+ return push_inst(compiler, MOVZ | RD(dst) | ((sljit_ins)(imm >> 16) << 5) | (1 << 21));
if ((imm & 0xffff0000l) == 0xffff0000)
- return push_inst(compiler, (MOVN ^ W_OP) | RD(dst) | ((~imm & 0xffff) << 5));
+ return push_inst(compiler, (MOVN ^ W_OP) | RD(dst) | (((sljit_ins)~imm & 0xffff) << 5));
if ((imm & 0xffff) == 0xffff)
- return push_inst(compiler, (MOVN ^ W_OP) | RD(dst) | ((~imm & 0xffff0000l) >> (16 - 5)) | (1 << 21));
+ return push_inst(compiler, (MOVN ^ W_OP) | RD(dst) | (((sljit_ins)~imm & 0xffff0000u) >> (16 - 5)) | (1 << 21));
bitmask = logical_imm(simm, 16);
if (bitmask != 0)
return push_inst(compiler, (ORRI ^ W_OP) | RD(dst) | RN(TMP_ZERO) | bitmask);
- FAIL_IF(push_inst(compiler, MOVZ | RD(dst) | ((imm & 0xffff) << 5)));
- return push_inst(compiler, MOVK | RD(dst) | ((imm & 0xffff0000l) >> (16 - 5)) | (1 << 21));
+ FAIL_IF(push_inst(compiler, MOVZ | RD(dst) | (((sljit_ins)imm & 0xffff) << 5)));
+ return push_inst(compiler, MOVK | RD(dst) | (((sljit_ins)imm & 0xffff0000u) >> (16 - 5)) | (1 << 21));
}
bitmask = logical_imm(simm, 32);
@@ -529,10 +599,10 @@ static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 dst,
if (simm < 0 && simm >= -0x100000000l) {
if ((imm & 0xffff) == 0xffff)
- return push_inst(compiler, MOVN | RD(dst) | ((~imm & 0xffff0000l) >> (16 - 5)) | (1 << 21));
+ return push_inst(compiler, MOVN | RD(dst) | (((sljit_ins)~imm & 0xffff0000u) >> (16 - 5)) | (1 << 21));
- FAIL_IF(push_inst(compiler, MOVN | RD(dst) | ((~imm & 0xffff) << 5)));
- return push_inst(compiler, MOVK | RD(dst) | ((imm & 0xffff0000l) >> (16 - 5)) | (1 << 21));
+ FAIL_IF(push_inst(compiler, MOVN | RD(dst) | (((sljit_ins)~imm & 0xffff) << 5)));
+ return push_inst(compiler, MOVK | RD(dst) | (((sljit_ins)imm & 0xffff0000u) >> (16 - 5)) | (1 << 21));
}
/* A large number of constants can be constructed from ORR and MOVx, but computing them is costly. */
@@ -558,10 +628,10 @@ static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 dst,
}
if (first) {
first = 0;
- FAIL_IF(push_inst(compiler, MOVN | RD(dst) | ((simm & 0xffff) << 5) | (i << 21)));
+ FAIL_IF(push_inst(compiler, MOVN | RD(dst) | (((sljit_ins)simm & 0xffff) << 5) | (i << 21)));
}
else
- FAIL_IF(push_inst(compiler, MOVK | RD(dst) | ((~simm & 0xffff) << 5) | (i << 21)));
+ FAIL_IF(push_inst(compiler, MOVK | RD(dst) | (((sljit_ins)~simm & 0xffff) << 5) | (i << 21)));
simm >>= 16;
}
return SLJIT_SUCCESS;
@@ -574,10 +644,10 @@ static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 dst,
}
if (first) {
first = 0;
- FAIL_IF(push_inst(compiler, MOVZ | RD(dst) | ((simm & 0xffff) << 5) | (i << 21)));
+ FAIL_IF(push_inst(compiler, MOVZ | RD(dst) | (((sljit_ins)simm & 0xffff) << 5) | (i << 21)));
}
else
- FAIL_IF(push_inst(compiler, MOVK | RD(dst) | ((simm & 0xffff) << 5) | (i << 21)));
+ FAIL_IF(push_inst(compiler, MOVK | RD(dst) | (((sljit_ins)simm & 0xffff) << 5) | (i << 21)));
simm >>= 16;
}
return SLJIT_SUCCESS;
@@ -619,13 +689,18 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s
}
if (flags & (ARG1_IMM | ARG2_IMM)) {
- reg = (flags & ARG2_IMM) ? arg1 : arg2;
+ reg = (sljit_s32)((flags & ARG2_IMM) ? arg1 : arg2);
imm = (flags & ARG2_IMM) ? arg2 : arg1;
switch (op) {
case SLJIT_MUL:
- case SLJIT_NEG:
case SLJIT_CLZ:
+ case SLJIT_CTZ:
+ case SLJIT_REV:
+ case SLJIT_REV_U16:
+ case SLJIT_REV_S16:
+ case SLJIT_REV_U32:
+ case SLJIT_REV_S32:
case SLJIT_ADDC:
case SLJIT_SUBC:
/* No form with immediate operand (except imm 0, which
@@ -634,45 +709,44 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s
case SLJIT_MOV:
SLJIT_ASSERT(!(flags & SET_FLAGS) && (flags & ARG2_IMM) && arg1 == TMP_REG1);
return load_immediate(compiler, dst, imm);
- case SLJIT_NOT:
- SLJIT_ASSERT(flags & ARG2_IMM);
- FAIL_IF(load_immediate(compiler, dst, (flags & INT_OP) ? (~imm & 0xffffffff) : ~imm));
- goto set_flags;
case SLJIT_SUB:
+ compiler->status_flags_state = SLJIT_CURRENT_FLAGS_SUB;
if (flags & ARG1_IMM)
break;
imm = -imm;
/* Fall through. */
case SLJIT_ADD:
- compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD_SUB;
+ if (op != SLJIT_SUB)
+ compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD;
+
if (imm == 0) {
CHECK_FLAGS(1 << 29);
return push_inst(compiler, ((op == SLJIT_ADD ? ADDI : SUBI) ^ inv_bits) | RD(dst) | RN(reg));
}
if (imm > 0 && imm <= 0xfff) {
CHECK_FLAGS(1 << 29);
- return push_inst(compiler, (ADDI ^ inv_bits) | RD(dst) | RN(reg) | (imm << 10));
+ return push_inst(compiler, (ADDI ^ inv_bits) | RD(dst) | RN(reg) | ((sljit_ins)imm << 10));
}
nimm = -imm;
if (nimm > 0 && nimm <= 0xfff) {
CHECK_FLAGS(1 << 29);
- return push_inst(compiler, (SUBI ^ inv_bits) | RD(dst) | RN(reg) | (nimm << 10));
+ return push_inst(compiler, (SUBI ^ inv_bits) | RD(dst) | RN(reg) | ((sljit_ins)nimm << 10));
}
if (imm > 0 && imm <= 0xffffff && !(imm & 0xfff)) {
CHECK_FLAGS(1 << 29);
- return push_inst(compiler, (ADDI ^ inv_bits) | RD(dst) | RN(reg) | ((imm >> 12) << 10) | (1 << 22));
+ return push_inst(compiler, (ADDI ^ inv_bits) | RD(dst) | RN(reg) | (((sljit_ins)imm >> 12) << 10) | (1 << 22));
}
if (nimm > 0 && nimm <= 0xffffff && !(nimm & 0xfff)) {
CHECK_FLAGS(1 << 29);
- return push_inst(compiler, (SUBI ^ inv_bits) | RD(dst) | RN(reg) | ((nimm >> 12) << 10) | (1 << 22));
+ return push_inst(compiler, (SUBI ^ inv_bits) | RD(dst) | RN(reg) | (((sljit_ins)nimm >> 12) << 10) | (1 << 22));
}
if (imm > 0 && imm <= 0xffffff && !(flags & SET_FLAGS)) {
- FAIL_IF(push_inst(compiler, (ADDI ^ inv_bits) | RD(dst) | RN(reg) | ((imm >> 12) << 10) | (1 << 22)));
- return push_inst(compiler, (ADDI ^ inv_bits) | RD(dst) | RN(dst) | ((imm & 0xfff) << 10));
+ FAIL_IF(push_inst(compiler, (ADDI ^ inv_bits) | RD(dst) | RN(reg) | (((sljit_ins)imm >> 12) << 10) | (1 << 22)));
+ return push_inst(compiler, (ADDI ^ inv_bits) | RD(dst) | RN(dst) | (((sljit_ins)imm & 0xfff) << 10));
}
if (nimm > 0 && nimm <= 0xffffff && !(flags & SET_FLAGS)) {
- FAIL_IF(push_inst(compiler, (SUBI ^ inv_bits) | RD(dst) | RN(reg) | ((nimm >> 12) << 10) | (1 << 22)));
- return push_inst(compiler, (SUBI ^ inv_bits) | RD(dst) | RN(dst) | ((nimm & 0xfff) << 10));
+ FAIL_IF(push_inst(compiler, (SUBI ^ inv_bits) | RD(dst) | RN(reg) | (((sljit_ins)nimm >> 12) << 10) | (1 << 22)));
+ return push_inst(compiler, (SUBI ^ inv_bits) | RD(dst) | RN(dst) | (((sljit_ins)nimm & 0xfff) << 10));
}
break;
case SLJIT_AND:
@@ -681,8 +755,13 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s
break;
CHECK_FLAGS(3 << 29);
return push_inst(compiler, (ANDI ^ inv_bits) | RD(dst) | RN(reg) | inst_bits);
- case SLJIT_OR:
case SLJIT_XOR:
+ if (imm == -1) {
+ FAIL_IF(push_inst(compiler, (ORN ^ inv_bits) | RD(dst) | RN(TMP_ZERO) | RM(reg)));
+ goto set_flags;
+ }
+ /* fallthrough */
+ case SLJIT_OR:
inst_bits = logical_imm(imm, LOGICAL_IMM_CHECK | ((flags & INT_OP) ? 16 : 32));
if (!inst_bits)
break;
@@ -693,32 +772,52 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s
FAIL_IF(push_inst(compiler, (inst_bits ^ inv_bits) | RD(dst) | RN(reg)));
goto set_flags;
case SLJIT_SHL:
+ case SLJIT_MSHL:
if (flags & ARG1_IMM)
break;
+
if (flags & INT_OP) {
imm &= 0x1f;
- FAIL_IF(push_inst(compiler, (UBFM ^ inv_bits) | RD(dst) | RN(arg1) | ((-imm & 0x1f) << 16) | ((31 - imm) << 10)));
- }
- else {
+ inst_bits = (((sljit_ins)-imm & 0x1f) << 16) | ((31 - (sljit_ins)imm) << 10);
+ } else {
imm &= 0x3f;
- FAIL_IF(push_inst(compiler, (UBFM ^ inv_bits) | RD(dst) | RN(arg1) | (1 << 22) | ((-imm & 0x3f) << 16) | ((63 - imm) << 10)));
+ inst_bits = ((sljit_ins)1 << 22) | (((sljit_ins)-imm & 0x3f) << 16) | ((63 - (sljit_ins)imm) << 10);
}
+
+ inv_bits |= inv_bits >> 9;
+ FAIL_IF(push_inst(compiler, (UBFM ^ inv_bits) | RD(dst) | RN(arg1) | inst_bits));
goto set_flags;
case SLJIT_LSHR:
+ case SLJIT_MLSHR:
case SLJIT_ASHR:
+ case SLJIT_MASHR:
if (flags & ARG1_IMM)
break;
- if (op == SLJIT_ASHR)
+
+ inv_bits |= inv_bits >> 9;
+ if (op >= SLJIT_ASHR)
inv_bits |= 1 << 30;
+
if (flags & INT_OP) {
imm &= 0x1f;
- FAIL_IF(push_inst(compiler, (UBFM ^ inv_bits) | RD(dst) | RN(arg1) | (imm << 16) | (31 << 10)));
- }
- else {
+ inst_bits = ((sljit_ins)imm << 16) | (31 << 10);
+ } else {
imm &= 0x3f;
- FAIL_IF(push_inst(compiler, (UBFM ^ inv_bits) | RD(dst) | RN(arg1) | (1 << 22) | (imm << 16) | (63 << 10)));
+ inst_bits = ((sljit_ins)1 << 22) | ((sljit_ins)imm << 16) | (63 << 10);
}
+
+ FAIL_IF(push_inst(compiler, (UBFM ^ inv_bits) | RD(dst) | RN(arg1) | inst_bits));
goto set_flags;
+ case SLJIT_ROTL:
+ case SLJIT_ROTR:
+ if (flags & ARG1_IMM)
+ break;
+
+ if (op == SLJIT_ROTL)
+ imm = -imm;
+
+ imm &= (flags & INT_OP) ? 0x1f : 0x3f;
+ return push_inst(compiler, (EXTR ^ (inv_bits | (inv_bits >> 9))) | RD(dst) | RN(arg1) | RM(arg1) | ((sljit_ins)imm << 10));
default:
SLJIT_UNREACHABLE();
break;
@@ -749,58 +848,74 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s
SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1);
if (dst == arg2)
return SLJIT_SUCCESS;
- return push_inst(compiler, ORR | RD(dst) | RN(TMP_ZERO) | RM(arg2));
+ return push_inst(compiler, MOV | RD(dst) | RM(arg2));
case SLJIT_MOV_U8:
SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1);
- return push_inst(compiler, (UBFM ^ W_OP) | RD(dst) | RN(arg2) | (7 << 10));
+ inv_bits |= inv_bits >> 9;
+ return push_inst(compiler, (UBFM ^ inv_bits) | RD(dst) | RN(arg2) | (7 << 10));
case SLJIT_MOV_S8:
SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1);
- if (!(flags & INT_OP))
- inv_bits |= 1 << 22;
+ inv_bits |= inv_bits >> 9;
return push_inst(compiler, (SBFM ^ inv_bits) | RD(dst) | RN(arg2) | (7 << 10));
case SLJIT_MOV_U16:
SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1);
- return push_inst(compiler, (UBFM ^ W_OP) | RD(dst) | RN(arg2) | (15 << 10));
+ inv_bits |= inv_bits >> 9;
+ return push_inst(compiler, (UBFM ^ inv_bits) | RD(dst) | RN(arg2) | (15 << 10));
case SLJIT_MOV_S16:
SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1);
- if (!(flags & INT_OP))
- inv_bits |= 1 << 22;
+ inv_bits |= inv_bits >> 9;
return push_inst(compiler, (SBFM ^ inv_bits) | RD(dst) | RN(arg2) | (15 << 10));
- case SLJIT_MOV_U32:
+ case SLJIT_MOV32:
SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1);
- if ((flags & INT_OP) && dst == arg2)
+ if (dst == arg2)
return SLJIT_SUCCESS;
- return push_inst(compiler, (ORR ^ W_OP) | RD(dst) | RN(TMP_ZERO) | RM(arg2));
+ /* fallthrough */
+ case SLJIT_MOV_U32:
+ SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1);
+ return push_inst(compiler, (MOV ^ W_OP) | RD(dst) | RM(arg2));
case SLJIT_MOV_S32:
SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1);
- if ((flags & INT_OP) && dst == arg2)
- return SLJIT_SUCCESS;
return push_inst(compiler, SBFM | (1 << 22) | RD(dst) | RN(arg2) | (31 << 10));
- case SLJIT_NOT:
- SLJIT_ASSERT(arg1 == TMP_REG1);
- FAIL_IF(push_inst(compiler, (ORN ^ inv_bits) | RD(dst) | RN(TMP_ZERO) | RM(arg2)));
- break; /* Set flags. */
- case SLJIT_NEG:
- SLJIT_ASSERT(arg1 == TMP_REG1);
- compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD_SUB;
- if (flags & SET_FLAGS)
- inv_bits |= 1 << 29;
- return push_inst(compiler, (SUB ^ inv_bits) | RD(dst) | RN(TMP_ZERO) | RM(arg2));
case SLJIT_CLZ:
SLJIT_ASSERT(arg1 == TMP_REG1);
return push_inst(compiler, (CLZ ^ inv_bits) | RD(dst) | RN(arg2));
+ case SLJIT_CTZ:
+ SLJIT_ASSERT(arg1 == TMP_REG1);
+ FAIL_IF(push_inst(compiler, (RBIT ^ inv_bits) | RD(dst) | RN(arg2)));
+ return push_inst(compiler, (CLZ ^ inv_bits) | RD(dst) | RN(dst));
+ case SLJIT_REV:
+ SLJIT_ASSERT(arg1 == TMP_REG1);
+ inv_bits |= inv_bits >> 21;
+ return push_inst(compiler, (REV ^ inv_bits) | RD(dst) | RN(arg2));
+ case SLJIT_REV_U16:
+ case SLJIT_REV_S16:
+ SLJIT_ASSERT(arg1 == TMP_REG1 && dst != TMP_REG2);
+ FAIL_IF(push_inst(compiler, (REV16 ^ (sljit_ins)0x80000000) | RD(dst) | RN(arg2)));
+ if (dst == TMP_REG1 || (arg2 == TMP_REG2 && op == SLJIT_REV_U16))
+ return SLJIT_SUCCESS;
+ inv_bits |= inv_bits >> 9;
+ return push_inst(compiler, ((op == SLJIT_REV_U16 ? UBFM : SBFM) ^ inv_bits) | RD(dst) | RN(dst) | (15 << 10));
+ case SLJIT_REV_U32:
+ case SLJIT_REV_S32:
+ SLJIT_ASSERT(arg1 == TMP_REG1 && dst != TMP_REG2);
+ FAIL_IF(push_inst(compiler, (REV ^ (sljit_ins)0x80000400) | RD(dst) | RN(arg2)));
+ if (op == SLJIT_REV_U32 || dst == TMP_REG1)
+ return SLJIT_SUCCESS;
+ return push_inst(compiler, SBFM | (1 << 22) | RD(dst) | RN(dst) | (31 << 10));
case SLJIT_ADD:
+ compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD;
CHECK_FLAGS(1 << 29);
- compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD_SUB;
return push_inst(compiler, (ADD ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2));
case SLJIT_ADDC:
+ compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD;
CHECK_FLAGS(1 << 29);
return push_inst(compiler, (ADC ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2));
case SLJIT_SUB:
+ compiler->status_flags_state = SLJIT_CURRENT_FLAGS_SUB;
CHECK_FLAGS(1 << 29);
- compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD_SUB;
return push_inst(compiler, (SUB ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2));
case SLJIT_SUBC:
+ compiler->status_flags_state = SLJIT_CURRENT_FLAGS_SUB;
CHECK_FLAGS(1 << 29);
return push_inst(compiler, (SBC ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2));
case SLJIT_MUL:
@@ -825,14 +940,23 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s
FAIL_IF(push_inst(compiler, (EOR ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2)));
break; /* Set flags. */
case SLJIT_SHL:
+ case SLJIT_MSHL:
FAIL_IF(push_inst(compiler, (LSLV ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2)));
break; /* Set flags. */
case SLJIT_LSHR:
+ case SLJIT_MLSHR:
FAIL_IF(push_inst(compiler, (LSRV ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2)));
break; /* Set flags. */
case SLJIT_ASHR:
+ case SLJIT_MASHR:
FAIL_IF(push_inst(compiler, (ASRV ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2)));
break; /* Set flags. */
+ case SLJIT_ROTL:
+ FAIL_IF(push_inst(compiler, (SUB ^ inv_bits) | RD(TMP_REG2) | RN(TMP_ZERO) | RM(arg2)));
+ arg2 = TMP_REG2;
+ /* fallthrough */
+ case SLJIT_ROTR:
+ return push_inst(compiler, (RORV ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2));
default:
SLJIT_UNREACHABLE();
return SLJIT_SUCCESS;
@@ -852,7 +976,7 @@ set_flags:
#define INT_SIZE 0x2
#define WORD_SIZE 0x3
-#define MEM_SIZE_SHIFT(flags) ((flags) & 0x3)
+#define MEM_SIZE_SHIFT(flags) ((sljit_ins)(flags) & 0x3)
static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg,
sljit_s32 arg, sljit_sw argw, sljit_s32 tmp_reg)
@@ -872,35 +996,50 @@ static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, s
return push_inst(compiler, STRB | type | RT(reg)
| RN(arg & REG_MASK) | RM(OFFS_REG(arg)) | (argw ? (1 << 12) : 0));
- FAIL_IF(push_inst(compiler, ADD | RD(tmp_reg) | RN(arg & REG_MASK) | RM(OFFS_REG(arg)) | (argw << 10)));
+ FAIL_IF(push_inst(compiler, ADD | RD(tmp_reg) | RN(arg & REG_MASK) | RM(OFFS_REG(arg)) | ((sljit_ins)argw << 10)));
return push_inst(compiler, STRBI | type | RT(reg) | RN(tmp_reg));
}
arg &= REG_MASK;
- if (arg == SLJIT_UNUSED) {
+ if (!arg) {
FAIL_IF(load_immediate(compiler, tmp_reg, argw & ~(0xfff << shift)));
argw = (argw >> shift) & 0xfff;
- return push_inst(compiler, STRBI | type | RT(reg) | RN(tmp_reg) | (argw << 10));
+ return push_inst(compiler, STRBI | type | RT(reg) | RN(tmp_reg) | ((sljit_ins)argw << 10));
}
- if (argw >= 0 && (argw & ((1 << shift) - 1)) == 0) {
- if ((argw >> shift) <= 0xfff) {
- return push_inst(compiler, STRBI | type | RT(reg) | RN(arg) | (argw << (10 - shift)));
- }
+ if ((argw & ((1 << shift) - 1)) == 0) {
+ if (argw >= 0) {
+ if ((argw >> shift) <= 0xfff)
+ return push_inst(compiler, STRBI | type | RT(reg) | RN(arg) | ((sljit_ins)argw << (10 - shift)));
- if (argw <= 0xffffff) {
- FAIL_IF(push_inst(compiler, ADDI | (1 << 22) | RD(tmp_reg) | RN(arg) | ((argw >> 12) << 10)));
+ if (argw <= 0xffffff) {
+ FAIL_IF(push_inst(compiler, ADDI | (1 << 22) | RD(tmp_reg) | RN(arg) | (((sljit_ins)argw >> 12) << 10)));
- argw = ((argw & 0xfff) >> shift);
- return push_inst(compiler, STRBI | type | RT(reg) | RN(tmp_reg) | (argw << 10));
+ argw = ((argw & 0xfff) >> shift);
+ return push_inst(compiler, STRBI | type | RT(reg) | RN(tmp_reg) | ((sljit_ins)argw << 10));
+ }
+ } else if (argw < -256 && argw >= -0xfff000) {
+ FAIL_IF(push_inst(compiler, SUBI | (1 << 22) | RD(tmp_reg) | RN(arg) | (((sljit_ins)(-argw + 0xfff) >> 12) << 10)));
+ argw = ((0x1000 + argw) & 0xfff) >> shift;
+ return push_inst(compiler, STRBI | type | RT(reg) | RN(tmp_reg) | ((sljit_ins)argw << 10));
}
}
- if (argw <= 255 && argw >= -256)
- return push_inst(compiler, STURBI | type | RT(reg) | RN(arg) | ((argw & 0x1ff) << 12));
+ if (argw <= 0xff && argw >= -0x100)
+ return push_inst(compiler, STURBI | type | RT(reg) | RN(arg) | (((sljit_ins)argw & 0x1ff) << 12));
+
+ if (argw >= 0) {
+ if (argw <= 0xfff0ff && ((argw + 0x100) & 0xfff) <= 0x1ff) {
+ FAIL_IF(push_inst(compiler, ADDI | (1 << 22) | RD(tmp_reg) | RN(arg) | (((sljit_ins)argw >> 12) << 10)));
+ return push_inst(compiler, STURBI | type | RT(reg) | RN(tmp_reg) | (((sljit_ins)argw & 0x1ff) << 12));
+ }
+ } else if (argw >= -0xfff100 && ((-argw + 0xff) & 0xfff) <= 0x1ff) {
+ FAIL_IF(push_inst(compiler, SUBI | (1 << 22) | RD(tmp_reg) | RN(arg) | (((sljit_ins)-argw >> 12) << 10)));
+ return push_inst(compiler, STURBI | type | RT(reg) | RN(tmp_reg) | (((sljit_ins)argw & 0x1ff) << 12));
+ }
FAIL_IF(load_immediate(compiler, tmp_reg, argw));
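
Note: the cascade above covers the three ways an AArch64 store can take its offset: a scaled unsigned 12-bit immediate (the STRBI forms), an unscaled signed 9-bit immediate (the STURBI forms), and, failing both, an address materialized into tmp_reg, with ADDI/SUBI used first to fold large but round offsets into the base. A sketch of the classification, ignoring the folding paths (hypothetical helper, not sljit API):

	/* 'shift' is log2 of the access size; aligned offsets that fit
	 * 12 bits after scaling use STR, small signed ones use STUR. */
	static int store_offset_form(long argw, int shift)
	{
		if (argw >= 0 && (argw & ((1L << shift) - 1)) == 0
				&& (argw >> shift) <= 0xfff)
			return 0; /* STR [Rn, #imm12 << shift] */
		if (argw >= -0x100 && argw <= 0xff)
			return 1; /* STUR [Rn, #imm9] */
		return 2; /* build the address in tmp_reg */
	}
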
@@ -915,39 +1054,44 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
- sljit_s32 args, i, tmp, offs, prev, saved_regs_size;
+ sljit_s32 prev, fprev, saved_regs_size, i, tmp;
+ sljit_s32 saved_arg_count = SLJIT_KEPT_SAVEDS_COUNT(options);
+ sljit_ins offs;
CHECK_ERROR();
CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
- saved_regs_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 2);
- if (saved_regs_size & 0x8)
- saved_regs_size += sizeof(sljit_sw);
+ saved_regs_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds - saved_arg_count, 2);
+ saved_regs_size += GET_SAVED_FLOAT_REGISTERS_SIZE(fscratches, fsaveds, f64);
- local_size = (local_size + 15) & ~0xf;
- compiler->local_size = local_size + saved_regs_size;
+ local_size = (local_size + saved_regs_size + 0xf) & ~0xf;
+ compiler->local_size = local_size;
- FAIL_IF(push_inst(compiler, STP_PRE | RT(TMP_FP) | RT2(TMP_LR)
- | RN(SLJIT_SP) | ((-(saved_regs_size >> 3) & 0x7f) << 15)));
+ if (local_size <= 512) {
+ FAIL_IF(push_inst(compiler, STP_PRE | RT(TMP_FP) | RT2(TMP_LR)
+ | RN(SLJIT_SP) | (sljit_ins)((-(local_size >> 3) & 0x7f) << 15)));
+ offs = (sljit_ins)(local_size - 2 * SSIZE_OF(sw)) << (15 - 3);
+ local_size = 0;
+ } else {
+ saved_regs_size = ((saved_regs_size - 2 * SSIZE_OF(sw)) + 0xf) & ~0xf;
-#ifdef _WIN32
- if (local_size >= 4096)
- FAIL_IF(push_inst(compiler, SUBI | RD(TMP_REG1) | RN(SLJIT_SP) | (1 << 10) | (1 << 22)));
- else if (local_size > 256)
- FAIL_IF(push_inst(compiler, SUBI | RD(TMP_REG1) | RN(SLJIT_SP) | (local_size << 10)));
-#endif
+ FAIL_IF(push_inst(compiler, SUBI | RD(SLJIT_SP) | RN(SLJIT_SP) | ((sljit_ins)saved_regs_size << 10)));
+ offs = (sljit_ins)(saved_regs_size - 2 * SSIZE_OF(sw)) << (15 - 3);
+ local_size -= saved_regs_size;
+ SLJIT_ASSERT(local_size > 0);
+ }
- tmp = saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - saveds) : SLJIT_FIRST_SAVED_REG;
prev = -1;
- offs = 2 << 15;
- for (i = SLJIT_S0; i >= tmp; i--) {
+
+ tmp = SLJIT_S0 - saveds;
+ for (i = SLJIT_S0 - saved_arg_count; i > tmp; i--) {
if (prev == -1) {
prev = i;
continue;
}
FAIL_IF(push_inst(compiler, STP | RT(prev) | RT2(i) | RN(SLJIT_SP) | offs));
- offs += 2 << 15;
+ offs -= (sljit_ins)2 << 15;
prev = -1;
}
@@ -957,84 +1101,128 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
continue;
}
FAIL_IF(push_inst(compiler, STP | RT(prev) | RT2(i) | RN(SLJIT_SP) | offs));
- offs += 2 << 15;
+ offs -= (sljit_ins)2 << 15;
prev = -1;
}
+ fprev = -1;
+
+ tmp = SLJIT_FS0 - fsaveds;
+ for (i = SLJIT_FS0; i > tmp; i--) {
+ if (fprev == -1) {
+ fprev = i;
+ continue;
+ }
+ FAIL_IF(push_inst(compiler, STP_F64 | VT(fprev) | VT2(i) | RN(SLJIT_SP) | offs));
+ offs -= (sljit_ins)2 << 15;
+ fprev = -1;
+ }
+
+ for (i = fscratches; i >= SLJIT_FIRST_SAVED_FLOAT_REG; i--) {
+ if (fprev == -1) {
+ fprev = i;
+ continue;
+ }
+ FAIL_IF(push_inst(compiler, STP_F64 | VT(fprev) | VT2(i) | RN(SLJIT_SP) | offs));
+ offs -= (sljit_ins)2 << 15;
+ fprev = -1;
+ }
+
+ if (fprev != -1)
+ FAIL_IF(push_inst(compiler, STRI_F64 | VT(fprev) | RN(SLJIT_SP) | (offs >> 5) | (1 << 10)));
+
if (prev != -1)
- FAIL_IF(push_inst(compiler, STRI | RT(prev) | RN(SLJIT_SP) | (offs >> 5)));
+ FAIL_IF(push_inst(compiler, STRI | RT(prev) | RN(SLJIT_SP) | (offs >> 5) | ((fprev == -1) ? (1 << 10) : 0)));
- FAIL_IF(push_inst(compiler, ADDI | RD(TMP_FP) | RN(SLJIT_SP) | (0 << 10)));
+#ifdef _WIN32
+ if (local_size > 4096)
+ FAIL_IF(push_inst(compiler, SUBI | RD(SLJIT_SP) | RN(SLJIT_SP) | (1 << 10) | (1 << 22)));
+#endif /* _WIN32 */
- args = get_arg_count(arg_types);
+ if (!(options & SLJIT_ENTER_REG_ARG)) {
+ arg_types >>= SLJIT_ARG_SHIFT;
+ saved_arg_count = 0;
+ tmp = SLJIT_R0;
- if (args >= 1)
- FAIL_IF(push_inst(compiler, ORR | RD(SLJIT_S0) | RN(TMP_ZERO) | RM(SLJIT_R0)));
- if (args >= 2)
- FAIL_IF(push_inst(compiler, ORR | RD(SLJIT_S1) | RN(TMP_ZERO) | RM(SLJIT_R1)));
- if (args >= 3)
- FAIL_IF(push_inst(compiler, ORR | RD(SLJIT_S2) | RN(TMP_ZERO) | RM(SLJIT_R2)));
+ while (arg_types) {
+ if ((arg_types & SLJIT_ARG_MASK) < SLJIT_ARG_TYPE_F64) {
+ if (!(arg_types & SLJIT_ARG_TYPE_SCRATCH_REG)) {
+ FAIL_IF(push_inst(compiler, MOV | RD(SLJIT_S0 - saved_arg_count) | RM(tmp)));
+ saved_arg_count++;
+ }
+ tmp++;
+ }
+ arg_types >>= SLJIT_ARG_SHIFT;
+ }
+ }
#ifdef _WIN32
- if (local_size >= 4096) {
+ if (local_size > 4096) {
if (local_size < 4 * 4096) {
/* No need for a loop. */
- if (local_size >= 2 * 4096) {
- FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(TMP_REG1)));
- FAIL_IF(push_inst(compiler, SUBI | RD(TMP_REG1) | RN(TMP_REG1) | (1 << 10) | (1 << 22)));
- local_size -= 4096;
- }
if (local_size >= 2 * 4096) {
- FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(TMP_REG1)));
- FAIL_IF(push_inst(compiler, SUBI | RD(TMP_REG1) | RN(TMP_REG1) | (1 << 10) | (1 << 22)));
- local_size -= 4096;
- }
+ if (local_size >= 3 * 4096) {
+ FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(SLJIT_SP)));
+ FAIL_IF(push_inst(compiler, SUBI | RD(SLJIT_SP) | RN(SLJIT_SP) | (1 << 10) | (1 << 22)));
+ }
- FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(TMP_REG1)));
- local_size -= 4096;
+ FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(SLJIT_SP)));
+ FAIL_IF(push_inst(compiler, SUBI | RD(SLJIT_SP) | RN(SLJIT_SP) | (1 << 10) | (1 << 22)));
+ }
}
else {
- FAIL_IF(push_inst(compiler, MOVZ | RD(TMP_REG2) | (((local_size >> 12) - 1) << 5)));
- FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(TMP_REG1)));
- FAIL_IF(push_inst(compiler, SUBI | RD(TMP_REG1) | RN(TMP_REG1) | (1 << 10) | (1 << 22)));
- FAIL_IF(push_inst(compiler, SUBI | (1 << 29) | RD(TMP_REG2) | RN(TMP_REG2) | (1 << 10)));
+ FAIL_IF(push_inst(compiler, MOVZ | RD(TMP_REG1) | ((((sljit_ins)local_size >> 12) - 1) << 5)));
+ FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(SLJIT_SP)));
+ FAIL_IF(push_inst(compiler, SUBI | RD(SLJIT_SP) | RN(SLJIT_SP) | (1 << 10) | (1 << 22)));
+ FAIL_IF(push_inst(compiler, SUBI | (1 << 29) | RD(TMP_REG1) | RN(TMP_REG1) | (1 << 10)));
FAIL_IF(push_inst(compiler, B_CC | ((((sljit_ins) -3) & 0x7ffff) << 5) | 0x1 /* not-equal */));
- FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(TMP_REG1)));
-
- local_size &= 0xfff;
}
- if (local_size > 256) {
- FAIL_IF(push_inst(compiler, SUBI | RD(TMP_REG1) | RN(TMP_REG1) | (local_size << 10)));
- FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(TMP_REG1)));
- }
- else if (local_size > 0)
- FAIL_IF(push_inst(compiler, LDR_PRE | RT(TMP_ZERO) | RN(TMP_REG1) | ((-local_size & 0x1ff) << 12)));
+ local_size &= 0xfff;
- FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RN(TMP_REG1) | (0 << 10)));
+ if (local_size > 0)
+ FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(SLJIT_SP)));
+ else
+ FAIL_IF(push_inst(compiler, STP | RT(TMP_FP) | RT2(TMP_LR) | RN(SLJIT_SP)));
}
- else if (local_size > 256) {
- FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(TMP_REG1)));
- FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RN(TMP_REG1) | (0 << 10)));
+
+ if (local_size > 0) {
+ if (local_size <= 512)
+ FAIL_IF(push_inst(compiler, STP_PRE | RT(TMP_FP) | RT2(TMP_LR)
+ | RN(SLJIT_SP) | (sljit_ins)((-(local_size >> 3) & 0x7f) << 15)));
+ else {
+ if (local_size >= 4096)
+ local_size = (1 << (22 - 10));
+
+ FAIL_IF(push_inst(compiler, SUBI | RD(SLJIT_SP) | RN(SLJIT_SP) | ((sljit_ins)local_size << 10)));
+ FAIL_IF(push_inst(compiler, STP | RT(TMP_FP) | RT2(TMP_LR) | RN(SLJIT_SP)));
+ }
}
- else if (local_size > 0)
- FAIL_IF(push_inst(compiler, LDR_PRE | RT(TMP_ZERO) | RN(SLJIT_SP) | ((-local_size & 0x1ff) << 12)));
#else /* !_WIN32 */
	/* The local_size does not include the saved registers' size. */
- if (local_size > 0xfff) {
- FAIL_IF(push_inst(compiler, SUBI | RD(SLJIT_SP) | RN(SLJIT_SP) | ((local_size >> 12) << 10) | (1 << 22)));
- local_size &= 0xfff;
+ if (local_size != 0) {
+ if (local_size > 0xfff) {
+ FAIL_IF(push_inst(compiler, SUBI | RD(SLJIT_SP) | RN(SLJIT_SP) | (((sljit_ins)local_size >> 12) << 10) | (1 << 22)));
+ local_size &= 0xfff;
+ }
+
+ if (local_size > 512 || local_size == 0) {
+ if (local_size != 0)
+ FAIL_IF(push_inst(compiler, SUBI | RD(SLJIT_SP) | RN(SLJIT_SP) | ((sljit_ins)local_size << 10)));
+
+ FAIL_IF(push_inst(compiler, STP | RT(TMP_FP) | RT2(TMP_LR) | RN(SLJIT_SP)));
+ } else
+ FAIL_IF(push_inst(compiler, STP_PRE | RT(TMP_FP) | RT2(TMP_LR)
+ | RN(SLJIT_SP) | (sljit_ins)((-(local_size >> 3) & 0x7f) << 15)));
}
- if (local_size != 0)
- FAIL_IF(push_inst(compiler, SUBI | RD(SLJIT_SP) | RN(SLJIT_SP) | (local_size << 10)));
#endif /* _WIN32 */
- return SLJIT_SUCCESS;
+ return push_inst(compiler, ADDI | RD(TMP_FP) | RN(SLJIT_SP) | (0 << 10));
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler,
@@ -1047,58 +1235,58 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *comp
CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
- saved_regs_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 2);
- if (saved_regs_size & 0x8)
- saved_regs_size += sizeof(sljit_sw);
+ saved_regs_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds - SLJIT_KEPT_SAVEDS_COUNT(options), 2);
+ saved_regs_size += GET_SAVED_FLOAT_REGISTERS_SIZE(fscratches, fsaveds, f64);
- compiler->local_size = saved_regs_size + ((local_size + 15) & ~0xf);
+ compiler->local_size = (local_size + saved_regs_size + 0xf) & ~0xf;
return SLJIT_SUCCESS;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw)
+static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler, sljit_s32 is_return_to)
{
- sljit_s32 local_size;
- sljit_s32 i, tmp, offs, prev, saved_regs_size;
-
- CHECK_ERROR();
- CHECK(check_sljit_emit_return(compiler, op, src, srcw));
-
- FAIL_IF(emit_mov_before_return(compiler, op, src, srcw));
-
- saved_regs_size = GET_SAVED_REGISTERS_SIZE(compiler->scratches, compiler->saveds, 2);
- if (saved_regs_size & 0x8)
- saved_regs_size += sizeof(sljit_sw);
-
- local_size = compiler->local_size - saved_regs_size;
-
- /* Load LR as early as possible. */
- if (local_size == 0)
- FAIL_IF(push_inst(compiler, LDP | RT(TMP_FP) | RT2(TMP_LR) | RN(SLJIT_SP)));
- else if (local_size < 63 * sizeof(sljit_sw)) {
- FAIL_IF(push_inst(compiler, LDP_PRE | RT(TMP_FP) | RT2(TMP_LR)
- | RN(SLJIT_SP) | (local_size << (15 - 3))));
- }
- else {
+ sljit_s32 local_size, prev, fprev, i, tmp;
+ sljit_ins offs;
+
+ local_size = compiler->local_size;
+
+ if (!is_return_to) {
+ if (local_size > 512 && local_size <= 512 + 496) {
+ FAIL_IF(push_inst(compiler, LDP_POST | RT(TMP_FP) | RT2(TMP_LR)
+ | RN(SLJIT_SP) | ((sljit_ins)(local_size - 512) << (15 - 3))));
+ local_size = 512;
+ } else
+ FAIL_IF(push_inst(compiler, LDP | RT(TMP_FP) | RT2(TMP_LR) | RN(SLJIT_SP)));
+ } else {
+ if (local_size > 512 && local_size <= 512 + 248) {
+ FAIL_IF(push_inst(compiler, LDRI_POST | RT(TMP_FP) | RN(SLJIT_SP) | ((sljit_ins)(local_size - 512) << 12)));
+ local_size = 512;
+ } else
+ FAIL_IF(push_inst(compiler, LDRI | RT(TMP_FP) | RN(SLJIT_SP) | 0));
+ }
+
+ if (local_size > 512) {
+ local_size -= 512;
if (local_size > 0xfff) {
- FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RN(SLJIT_SP) | ((local_size >> 12) << 10) | (1 << 22)));
+ FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RN(SLJIT_SP)
+ | (((sljit_ins)local_size >> 12) << 10) | (1 << 22)));
local_size &= 0xfff;
}
- if (local_size)
- FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RN(SLJIT_SP) | (local_size << 10)));
- FAIL_IF(push_inst(compiler, LDP | RT(TMP_FP) | RT2(TMP_LR) | RN(SLJIT_SP)));
+ FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RN(SLJIT_SP) | ((sljit_ins)local_size << 10)));
+ local_size = 512;
}
- tmp = compiler->saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - compiler->saveds) : SLJIT_FIRST_SAVED_REG;
+ offs = (sljit_ins)(local_size - 2 * SSIZE_OF(sw)) << (15 - 3);
prev = -1;
- offs = 2 << 15;
- for (i = SLJIT_S0; i >= tmp; i--) {
+
+ tmp = SLJIT_S0 - compiler->saveds;
+ for (i = SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options); i > tmp; i--) {
if (prev == -1) {
prev = i;
continue;
}
FAIL_IF(push_inst(compiler, LDP | RT(prev) | RT2(i) | RN(SLJIT_SP) | offs));
- offs += 2 << 15;
+ offs -= (sljit_ins)2 << 15;
prev = -1;
}
@@ -1108,25 +1296,83 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *comp
continue;
}
FAIL_IF(push_inst(compiler, LDP | RT(prev) | RT2(i) | RN(SLJIT_SP) | offs));
- offs += 2 << 15;
+ offs -= (sljit_ins)2 << 15;
prev = -1;
}
+ fprev = -1;
+
+ tmp = SLJIT_FS0 - compiler->fsaveds;
+ for (i = SLJIT_FS0; i > tmp; i--) {
+ if (fprev == -1) {
+ fprev = i;
+ continue;
+ }
+ FAIL_IF(push_inst(compiler, LDP_F64 | VT(fprev) | VT2(i) | RN(SLJIT_SP) | offs));
+ offs -= (sljit_ins)2 << 15;
+ fprev = -1;
+ }
+
+ for (i = compiler->fscratches; i >= SLJIT_FIRST_SAVED_FLOAT_REG; i--) {
+ if (fprev == -1) {
+ fprev = i;
+ continue;
+ }
+ FAIL_IF(push_inst(compiler, LDP_F64 | VT(fprev) | VT2(i) | RN(SLJIT_SP) | offs));
+ offs -= (sljit_ins)2 << 15;
+ fprev = -1;
+ }
+
+ if (fprev != -1)
+ FAIL_IF(push_inst(compiler, LDRI_F64 | VT(fprev) | RN(SLJIT_SP) | (offs >> 5) | (1 << 10)));
+
if (prev != -1)
- FAIL_IF(push_inst(compiler, LDRI | RT(prev) | RN(SLJIT_SP) | (offs >> 5)));
+ FAIL_IF(push_inst(compiler, LDRI | RT(prev) | RN(SLJIT_SP) | (offs >> 5) | ((fprev == -1) ? (1 << 10) : 0)));
+
+	/* This and the next call/jump instruction can execute in parallel. */
+ return push_inst(compiler, ADDI | RD(SLJIT_SP) | RN(SLJIT_SP) | (sljit_ins)(local_size << 10));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler *compiler)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_return_void(compiler));
+
+ FAIL_IF(emit_stack_frame_release(compiler, 0));
- /* These two can be executed in parallel. */
- FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RN(SLJIT_SP) | (saved_regs_size << 10)));
return push_inst(compiler, RET | RN(TMP_LR));
}
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_to(struct sljit_compiler *compiler,
+ sljit_s32 src, sljit_sw srcw)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_return_to(compiler, src, srcw));
+
+ if (src & SLJIT_MEM) {
+ ADJUST_LOCAL_OFFSET(src, srcw);
+ FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG1, src, srcw, TMP_REG1));
+ src = TMP_REG1;
+ srcw = 0;
+ } else if (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) {
+ FAIL_IF(push_inst(compiler, MOV | RD(TMP_REG1) | RM(src)));
+ src = TMP_REG1;
+ srcw = 0;
+ }
+
+ FAIL_IF(emit_stack_frame_release(compiler, 1));
+
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_ijump(compiler, SLJIT_JUMP, src, srcw);
+}
+
/* --------------------------------------------------------------------- */
/* Operators */
/* --------------------------------------------------------------------- */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op)
{
- sljit_ins inv_bits = (op & SLJIT_I32_OP) ? W_OP : 0;
+ sljit_ins inv_bits = (op & SLJIT_32) ? W_OP : 0;
CHECK_ERROR();
CHECK(check_sljit_emit_op0(compiler, op));
@@ -1139,12 +1385,12 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
return push_inst(compiler, NOP);
case SLJIT_LMUL_UW:
case SLJIT_LMUL_SW:
- FAIL_IF(push_inst(compiler, ORR | RD(TMP_REG1) | RN(TMP_ZERO) | RM(SLJIT_R0)));
+ FAIL_IF(push_inst(compiler, MOV | RD(TMP_REG1) | RM(SLJIT_R0)));
FAIL_IF(push_inst(compiler, MADD | RD(SLJIT_R0) | RN(SLJIT_R0) | RM(SLJIT_R1) | RT2(TMP_ZERO)));
return push_inst(compiler, (op == SLJIT_LMUL_UW ? UMULH : SMULH) | RD(SLJIT_R1) | RN(TMP_REG1) | RM(SLJIT_R1));
case SLJIT_DIVMOD_UW:
case SLJIT_DIVMOD_SW:
- FAIL_IF(push_inst(compiler, (ORR ^ inv_bits) | RD(TMP_REG1) | RN(TMP_ZERO) | RM(SLJIT_R0)));
+ FAIL_IF(push_inst(compiler, (MOV ^ inv_bits) | RD(TMP_REG1) | RM(SLJIT_R0)));
FAIL_IF(push_inst(compiler, ((op == SLJIT_DIVMOD_UW ? UDIV : SDIV) ^ inv_bits) | RD(SLJIT_R0) | RN(SLJIT_R0) | RM(SLJIT_R1)));
FAIL_IF(push_inst(compiler, (MADD ^ inv_bits) | RD(SLJIT_R1) | RN(SLJIT_R0) | RM(SLJIT_R1) | RT2(TMP_ZERO)));
return push_inst(compiler, (SUB ^ inv_bits) | RD(SLJIT_R1) | RN(TMP_REG1) | RM(SLJIT_R1));
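
Note: AArch64 has no combined divide-and-remainder instruction, so DIVMOD above keeps a copy of the dividend, divides, multiplies the quotient back (MADD with the zero register, i.e. a plain MUL) and subtracts. The identity it relies on, as a sketch (illustrative names):

	/* R1 = R0_orig - (R0_orig / R1) * R1, matching the MOV / UDIV /
	 * MUL / SUB sequence above; hardware MSUB would fuse the last
	 * two steps. Divisor must be nonzero. */
	static unsigned long udivmod(unsigned long n, unsigned long d,
		unsigned long *rem)
	{
		unsigned long q = n / d;
		*rem = n - q * d;
		return q;
	}
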
@@ -1171,13 +1417,13 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
ADJUST_LOCAL_OFFSET(dst, dstw);
ADJUST_LOCAL_OFFSET(src, srcw);
- dst_r = SLOW_IS_REG(dst) ? dst : TMP_REG1;
+ dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
op = GET_OPCODE(op);
if (op >= SLJIT_MOV && op <= SLJIT_MOV_P) {
/* Both operands are registers. */
if (dst_r != TMP_REG1 && FAST_IS_REG(src))
- return emit_op_imm(compiler, op | ((op_flags & SLJIT_I32_OP) ? INT_OP : 0), dst_r, TMP_REG1, src);
+ return emit_op_imm(compiler, op | ((op_flags & SLJIT_32) ? INT_OP : 0), dst_r, TMP_REG1, src);
switch (op) {
case SLJIT_MOV:
@@ -1186,32 +1432,33 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
break;
case SLJIT_MOV_U8:
mem_flags = BYTE_SIZE;
- if (src & SLJIT_IMM)
+ if (src == SLJIT_IMM)
srcw = (sljit_u8)srcw;
break;
case SLJIT_MOV_S8:
mem_flags = BYTE_SIZE | SIGNED;
- if (src & SLJIT_IMM)
+ if (src == SLJIT_IMM)
srcw = (sljit_s8)srcw;
break;
case SLJIT_MOV_U16:
mem_flags = HALF_SIZE;
- if (src & SLJIT_IMM)
+ if (src == SLJIT_IMM)
srcw = (sljit_u16)srcw;
break;
case SLJIT_MOV_S16:
mem_flags = HALF_SIZE | SIGNED;
- if (src & SLJIT_IMM)
+ if (src == SLJIT_IMM)
srcw = (sljit_s16)srcw;
break;
case SLJIT_MOV_U32:
mem_flags = INT_SIZE;
- if (src & SLJIT_IMM)
+ if (src == SLJIT_IMM)
srcw = (sljit_u32)srcw;
break;
case SLJIT_MOV_S32:
+ case SLJIT_MOV32:
mem_flags = INT_SIZE | SIGNED;
- if (src & SLJIT_IMM)
+ if (src == SLJIT_IMM)
srcw = (sljit_s32)srcw;
break;
default:
@@ -1220,7 +1467,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
break;
}
- if (src & SLJIT_IMM)
+ if (src == SLJIT_IMM)
FAIL_IF(emit_op_imm(compiler, SLJIT_MOV | ARG2_IMM, dst_r, TMP_REG1, srcw));
else if (!(src & SLJIT_MEM))
dst_r = src;
@@ -1233,15 +1480,25 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
}
flags = HAS_FLAGS(op_flags) ? SET_FLAGS : 0;
- mem_flags = WORD_SIZE;
- if (op_flags & SLJIT_I32_OP) {
- flags |= INT_OP;
+ switch (op) {
+ case SLJIT_REV_U16:
+ case SLJIT_REV_S16:
+ mem_flags = HALF_SIZE;
+ break;
+ case SLJIT_REV_U32:
+ case SLJIT_REV_S32:
mem_flags = INT_SIZE;
- }
+ break;
+ default:
+ mem_flags = WORD_SIZE;
- if (dst == SLJIT_UNUSED)
- flags |= UNUSED_RETURN;
+ if (op_flags & SLJIT_32) {
+ flags |= INT_OP;
+ mem_flags = INT_SIZE;
+ }
+ break;
+ }
if (src & SLJIT_MEM) {
FAIL_IF(emit_op_mem(compiler, mem_flags, TMP_REG2, src, srcw, TMP_REG2));
@@ -1263,24 +1520,21 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
sljit_s32 dst_r, flags, mem_flags;
CHECK_ERROR();
- CHECK(check_sljit_emit_op2(compiler, op, dst, dstw, src1, src1w, src2, src2w));
+ CHECK(check_sljit_emit_op2(compiler, op, 0, dst, dstw, src1, src1w, src2, src2w));
ADJUST_LOCAL_OFFSET(dst, dstw);
ADJUST_LOCAL_OFFSET(src1, src1w);
ADJUST_LOCAL_OFFSET(src2, src2w);
- if (dst == SLJIT_UNUSED && !HAS_FLAGS(op))
- return SLJIT_SUCCESS;
-
- dst_r = SLOW_IS_REG(dst) ? dst : TMP_REG1;
+ dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
flags = HAS_FLAGS(op) ? SET_FLAGS : 0;
mem_flags = WORD_SIZE;
- if (op & SLJIT_I32_OP) {
+ if (op & SLJIT_32) {
flags |= INT_OP;
mem_flags = INT_SIZE;
}
- if (dst == SLJIT_UNUSED)
+ if (dst == TMP_REG1)
flags |= UNUSED_RETURN;
if (src1 & SLJIT_MEM) {
@@ -1293,12 +1547,12 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
src2 = TMP_REG2;
}
- if (src1 & SLJIT_IMM)
+ if (src1 == SLJIT_IMM)
flags |= ARG1_IMM;
else
src1w = src1;
- if (src2 & SLJIT_IMM)
+ if (src2 == SLJIT_IMM)
flags |= ARG2_IMM;
else
src2w = src2;
@@ -1310,6 +1564,86 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
return SLJIT_SUCCESS;
}
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2u(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2, sljit_sw src2w)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_op2(compiler, op, 1, 0, 0, src1, src1w, src2, src2w));
+
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_op2(compiler, op, TMP_REG1, 0, src1, src1w, src2, src2w);
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_shift_into(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst_reg,
+ sljit_s32 src1_reg,
+ sljit_s32 src2_reg,
+ sljit_s32 src3, sljit_sw src3w)
+{
+ sljit_ins inv_bits, imm;
+ sljit_s32 is_left;
+ sljit_sw mask;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_shift_into(compiler, op, dst_reg, src1_reg, src2_reg, src3, src3w));
+
+ is_left = (GET_OPCODE(op) == SLJIT_SHL || GET_OPCODE(op) == SLJIT_MSHL);
+
+ if (src1_reg == src2_reg) {
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_op2(compiler, (is_left ? SLJIT_ROTL : SLJIT_ROTR) | (op & SLJIT_32), dst_reg, 0, src1_reg, 0, src3, src3w);
+ }
+
+ ADJUST_LOCAL_OFFSET(src3, src3w);
+
+ inv_bits = (op & SLJIT_32) ? W_OP : 0;
+
+ if (src3 == SLJIT_IMM) {
+ mask = inv_bits ? 0x1f : 0x3f;
+ src3w &= mask;
+
+ if (src3w == 0)
+ return SLJIT_SUCCESS;
+
+ if (is_left)
+ src3w = (src3w ^ mask) + 1;
+
+ return push_inst(compiler, (EXTR ^ (inv_bits | (inv_bits >> 9))) | RD(dst_reg)
+ | RN(is_left ? src1_reg : src2_reg) | RM(is_left ? src2_reg : src1_reg) | ((sljit_ins)src3w << 10));
+ }
+
+ if (src3 & SLJIT_MEM) {
+ FAIL_IF(emit_op_mem(compiler, inv_bits ? INT_SIZE : WORD_SIZE, TMP_REG2, src3, src3w, TMP_REG2));
+ src3 = TMP_REG2;
+ } else if (dst_reg == src3) {
+ FAIL_IF(push_inst(compiler, MOV | RD(TMP_REG2) | RM(src3)));
+ src3 = TMP_REG2;
+ }
+
+ FAIL_IF(push_inst(compiler, ((is_left ? LSLV : LSRV) ^ inv_bits) | RD(dst_reg) | RN(src1_reg) | RM(src3)));
+
+ if (!(op & SLJIT_SHIFT_INTO_NON_ZERO)) {
+ /* Shift left/right by 1. */
+ if (is_left)
+ imm = (sljit_ins)(inv_bits ? ((1 << 16) | (31 << 10)) : ((1 << 16) | (63 << 10) | (1 << 22)));
+ else
+ imm = (sljit_ins)(inv_bits ? ((31 << 16) | (30 << 10)) : ((63 << 16) | (62 << 10) | (1 << 22)));
+
+ FAIL_IF(push_inst(compiler, (UBFM ^ (inv_bits | (inv_bits >> 9))) | RD(TMP_REG1) | RN(src2_reg) | imm));
+
+	/* Set imm to the encoded shift-amount mask (0x1f or 0x3f). */
+ imm = (sljit_ins)(inv_bits ? (4 << 10) : ((5 << 10) | (1 << 22)));
+ FAIL_IF(push_inst(compiler, (EORI ^ inv_bits) | RD(TMP_REG2) | RN(src3) | imm));
+
+ src2_reg = TMP_REG1;
+ } else
+ FAIL_IF(push_inst(compiler, (SUB ^ inv_bits) | RD(TMP_REG2) | RN(TMP_ZERO) | RM(src3)));
+
+ FAIL_IF(push_inst(compiler, ((is_left ? LSRV : LSLV) ^ inv_bits) | RD(TMP_REG1) | RN(src2_reg) | RM(TMP_REG2)));
+ return push_inst(compiler, (ORR ^ inv_bits) | RD(dst_reg) | RN(dst_reg) | RM(TMP_REG1));
+}
+
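Note: sljit_emit_shift_into builds a funnel shift, where bits leaving src2_reg fill the hole opened in src1_reg. For immediate amounts a single EXTR suffices (it extracts from the concatenation of the two sources), and `(src3w ^ mask) + 1` is simply `width - n`. For register amounts it is two variable shifts plus ORR, with the non-NON_ZERO variant pre-shifting src2 by one so a zero amount cannot become an undefined full-width shift. The identity, sketched (illustrative names):

	#include <stdint.h>

	/* Left shift-into: dst = (src1 << n) | (src2 >> (64 - n)).
	 * For n == 0, src2 contributes nothing; the immediate path
	 * above emits no instruction in that case. */
	static uint64_t shift_into_left(uint64_t src1, uint64_t src2,
		unsigned n)
	{
		n &= 63;
		if (n == 0)
			return src1;
		return (src1 << n) | (src2 >> (64 - n));
	}
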
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 src, sljit_sw srcw)
{
@@ -1320,7 +1654,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *comp
switch (op) {
case SLJIT_FAST_RETURN:
if (FAST_IS_REG(src))
- FAIL_IF(push_inst(compiler, ORR | RD(TMP_LR) | RN(TMP_ZERO) | RM(src)));
+ FAIL_IF(push_inst(compiler, MOV | RD(TMP_LR) | RM(src)));
else
FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_LR, src, srcw, TMP_REG1));
@@ -1350,21 +1684,49 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *comp
return SLJIT_SUCCESS;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_dst(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw)
{
- CHECK_REG_INDEX(check_sljit_get_register_index(reg));
- return reg_map[reg];
+ sljit_s32 dst_r = TMP_LR;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_op_dst(compiler, op, dst, dstw));
+ ADJUST_LOCAL_OFFSET(dst, dstw);
+
+ switch (op) {
+ case SLJIT_FAST_ENTER:
+ if (FAST_IS_REG(dst))
+ return push_inst(compiler, MOV | RD(dst) | RM(TMP_LR));
+ break;
+ case SLJIT_GET_RETURN_ADDRESS:
+ dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
+ FAIL_IF(emit_op_mem(compiler, WORD_SIZE, dst_r, SLJIT_MEM1(SLJIT_SP), 0x8, TMP_REG2));
+ break;
+ }
+
+ if (dst & SLJIT_MEM)
+ return emit_op_mem(compiler, WORD_SIZE | STORE, dst_r, dst, dstw, TMP_REG2);
+
+ return SLJIT_SUCCESS;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 type, sljit_s32 reg)
{
- CHECK_REG_INDEX(check_sljit_get_float_register_index(reg));
+ CHECK_REG_INDEX(check_sljit_get_register_index(type, reg));
+
+ if (type == SLJIT_GP_REGISTER)
+ return reg_map[reg];
+
+ if (type != SLJIT_FLOAT_REGISTER && type != SLJIT_SIMD_REG_64 && type != SLJIT_SIMD_REG_128)
+ return -1;
+
return freg_map[reg];
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler,
- void *instruction, sljit_s32 size)
+ void *instruction, sljit_u32 size)
{
+ SLJIT_UNUSED_ARG(size);
CHECK_ERROR();
CHECK(check_sljit_emit_op_custom(compiler, instruction, size));
@@ -1391,34 +1753,34 @@ static sljit_s32 emit_fop_mem(struct sljit_compiler *compiler, sljit_s32 flags,
return push_inst(compiler, STR_FR | type | VT(reg)
| RN(arg & REG_MASK) | RM(OFFS_REG(arg)) | (argw ? (1 << 12) : 0));
- FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG1) | RN(arg & REG_MASK) | RM(OFFS_REG(arg)) | (argw << 10)));
+ FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG1) | RN(arg & REG_MASK) | RM(OFFS_REG(arg)) | ((sljit_ins)argw << 10)));
return push_inst(compiler, STR_FI | type | VT(reg) | RN(TMP_REG1));
}
arg &= REG_MASK;
- if (arg == SLJIT_UNUSED) {
+ if (!arg) {
FAIL_IF(load_immediate(compiler, TMP_REG1, argw & ~(0xfff << shift)));
argw = (argw >> shift) & 0xfff;
- return push_inst(compiler, STR_FI | type | VT(reg) | RN(TMP_REG1) | (argw << 10));
+ return push_inst(compiler, STR_FI | type | VT(reg) | RN(TMP_REG1) | ((sljit_ins)argw << 10));
}
if (argw >= 0 && (argw & ((1 << shift) - 1)) == 0) {
if ((argw >> shift) <= 0xfff)
- return push_inst(compiler, STR_FI | type | VT(reg) | RN(arg) | (argw << (10 - shift)));
+ return push_inst(compiler, STR_FI | type | VT(reg) | RN(arg) | ((sljit_ins)argw << (10 - shift)));
if (argw <= 0xffffff) {
- FAIL_IF(push_inst(compiler, ADDI | (1 << 22) | RD(TMP_REG1) | RN(arg) | ((argw >> 12) << 10)));
+ FAIL_IF(push_inst(compiler, ADDI | (1 << 22) | RD(TMP_REG1) | RN(arg) | (((sljit_ins)argw >> 12) << 10)));
argw = ((argw & 0xfff) >> shift);
- return push_inst(compiler, STR_FI | type | VT(reg) | RN(TMP_REG1) | (argw << 10));
+ return push_inst(compiler, STR_FI | type | VT(reg) | RN(TMP_REG1) | ((sljit_ins)argw << 10));
}
}
if (argw <= 255 && argw >= -256)
- return push_inst(compiler, STUR_FI | type | VT(reg) | RN(arg) | ((argw & 0x1ff) << 12));
+ return push_inst(compiler, STUR_FI | type | VT(reg) | RN(arg) | (((sljit_ins)argw & 0x1ff) << 12));
FAIL_IF(load_immediate(compiler, TMP_REG1, argw));
return push_inst(compiler, STR_FR | type | VT(reg) | RN(arg) | RM(TMP_REG1));
@@ -1429,13 +1791,13 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_comp
sljit_s32 src, sljit_sw srcw)
{
sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
- sljit_ins inv_bits = (op & SLJIT_F32_OP) ? (1 << 22) : 0;
+ sljit_ins inv_bits = (op & SLJIT_32) ? (1 << 22) : 0;
if (GET_OPCODE(op) == SLJIT_CONV_S32_FROM_F64)
inv_bits |= W_OP;
if (src & SLJIT_MEM) {
- emit_fop_mem(compiler, (op & SLJIT_F32_OP) ? INT_SIZE : WORD_SIZE, TMP_FREG1, src, srcw);
+ FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_32) ? INT_SIZE : WORD_SIZE, TMP_FREG1, src, srcw));
src = TMP_FREG1;
}
@@ -1446,60 +1808,90 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_comp
return SLJIT_SUCCESS;
}
-static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
+static sljit_s32 sljit_emit_fop1_conv_f64_from_w(struct sljit_compiler *compiler, sljit_ins ins,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
{
sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
- sljit_ins inv_bits = (op & SLJIT_F32_OP) ? (1 << 22) : 0;
-
- if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32)
- inv_bits |= W_OP;
if (src & SLJIT_MEM) {
- emit_op_mem(compiler, ((GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32) ? INT_SIZE : WORD_SIZE), TMP_REG1, src, srcw, TMP_REG1);
+ emit_op_mem(compiler, (ins & W_OP) ? WORD_SIZE : INT_SIZE, TMP_REG1, src, srcw, TMP_REG1);
src = TMP_REG1;
- } else if (src & SLJIT_IMM) {
-#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
- if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32)
- srcw = (sljit_s32)srcw;
-#endif
+ } else if (src == SLJIT_IMM) {
FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
src = TMP_REG1;
}
- FAIL_IF(push_inst(compiler, (SCVTF ^ inv_bits) | VD(dst_r) | RN(src)));
+ FAIL_IF(push_inst(compiler, ins | VD(dst_r) | RN(src)));
if (dst & SLJIT_MEM)
- return emit_fop_mem(compiler, ((op & SLJIT_F32_OP) ? INT_SIZE : WORD_SIZE) | STORE, TMP_FREG1, dst, dstw);
+ return emit_fop_mem(compiler, ((ins & (1 << 22)) ? WORD_SIZE : INT_SIZE) | STORE, TMP_FREG1, dst, dstw);
return SLJIT_SUCCESS;
}
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 src, sljit_sw srcw)
+{
+ sljit_ins inv_bits = (op & SLJIT_32) ? (1 << 22) : 0;
+
+ if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32) {
+ inv_bits |= W_OP;
+
+ if (src == SLJIT_IMM)
+ srcw = (sljit_s32)srcw;
+ }
+
+ return sljit_emit_fop1_conv_f64_from_w(compiler, SCVTF ^ inv_bits, dst, dstw, src, srcw);
+}
+
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_uw(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 src, sljit_sw srcw)
+{
+ sljit_ins inv_bits = (op & SLJIT_32) ? (1 << 22) : 0;
+
+ if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_U32) {
+ inv_bits |= W_OP;
+
+ if (src == SLJIT_IMM)
+ srcw = (sljit_u32)srcw;
+ }
+
+ return sljit_emit_fop1_conv_f64_from_w(compiler, UCVTF ^ inv_bits, dst, dstw, src, srcw);
+}
+
static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w)
{
- sljit_s32 mem_flags = (op & SLJIT_F32_OP) ? INT_SIZE : WORD_SIZE;
- sljit_ins inv_bits = (op & SLJIT_F32_OP) ? (1 << 22) : 0;
+ sljit_s32 mem_flags = (op & SLJIT_32) ? INT_SIZE : WORD_SIZE;
+ sljit_ins inv_bits = (op & SLJIT_32) ? (1 << 22) : 0;
if (src1 & SLJIT_MEM) {
- emit_fop_mem(compiler, mem_flags, TMP_FREG1, src1, src1w);
+ FAIL_IF(emit_fop_mem(compiler, mem_flags, TMP_FREG1, src1, src1w));
src1 = TMP_FREG1;
}
if (src2 & SLJIT_MEM) {
- emit_fop_mem(compiler, mem_flags, TMP_FREG2, src2, src2w);
+ FAIL_IF(emit_fop_mem(compiler, mem_flags, TMP_FREG2, src2, src2w));
src2 = TMP_FREG2;
}
- return push_inst(compiler, (FCMP ^ inv_bits) | VN(src1) | VM(src2));
+ FAIL_IF(push_inst(compiler, (FCMP ^ inv_bits) | VN(src1) | VM(src2)));
+
+ if (GET_FLAG_TYPE(op) != SLJIT_UNORDERED_OR_EQUAL)
+ return SLJIT_SUCCESS;
+
+ FAIL_IF(push_inst(compiler, CSINC | (0x0 << 12) | RD(TMP_REG1) | RN(TMP_ZERO) | RM(TMP_ZERO)));
+ return push_inst(compiler, CCMPI | (0x0 << 16) | (0x7 << 12) | RN(TMP_REG1) | 0x4);
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
{
- sljit_s32 dst_r, mem_flags = (op & SLJIT_F32_OP) ? INT_SIZE : WORD_SIZE;
+ sljit_s32 dst_r, mem_flags = (op & SLJIT_32) ? INT_SIZE : WORD_SIZE;
sljit_ins inv_bits;
CHECK_ERROR();
@@ -1507,11 +1899,11 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
SLJIT_COMPILE_ASSERT((INT_SIZE ^ 0x1) == WORD_SIZE, must_be_one_bit_difference);
SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw);
- inv_bits = (op & SLJIT_F32_OP) ? (1 << 22) : 0;
+ inv_bits = (op & SLJIT_32) ? (1 << 22) : 0;
dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
if (src & SLJIT_MEM) {
- emit_fop_mem(compiler, (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32) ? (mem_flags ^ 0x1) : mem_flags, dst_r, src, srcw);
+ FAIL_IF(emit_fop_mem(compiler, (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32) ? (mem_flags ^ 0x1) : mem_flags, dst_r, src, srcw));
src = dst_r;
}
@@ -1531,7 +1923,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
FAIL_IF(push_inst(compiler, (FABS ^ inv_bits) | VD(dst_r) | VN(src)));
break;
case SLJIT_CONV_F64_FROM_F32:
- FAIL_IF(push_inst(compiler, FCVT | ((op & SLJIT_F32_OP) ? (1 << 22) : (1 << 15)) | VD(dst_r) | VN(src)));
+ FAIL_IF(push_inst(compiler, FCVT | (sljit_ins)((op & SLJIT_32) ? (1 << 22) : (1 << 15)) | VD(dst_r) | VN(src)));
break;
}
@@ -1545,8 +1937,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compil
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w)
{
- sljit_s32 dst_r, mem_flags = (op & SLJIT_F32_OP) ? INT_SIZE : WORD_SIZE;
- sljit_ins inv_bits = (op & SLJIT_F32_OP) ? (1 << 22) : 0;
+ sljit_s32 dst_r, mem_flags = (op & SLJIT_32) ? INT_SIZE : WORD_SIZE;
+ sljit_ins inv_bits = (op & SLJIT_32) ? (1 << 22) : 0;
CHECK_ERROR();
CHECK(check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w));
@@ -1556,11 +1948,11 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compil
dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
if (src1 & SLJIT_MEM) {
- emit_fop_mem(compiler, mem_flags, TMP_FREG1, src1, src1w);
+ FAIL_IF(emit_fop_mem(compiler, mem_flags, TMP_FREG1, src1, src1w));
src1 = TMP_FREG1;
}
if (src2 & SLJIT_MEM) {
- emit_fop_mem(compiler, mem_flags, TMP_FREG2, src2, src2w);
+ FAIL_IF(emit_fop_mem(compiler, mem_flags, TMP_FREG2, src2, src2w));
src2 = TMP_FREG2;
}
@@ -1577,6 +1969,11 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compil
case SLJIT_DIV_F64:
FAIL_IF(push_inst(compiler, (FDIV ^ inv_bits) | VD(dst_r) | VN(src1) | VM(src2)));
break;
+ case SLJIT_COPYSIGN_F64:
+ FAIL_IF(push_inst(compiler, (FMOV_R ^ ((op & SLJIT_32) ? (W_OP | (1 << 22)) : 0)) | VN(src2) | RD(TMP_REG1)));
+ FAIL_IF(push_inst(compiler, (FABS ^ inv_bits) | VD(dst_r) | VN(src1)));
+ FAIL_IF(push_inst(compiler, TBZ | ((op & SLJIT_32) ? 0 : ((sljit_ins)1 << 31)) | (0x1f << 19) | (2 << 5) | RT(TMP_REG1)));
+ return push_inst(compiler, (FNEG ^ inv_bits) | VD(dst_r) | VN(dst_r));
}
if (!(dst & SLJIT_MEM))
@@ -1584,80 +1981,168 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compil
return emit_fop_mem(compiler, mem_flags | STORE, TMP_FREG1, dst, dstw);
}
-/* --------------------------------------------------------------------- */
-/* Other instructions */
-/* --------------------------------------------------------------------- */
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fset32(struct sljit_compiler *compiler,
+ sljit_s32 freg, sljit_f32 value)
+{
+ sljit_u32 exp;
+ union {
+ sljit_u32 imm;
+ sljit_f32 value;
+ } u;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fset32(compiler, freg, value));
+
+ u.value = value;
+
+ if (u.imm == 0)
+ return push_inst(compiler, (FMOV_R ^ (W_OP | (1 << 22))) | RN(TMP_ZERO) | VD(freg) | (1 << 16));
+
+ if ((u.imm << (32 - 19)) == 0) {
+ exp = (u.imm >> (23 + 2)) & 0x3f;
+
+ if (exp == 0x20 || exp == 0x1f)
+ return push_inst(compiler, (FMOV_I ^ (1 << 22)) | (sljit_ins)((((u.imm >> 24) & 0x80) | ((u.imm >> 19) & 0x7f)) << 13) | VD(freg));
+ }
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
+ FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_s32)u.imm));
+ return push_inst(compiler, (FMOV_R ^ (W_OP | (1 << 22))) | RN(TMP_REG1) | VD(freg) | (1 << 16));
+}
+
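Note: FMOV's 8-bit float immediate packs a sign, a 3-bit exponent and 4 mantissa bits, so sljit_emit_fset32 can use it only when the low 19 mantissa bits are zero (the `u.imm << (32 - 19)` test) and the biased exponent sits in the narrow band imm8 can express (the 0x20/0x1f check on the top six exponent bits); zero goes through an integer FMOV from the zero register instead. A host-side predicate mirroring those checks, as a sketch (hypothetical helper):

	#include <stdint.h>
	#include <string.h>

	/* 4 usable mantissa bits, biased exponent in 0x7c..0x83;
	 * 0.0f is handled separately above. */
	static int fmov_imm8_encodable(float f)
	{
		uint32_t bits, exp;

		memcpy(&bits, &f, sizeof(bits));
		if (bits == 0)
			return 0;
		if ((bits << 13) != 0) /* low 19 mantissa bits */
			return 0;
		exp = (bits >> 25) & 0x3f; /* top 6 of 8 exponent bits */
		return exp == 0x20 || exp == 0x1f;
	}
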
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fset64(struct sljit_compiler *compiler,
+ sljit_s32 freg, sljit_f64 value)
{
+ sljit_uw exp;
+ union {
+ sljit_uw imm;
+ sljit_f64 value;
+ } u;
+
CHECK_ERROR();
- CHECK(check_sljit_emit_fast_enter(compiler, dst, dstw));
- ADJUST_LOCAL_OFFSET(dst, dstw);
+ CHECK(check_sljit_emit_fset64(compiler, freg, value));
+
+ u.value = value;
- if (FAST_IS_REG(dst))
- return push_inst(compiler, ORR | RD(dst) | RN(TMP_ZERO) | RM(TMP_LR));
+ if (u.imm == 0)
+ return push_inst(compiler, FMOV_R | RN(TMP_ZERO) | VD(freg) | (sljit_ins)1 << 16);
- /* Memory. */
- return emit_op_mem(compiler, WORD_SIZE | STORE, TMP_LR, dst, dstw, TMP_REG1);
+ if ((u.imm << (64 - 48)) == 0) {
+ exp = (u.imm >> (52 + 2)) & 0x1ff;
+
+ if (exp == 0x100 || exp == 0xff)
+ return push_inst(compiler, FMOV_I | (sljit_ins)((((u.imm >> 56) & 0x80) | ((u.imm >> 48) & 0x7f)) << 13) | VD(freg));
+ }
+
+ FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_sw)u.imm));
+ return push_inst(compiler, FMOV_R | RN(TMP_REG1) | VD(freg) | (1 << 16));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fcopy(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 freg, sljit_s32 reg)
+{
+ sljit_ins inst;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fcopy(compiler, op, freg, reg));
+
+ if (GET_OPCODE(op) == SLJIT_COPY_TO_F64)
+ inst = FMOV_R | RN(reg) | VD(freg) | (1 << 16);
+ else
+ inst = FMOV_R | VN(freg) | RD(reg);
+
+ if (op & SLJIT_32)
+ inst ^= W_OP | (1 << 22);
+
+ return push_inst(compiler, inst);
}
/* --------------------------------------------------------------------- */
/* Conditional instructions */
/* --------------------------------------------------------------------- */
-static sljit_uw get_cc(struct sljit_compiler *compiler, sljit_s32 type)
+static sljit_ins get_cc(struct sljit_compiler *compiler, sljit_s32 type)
{
switch (type) {
case SLJIT_EQUAL:
- case SLJIT_EQUAL_F64:
+ case SLJIT_ATOMIC_STORED:
+ case SLJIT_F_EQUAL:
+ case SLJIT_ORDERED_EQUAL:
+ case SLJIT_UNORDERED_OR_EQUAL:
return 0x1;
case SLJIT_NOT_EQUAL:
- case SLJIT_NOT_EQUAL_F64:
+ case SLJIT_ATOMIC_NOT_STORED:
+ case SLJIT_F_NOT_EQUAL:
+ case SLJIT_UNORDERED_OR_NOT_EQUAL:
+ case SLJIT_ORDERED_NOT_EQUAL:
return 0x0;
+ case SLJIT_CARRY:
+ if (compiler->status_flags_state & SLJIT_CURRENT_FLAGS_ADD)
+ return 0x3;
+ /* fallthrough */
+
case SLJIT_LESS:
- case SLJIT_LESS_F64:
return 0x2;
+ case SLJIT_NOT_CARRY:
+ if (compiler->status_flags_state & SLJIT_CURRENT_FLAGS_ADD)
+ return 0x2;
+ /* fallthrough */
+
case SLJIT_GREATER_EQUAL:
- case SLJIT_GREATER_EQUAL_F64:
return 0x3;
case SLJIT_GREATER:
- case SLJIT_GREATER_F64:
+ case SLJIT_UNORDERED_OR_GREATER:
return 0x9;
case SLJIT_LESS_EQUAL:
- case SLJIT_LESS_EQUAL_F64:
+ case SLJIT_F_LESS_EQUAL:
+ case SLJIT_ORDERED_LESS_EQUAL:
return 0x8;
case SLJIT_SIG_LESS:
+ case SLJIT_UNORDERED_OR_LESS:
return 0xa;
case SLJIT_SIG_GREATER_EQUAL:
+ case SLJIT_F_GREATER_EQUAL:
+ case SLJIT_ORDERED_GREATER_EQUAL:
return 0xb;
case SLJIT_SIG_GREATER:
+ case SLJIT_F_GREATER:
+ case SLJIT_ORDERED_GREATER:
return 0xd;
case SLJIT_SIG_LESS_EQUAL:
+ case SLJIT_UNORDERED_OR_LESS_EQUAL:
return 0xc;
case SLJIT_OVERFLOW:
- if (!(compiler->status_flags_state & SLJIT_CURRENT_FLAGS_ADD_SUB))
+ if (!(compiler->status_flags_state & (SLJIT_CURRENT_FLAGS_ADD | SLJIT_CURRENT_FLAGS_SUB)))
return 0x0;
+ /* fallthrough */
- case SLJIT_UNORDERED_F64:
+ case SLJIT_UNORDERED:
return 0x7;
case SLJIT_NOT_OVERFLOW:
- if (!(compiler->status_flags_state & SLJIT_CURRENT_FLAGS_ADD_SUB))
+ if (!(compiler->status_flags_state & (SLJIT_CURRENT_FLAGS_ADD | SLJIT_CURRENT_FLAGS_SUB)))
return 0x1;
+ /* fallthrough */
- case SLJIT_ORDERED_F64:
+ case SLJIT_ORDERED:
return 0x6;
+ case SLJIT_F_LESS:
+ case SLJIT_ORDERED_LESS:
+ return 0x5;
+
+ case SLJIT_UNORDERED_OR_GREATER_EQUAL:
+ return 0x4;
+
default:
SLJIT_UNREACHABLE();
return 0xe;
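
Note: get_cc appears to return AArch64 condition codes in inverted form (SLJIT_EQUAL yields NE's encoding 0x1, and so on); the CSINC/CSEL/FCSEL call sites are written around the inverted condition, the same trick the architecture's own CSET alias uses. The CARRY cases need the ADD/SUB state because subtraction sets C as "no borrow": carry after ADD is C set, while a borrow after SUB is C clear. Sketched (illustrative names):

	/* Raw NZCV condition encodings. */
	enum { CC_EQ = 0x0, CC_NE, CC_CS, CC_CC };

	/* Inverted code for SLJIT_CARRY, matching get_cc above:
	 * ADD: carry-out means C set (CS), inverted here to 0x3;
	 * SUB: borrow means C clear (CC), inverted here to 0x2. */
	static unsigned carry_cc(int flags_from_add)
	{
		return flags_from_add ? (CC_CS ^ 0x1) : (CC_CC ^ 0x1);
	}
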
@@ -1709,14 +2194,16 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile
SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type,
sljit_s32 arg_types)
{
+ SLJIT_UNUSED_ARG(arg_types);
CHECK_ERROR_PTR();
CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types));
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
- || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->skip_checks = 1;
-#endif
+ if (type & SLJIT_CALL_RETURN) {
+ PTR_FAIL_IF(emit_stack_frame_release(compiler, 0));
+ type = SLJIT_JUMP | (type & SLJIT_REWRITABLE_JUMP);
+ }
+ SLJIT_SKIP_CHECKS(compiler);
return sljit_emit_jump(compiler, type);
}
@@ -1724,7 +2211,7 @@ static SLJIT_INLINE struct sljit_jump* emit_cmp_to0(struct sljit_compiler *compi
sljit_s32 src, sljit_sw srcw)
{
struct sljit_jump *jump;
- sljit_ins inv_bits = (type & SLJIT_I32_OP) ? W_OP : 0;
+ sljit_ins inv_bits = (type & SLJIT_32) ? W_OP : 0;
SLJIT_ASSERT((type & 0xff) == SLJIT_EQUAL || (type & 0xff) == SLJIT_NOT_EQUAL);
ADJUST_LOCAL_OFFSET(src, srcw);
@@ -1738,7 +2225,7 @@ static SLJIT_INLINE struct sljit_jump* emit_cmp_to0(struct sljit_compiler *compi
PTR_FAIL_IF(emit_op_mem(compiler, inv_bits ? INT_SIZE : WORD_SIZE, TMP_REG1, src, srcw, TMP_REG1));
src = TMP_REG1;
}
- else if (src & SLJIT_IMM) {
+ else if (src == SLJIT_IMM) {
PTR_FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
src = TMP_REG1;
}
@@ -1761,10 +2248,10 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi
CHECK_ERROR();
CHECK(check_sljit_emit_ijump(compiler, type, src, srcw));
- ADJUST_LOCAL_OFFSET(src, srcw);
- if (!(src & SLJIT_IMM)) {
+ if (src != SLJIT_IMM) {
if (src & SLJIT_MEM) {
+ ADJUST_LOCAL_OFFSET(src, srcw);
FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG1, src, srcw, TMP_REG1));
src = TMP_REG1;
}
@@ -1775,7 +2262,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi
jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
FAIL_IF(!jump);
set_jump(jump, compiler, JUMP_ADDR | ((type >= SLJIT_FAST_CALL) ? IS_BL : 0));
- jump->u.target = srcw;
+ jump->u.target = (sljit_uw)srcw;
FAIL_IF(emit_imm64_const(compiler, TMP_REG1, 0));
jump->addr = compiler->size;
@@ -1786,14 +2273,27 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compi
sljit_s32 arg_types,
sljit_s32 src, sljit_sw srcw)
{
+ SLJIT_UNUSED_ARG(arg_types);
CHECK_ERROR();
CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw));
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
- || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->skip_checks = 1;
-#endif
+ if (src & SLJIT_MEM) {
+ ADJUST_LOCAL_OFFSET(src, srcw);
+ FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG1, src, srcw, TMP_REG1));
+ src = TMP_REG1;
+ }
+
+ if (type & SLJIT_CALL_RETURN) {
+ if (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) {
+ FAIL_IF(push_inst(compiler, MOV | RD(TMP_REG1) | RM(src)));
+ src = TMP_REG1;
+ }
+
+ FAIL_IF(emit_stack_frame_release(compiler, 0));
+ type = SLJIT_JUMP;
+ }
+ SLJIT_SKIP_CHECKS(compiler);
return sljit_emit_ijump(compiler, type, src, srcw);
}
@@ -1808,7 +2308,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
CHECK(check_sljit_emit_op_flags(compiler, op, dst, dstw, type));
ADJUST_LOCAL_OFFSET(dst, dstw);
- cc = get_cc(compiler, type & 0xff);
+ cc = get_cc(compiler, type);
dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
if (GET_OPCODE(op) < SLJIT_ADD) {
@@ -1825,7 +2325,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
flags = HAS_FLAGS(op) ? SET_FLAGS : 0;
mem_flags = WORD_SIZE;
- if (op & SLJIT_I32_OP) {
+ if (op & SLJIT_32) {
flags |= INT_OP;
mem_flags = INT_SIZE;
}
@@ -1845,39 +2345,135 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
return SLJIT_SUCCESS;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compiler, sljit_s32 type,
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_select(struct sljit_compiler *compiler, sljit_s32 type,
sljit_s32 dst_reg,
- sljit_s32 src, sljit_sw srcw)
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2_reg)
{
- sljit_ins inv_bits = (dst_reg & SLJIT_I32_OP) ? W_OP : 0;
+ sljit_ins inv_bits = (type & SLJIT_32) ? W_OP : 0;
sljit_ins cc;
CHECK_ERROR();
- CHECK(check_sljit_emit_cmov(compiler, type, dst_reg, src, srcw));
+ CHECK(check_sljit_emit_select(compiler, type, dst_reg, src1, src1w, src2_reg));
- if (SLJIT_UNLIKELY(src & SLJIT_IMM)) {
- if (dst_reg & SLJIT_I32_OP)
- srcw = (sljit_s32)srcw;
- FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
- src = TMP_REG1;
- srcw = 0;
+ ADJUST_LOCAL_OFFSET(src1, src1w);
+
+ if (src1 == SLJIT_IMM) {
+ if (type & SLJIT_32)
+ src1w = (sljit_s32)src1w;
+ FAIL_IF(load_immediate(compiler, TMP_REG1, src1w));
+ src1 = TMP_REG1;
+ } else if (src1 & SLJIT_MEM) {
+ FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG1, src1, src1w, TMP_REG2));
+ src1 = TMP_REG1;
}
- cc = get_cc(compiler, type & 0xff);
- dst_reg &= ~SLJIT_I32_OP;
+ cc = get_cc(compiler, type & ~SLJIT_32);
+ return push_inst(compiler, (CSEL ^ inv_bits) | (cc << 12) | RD(dst_reg) | RN(src2_reg) | RM(src1));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fselect(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 dst_freg,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2_freg)
+{
+ sljit_ins inv_bits = (type & SLJIT_32) ? (1 << 22) : 0;
+ sljit_ins cc;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fselect(compiler, type, dst_freg, src1, src1w, src2_freg));
+
+ ADJUST_LOCAL_OFFSET(src1, src1w);
+
+ if (src1 & SLJIT_MEM) {
+ FAIL_IF(emit_fop_mem(compiler, (type & SLJIT_32) ? INT_SIZE : WORD_SIZE, TMP_FREG1, src1, src1w));
+ src1 = TMP_FREG1;
+ }
- return push_inst(compiler, (CSEL ^ inv_bits) | (cc << 12) | RD(dst_reg) | RN(dst_reg) | RM(src));
+ cc = get_cc(compiler, type & ~SLJIT_32);
+ return push_inst(compiler, (FCSEL ^ inv_bits) | (cc << 12) | VD(dst_freg) | VN(src2_freg) | VM(src1));
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type,
sljit_s32 reg,
sljit_s32 mem, sljit_sw memw)
{
- sljit_u32 sign = 0, inst;
+ sljit_u32 inst;
CHECK_ERROR();
CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw));
+ if (!(reg & REG_PAIR_MASK))
+ return sljit_emit_mem_unaligned(compiler, type, reg, mem, memw);
+
+ ADJUST_LOCAL_OFFSET(mem, memw);
+
+ if (!(mem & REG_MASK)) {
+ FAIL_IF(load_immediate(compiler, TMP_REG1, memw & ~0x1f8));
+
+ mem = SLJIT_MEM1(TMP_REG1);
+ memw &= 0x1f8;
+ } else if (mem & OFFS_REG_MASK) {
+ FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG1) | RN(mem & REG_MASK) | RM(OFFS_REG(mem)) | ((sljit_ins)(memw & 0x3) << 10)));
+
+ mem = SLJIT_MEM1(TMP_REG1);
+ memw = 0;
+ } else if ((memw & 0x7) != 0 || memw > 0x1f8 || memw < -0x200) {
+ inst = ADDI;
+
+ if (memw < 0) {
+ /* Remains negative for integer min. */
+ memw = -memw;
+ inst = SUBI;
+ } else if ((memw & 0x7) == 0 && memw <= 0x7ff0) {
+ if (!(type & SLJIT_MEM_STORE) && (mem & REG_MASK) == REG_PAIR_FIRST(reg)) {
+ FAIL_IF(push_inst(compiler, LDRI | RD(REG_PAIR_SECOND(reg)) | RN(mem & REG_MASK) | ((sljit_ins)memw << 7)));
+ return push_inst(compiler, LDRI | RD(REG_PAIR_FIRST(reg)) | RN(mem & REG_MASK) | ((sljit_ins)(memw + 0x8) << 7));
+ }
+
+ inst = (type & SLJIT_MEM_STORE) ? STRI : LDRI;
+
+ FAIL_IF(push_inst(compiler, inst | RD(REG_PAIR_FIRST(reg)) | RN(mem & REG_MASK) | ((sljit_ins)memw << 7)));
+ return push_inst(compiler, inst | RD(REG_PAIR_SECOND(reg)) | RN(mem & REG_MASK) | ((sljit_ins)(memw + 0x8) << 7));
+ }
+
+ if ((sljit_uw)memw <= 0xfff) {
+ FAIL_IF(push_inst(compiler, inst | RD(TMP_REG1) | RN(mem & REG_MASK) | ((sljit_ins)memw << 10)));
+ memw = 0;
+ } else if ((sljit_uw)memw <= 0xffffff) {
+ FAIL_IF(push_inst(compiler, inst | (1 << 22) | RD(TMP_REG1) | RN(mem & REG_MASK) | (((sljit_ins)memw >> 12) << 10)));
+
+ if ((memw & 0xe07) != 0) {
+ FAIL_IF(push_inst(compiler, inst | RD(TMP_REG1) | RN(TMP_REG1) | (((sljit_ins)memw & 0xfff) << 10)));
+ memw = 0;
+ } else {
+ memw &= 0xfff;
+ }
+ } else {
+ FAIL_IF(load_immediate(compiler, TMP_REG1, memw));
+ FAIL_IF(push_inst(compiler, (inst == ADDI ? ADD : SUB) | RD(TMP_REG1) | RN(mem & REG_MASK) | RM(TMP_REG1)));
+ memw = 0;
+ }
+
+ mem = SLJIT_MEM1(TMP_REG1);
+
+ if (inst == SUBI)
+ memw = -memw;
+ }
+
+ SLJIT_ASSERT((memw & 0x7) == 0 && memw <= 0x1f8 && memw >= -0x200);
+ return push_inst(compiler, ((type & SLJIT_MEM_STORE) ? STP : LDP) | RT(REG_PAIR_FIRST(reg)) | RT2(REG_PAIR_SECOND(reg)) | RN(mem & REG_MASK) | (sljit_ins)((memw & 0x3f8) << 12));
+}
+
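Note: the register-pair path above normalizes every addressing form down to a single LDP/STP, whose immediate is a signed 7-bit field scaled by the access size: for 64-bit pairs, multiples of 8 in [-0x200, 0x1f8], exactly what the final assertion states. A sketch of the validity test (hypothetical helper):

	/* LDP/STP Xt1, Xt2, [Xn, #imm]: imm7 scaled by 8, so the
	 * reachable offsets are -512..504 in steps of 8. */
	static int ldp_offset_ok(long memw)
	{
		return (memw & 0x7) == 0 && memw >= -0x200 && memw <= 0x1f8;
	}
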
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem_update(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 reg,
+ sljit_s32 mem, sljit_sw memw)
+{
+ sljit_u32 sign = 0, inst;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_mem_update(compiler, type, reg, mem, memw));
+
if ((mem & OFFS_REG_MASK) || (memw > 255 || memw < -256))
return SLJIT_ERR_UNSUPPORTED;
@@ -1891,17 +2487,21 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compile
break;
case SLJIT_MOV_S8:
sign = 1;
+ /* fallthrough */
case SLJIT_MOV_U8:
inst = STURBI | (MEM_SIZE_SHIFT(BYTE_SIZE) << 30) | 0x400;
break;
case SLJIT_MOV_S16:
sign = 1;
+ /* fallthrough */
case SLJIT_MOV_U16:
inst = STURBI | (MEM_SIZE_SHIFT(HALF_SIZE) << 30) | 0x400;
break;
case SLJIT_MOV_S32:
sign = 1;
+ /* fallthrough */
case SLJIT_MOV_U32:
+ case SLJIT_MOV32:
inst = STURBI | (MEM_SIZE_SHIFT(INT_SIZE) << 30) | 0x400;
break;
default:
@@ -1913,20 +2513,20 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compile
if (!(type & SLJIT_MEM_STORE))
inst |= sign ? 0x00800000 : 0x00400000;
- if (type & SLJIT_MEM_PRE)
+ if (!(type & SLJIT_MEM_POST))
inst |= 0x800;
- return push_inst(compiler, inst | RT(reg) | RN(mem & REG_MASK) | ((memw & 0x1ff) << 12));
+ return push_inst(compiler, inst | RT(reg) | RN(mem & REG_MASK) | (sljit_ins)((memw & 0x1ff) << 12));
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compiler, sljit_s32 type,
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem_update(struct sljit_compiler *compiler, sljit_s32 type,
sljit_s32 freg,
sljit_s32 mem, sljit_sw memw)
{
sljit_u32 inst;
CHECK_ERROR();
- CHECK(check_sljit_emit_fmem(compiler, type, freg, mem, memw));
+ CHECK(check_sljit_emit_fmem_update(compiler, type, freg, mem, memw));
if ((mem & OFFS_REG_MASK) || (memw > 255 || memw < -256))
return SLJIT_ERR_UNSUPPORTED;
@@ -1936,16 +2536,671 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compil
inst = STUR_FI | 0x80000400;
- if (!(type & SLJIT_F32_OP))
+ if (!(type & SLJIT_32))
inst |= 0x40000000;
if (!(type & SLJIT_MEM_STORE))
inst |= 0x00400000;
- if (type & SLJIT_MEM_PRE)
+ if (!(type & SLJIT_MEM_POST))
inst |= 0x800;
- return push_inst(compiler, inst | VT(freg) | RN(mem & REG_MASK) | ((memw & 0x1ff) << 12));
+ return push_inst(compiler, inst | VT(freg) | RN(mem & REG_MASK) | (sljit_ins)((memw & 0x1ff) << 12));
+}
+
+static sljit_s32 sljit_emit_simd_mem_offset(struct sljit_compiler *compiler, sljit_s32 *mem_ptr, sljit_sw memw)
+{
+ sljit_ins ins;
+ sljit_s32 mem = *mem_ptr;
+
+ if (SLJIT_UNLIKELY(mem & OFFS_REG_MASK)) {
+ *mem_ptr = TMP_REG1;
+ return push_inst(compiler, ADD | RD(TMP_REG1) | RN(mem & REG_MASK) | RM(OFFS_REG(mem)) | ((sljit_ins)(memw & 0x3) << 10));
+ }
+
+ if (!(mem & REG_MASK)) {
+ *mem_ptr = TMP_REG1;
+ return load_immediate(compiler, TMP_REG1, memw);
+ }
+
+ mem &= REG_MASK;
+
+ if (memw == 0) {
+ *mem_ptr = mem;
+ return SLJIT_SUCCESS;
+ }
+
+ *mem_ptr = TMP_REG1;
+
+ if (memw < -0xffffff || memw > 0xffffff) {
+ FAIL_IF(load_immediate(compiler, TMP_REG1, memw));
+ return push_inst(compiler, ADD | RD(TMP_REG1) | RN(TMP_REG1) | RM(mem));
+ }
+
+ ins = ADDI;
+
+ if (memw < 0) {
+ memw = -memw;
+ ins = SUBI;
+ }
+
+ if (memw > 0xfff) {
+ FAIL_IF(push_inst(compiler, ins | (1 << 22) | RD(TMP_REG1) | RN(mem) | ((sljit_ins)(memw >> 12) << 10)));
+
+ memw &= 0xfff;
+ if (memw == 0)
+ return SLJIT_SUCCESS;
+
+ mem = TMP_REG1;
+ }
+
+ return push_inst(compiler, ins | RD(TMP_REG1) | RN(mem) | ((sljit_ins)memw << 10));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_mov(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 srcdst, sljit_sw srcdstw)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_ins ins;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_mov(compiler, type, freg, srcdst, srcdstw));
+
+ ADJUST_LOCAL_OFFSET(srcdst, srcdstw);
+
+ if (reg_size != 3 && reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if ((type & SLJIT_SIMD_FLOAT) && (elem_size < 2 || elem_size > 3))
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ if (!(srcdst & SLJIT_MEM)) {
+ if (type & SLJIT_SIMD_STORE)
+ ins = VD(srcdst) | VN(freg) | VM(freg);
+ else
+ ins = VD(freg) | VN(srcdst) | VM(srcdst);
+
+ if (reg_size == 4)
+ ins |= (1 << 30);
+
+ return push_inst(compiler, ORR_v | ins);
+ }
+
+ FAIL_IF(sljit_emit_simd_mem_offset(compiler, &srcdst, srcdstw));
+
+ if (elem_size > 3)
+ elem_size = 3;
+
+ ins = (type & SLJIT_SIMD_STORE) ? ST1 : LD1;
+
+ if (reg_size == 4)
+ ins |= (1 << 30);
+
+ return push_inst(compiler, ins | ((sljit_ins)elem_size << 10) | RN(srcdst) | VT(freg));
+}
+
+static sljit_ins simd_get_imm(sljit_s32 elem_size, sljit_uw value)
+{
+ sljit_ins result;
+
+ if (elem_size > 2 && (sljit_u32)value == (value >> 32)) {
+ elem_size = 2;
+ value = (sljit_u32)value;
+ }
+
+ if (elem_size == 2 && (sljit_u16)value == (value >> 16)) {
+ elem_size = 1;
+ value = (sljit_u16)value;
+ }
+
+ if (elem_size == 1 && (sljit_u8)value == (value >> 8)) {
+ elem_size = 0;
+ value = (sljit_u8)value;
+ }
+
+ switch (elem_size) {
+ case 0:
+ SLJIT_ASSERT(value <= 0xff);
+ result = 0xe000;
+ break;
+ case 1:
+ SLJIT_ASSERT(value <= 0xffff);
+ result = 0;
+
+ while (1) {
+ if (value <= 0xff) {
+ result |= 0x8000;
+ break;
+ }
+
+ if ((value & 0xff) == 0) {
+ value >>= 8;
+ result |= 0xa000;
+ break;
+ }
+
+ if (result != 0)
+ return ~(sljit_ins)0;
+
+ value ^= (sljit_uw)0xffff;
+ result = (1 << 29);
+ }
+ break;
+ case 2:
+ SLJIT_ASSERT(value <= 0xffffffff);
+ result = 0;
+
+ while (1) {
+ if (value <= 0xff) {
+ result |= 0x0000;
+ break;
+ }
+
+ if ((value & ~(sljit_uw)0xff00) == 0) {
+ value >>= 8;
+ result |= 0x2000;
+ break;
+ }
+
+ if ((value & ~(sljit_uw)0xff0000) == 0) {
+ value >>= 16;
+ result |= 0x4000;
+ break;
+ }
+
+ if ((value & ~(sljit_uw)0xff000000) == 0) {
+ value >>= 24;
+ result |= 0x6000;
+ break;
+ }
+
+ if ((value & (sljit_uw)0xff) == 0xff && (value >> 16) == 0) {
+ value >>= 8;
+ result |= 0xc000;
+ break;
+ }
+
+ if ((value & (sljit_uw)0xffff) == 0xffff && (value >> 24) == 0) {
+ value >>= 16;
+ result |= 0xd000;
+ break;
+ }
+
+ if (result != 0)
+ return ~(sljit_ins)0;
+
+ value ^= (sljit_uw)0xffffffff;
+ result = (1 << 29);
+ }
+ break;
+ default:
+ return ~(sljit_ins)0;
+ }
+
+ return (((sljit_ins)value & 0x1f) << 5) | (((sljit_ins)value & 0xe0) << 11) | result;
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_replicate(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 src, sljit_sw srcw)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_ins ins, imm;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_replicate(compiler, type, freg, src, srcw));
+
+ ADJUST_LOCAL_OFFSET(src, srcw);
+
+ if (reg_size != 3 && reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if ((type & SLJIT_SIMD_FLOAT) && (elem_size < 2 || elem_size > 3))
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ if (src & SLJIT_MEM) {
+ FAIL_IF(sljit_emit_simd_mem_offset(compiler, &src, srcw));
+
+ ins = (sljit_ins)elem_size << 10;
+
+ if (reg_size == 4)
+ ins |= (sljit_ins)1 << 30;
+
+ return push_inst(compiler, LD1R | ins | RN(src) | VT(freg));
+ }
+
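+	/* A one-hot bit in the imm5 field (bits 16-20) selects the element
+	   size for DUP: bit 16 for bytes, bit 17 for halfwords, and so on. */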
+ ins = (sljit_ins)1 << (16 + elem_size);
+
+ if (reg_size == 4)
+ ins |= (sljit_ins)1 << 30;
+
+ if (type & SLJIT_SIMD_FLOAT) {
+ if (src == SLJIT_IMM)
+ return push_inst(compiler, MOVI | (ins & ((sljit_ins)1 << 30)) | VD(freg));
+
+ return push_inst(compiler, DUP_e | ins | VD(freg) | VN(src));
+ }
+
+ if (src == SLJIT_IMM) {
+ if (elem_size < 3)
+ srcw &= ((sljit_sw)1 << (((sljit_sw)1 << elem_size) << 3)) - 1;
+
+ imm = simd_get_imm(elem_size, (sljit_uw)srcw);
+
+ if (imm != ~(sljit_ins)0) {
+ imm |= ins & ((sljit_ins)1 << 30);
+
+ return push_inst(compiler, MOVI | imm | VD(freg));
+ }
+
+ FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
+ src = TMP_REG1;
+ }
+
+ return push_inst(compiler, DUP_g | ins | VD(freg) | RN(src));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_lane_mov(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg, sljit_s32 lane_index,
+ sljit_s32 srcdst, sljit_sw srcdstw)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_ins ins;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_lane_mov(compiler, type, freg, lane_index, srcdst, srcdstw));
+
+ ADJUST_LOCAL_OFFSET(srcdst, srcdstw);
+
+ if (reg_size != 3 && reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if ((type & SLJIT_SIMD_FLOAT) && (elem_size < 2 || elem_size > 3))
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ if (type & SLJIT_SIMD_LANE_ZERO) {
+ ins = (reg_size == 3) ? 0 : ((sljit_ins)1 << 30);
+
+ if ((type & SLJIT_SIMD_FLOAT) && freg == srcdst) {
+ FAIL_IF(push_inst(compiler, ORR_v | ins | VD(TMP_FREG1) | VN(freg) | VM(freg)));
+ srcdst = TMP_FREG1;
+ srcdstw = 0;
+ }
+
+ FAIL_IF(push_inst(compiler, MOVI | ins | VD(freg)));
+ }
+
+ if (srcdst & SLJIT_MEM) {
+ FAIL_IF(sljit_emit_simd_mem_offset(compiler, &srcdst, srcdstw));
+
+ if (elem_size == 3)
+ ins = 0x8400;
+ else if (elem_size == 0)
+ ins = 0;
+ else
+ ins = (sljit_ins)0x2000 << elem_size;
+
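+	/* For the single-structure LD1/ST1 the lane index is scaled to a byte
+	   offset; bit 3 of that offset becomes the Q bit (bit 30) and the low
+	   three bits land in the S:size fields at bits 10-12. */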
+ lane_index = lane_index << elem_size;
+ ins |= (sljit_ins)(((lane_index & 0x8) << 27) | ((lane_index & 0x7) << 10));
+
+ return push_inst(compiler, ((type & SLJIT_SIMD_STORE) ? ST1_s : LD1_s) | ins | RN(srcdst) | VT(freg));
+ }
+
+ if (type & SLJIT_SIMD_FLOAT) {
+ if (type & SLJIT_SIMD_STORE)
+ ins = INS_e | ((sljit_ins)1 << (16 + elem_size)) | ((sljit_ins)lane_index << (11 + elem_size)) | VD(srcdst) | VN(freg);
+ else
+ ins = INS_e | ((((sljit_ins)lane_index << 1) | 1) << (16 + elem_size)) | VD(freg) | VN(srcdst);
+
+ return push_inst(compiler, ins);
+ }
+
+ if (srcdst == SLJIT_IMM) {
+ if (elem_size < 3)
+ srcdstw &= ((sljit_sw)1 << (((sljit_sw)1 << elem_size) << 3)) - 1;
+
+ FAIL_IF(load_immediate(compiler, TMP_REG1, srcdstw));
+ srcdst = TMP_REG1;
+ }
+
+ if (type & SLJIT_SIMD_STORE) {
+ ins = RD(srcdst) | VN(freg);
+
+ if ((type & SLJIT_SIMD_LANE_SIGNED) && (elem_size < 2 || (elem_size == 2 && !(type & SLJIT_32)))) {
+ ins |= SMOV;
+
+ if (!(type & SLJIT_32))
+ ins |= (sljit_ins)1 << 30;
+ } else
+ ins |= UMOV;
+ } else
+ ins = INS | VD(freg) | RN(srcdst);
+
+ if (elem_size == 3)
+ ins |= (sljit_ins)1 << 30;
+
+ return push_inst(compiler, ins | ((((sljit_ins)lane_index << 1) | 1) << (16 + elem_size)));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_lane_replicate(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 src, sljit_s32 src_lane_index)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_ins ins;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_lane_replicate(compiler, type, freg, src, src_lane_index));
+
+ if (reg_size != 3 && reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if ((type & SLJIT_SIMD_FLOAT) && (elem_size < 2 || elem_size > 3))
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ ins = (((sljit_ins)src_lane_index << 1) | 1) << (16 + elem_size);
+
+ if (reg_size == 4)
+ ins |= (sljit_ins)1 << 30;
+
+ return push_inst(compiler, DUP_e | ins | VD(freg) | VN(src));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_extend(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 src, sljit_sw srcw)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_s32 elem2_size = SLJIT_SIMD_GET_ELEM2_SIZE(type);
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_extend(compiler, type, freg, src, srcw));
+
+ ADJUST_LOCAL_OFFSET(src, srcw);
+
+ if (reg_size != 3 && reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if ((type & SLJIT_SIMD_FLOAT) && (elem_size != 2 || elem2_size != 3))
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ if (src & SLJIT_MEM) {
+ FAIL_IF(sljit_emit_simd_mem_offset(compiler, &src, srcw));
+
+ if (reg_size == 4 && elem2_size - elem_size == 1)
+ FAIL_IF(push_inst(compiler, LD1 | ((sljit_ins)elem_size << 10) | RN(src) | VT(freg)));
+ else
+ FAIL_IF(push_inst(compiler, LD1_s | ((sljit_ins)0x2000 << (reg_size - elem2_size + elem_size)) | RN(src) | VT(freg)));
+ src = freg;
+ }
+
+ if (type & SLJIT_SIMD_FLOAT) {
+ SLJIT_ASSERT(reg_size == 4);
+ return push_inst(compiler, FCVTL | (1 << 22) | VD(freg) | VN(src));
+ }
+
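+	/* Widen one step at a time: SSHLL/USHLL with a zero shift amount (only
+	   an immh bit set) doubles the element size on each iteration. */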
+ do {
+ FAIL_IF(push_inst(compiler, ((type & SLJIT_SIMD_EXTEND_SIGNED) ? SSHLL : USHLL)
+ | ((sljit_ins)1 << (19 + elem_size)) | VD(freg) | VN(src)));
+ src = freg;
+ } while (++elem_size < elem2_size);
+
+ return SLJIT_SUCCESS;
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_sign(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 dst, sljit_sw dstw)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_ins ins, imms;
+ sljit_s32 dst_r;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_sign(compiler, type, freg, dst, dstw));
+
+ ADJUST_LOCAL_OFFSET(dst, dstw);
+
+ if (reg_size != 3 && reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if ((type & SLJIT_SIMD_FLOAT) && (elem_size < 2 || elem_size > 3))
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
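+	/* imms packs the shift encodings of a USRA reduction chain, one byte
+	   each, consumed from the least significant byte upwards; every step
+	   folds the partial sign masks into fewer, wider lanes until all sign
+	   bits gather in the lowest lane. */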
+ switch (elem_size) {
+ case 0:
+ imms = 0x643219;
+ ins = USHR | (0x9 << 16);
+ break;
+ case 1:
+ imms = (reg_size == 4) ? 0x643219 : 0x6231;
+ ins = USHR | (0x11 << 16);
+ break;
+ case 2:
+ imms = (reg_size == 4) ? 0x6231 : 0x61;
+ ins = USHR | (0x21 << 16);
+ break;
+ default:
+ imms = 0x61;
+ ins = USHR | (0x41 << 16);
+ break;
+ }
+
+ if (reg_size == 4)
+ ins |= (1 << 30);
+
+ FAIL_IF(push_inst(compiler, ins | VD(TMP_FREG1) | VN(freg)));
+
+ if (reg_size == 4 && elem_size > 0)
+ FAIL_IF(push_inst(compiler, XTN | ((sljit_ins)(elem_size - 1) << 22) | VD(TMP_FREG1) | VN(TMP_FREG1)));
+
+ if (imms >= 0x100) {
+ ins = (reg_size == 4 && elem_size == 0) ? (1 << 30) : 0;
+
+ do {
+ FAIL_IF(push_inst(compiler, USRA | ins | ((imms & 0xff) << 16) | VD(TMP_FREG1) | VN(TMP_FREG1)));
+ imms >>= 8;
+ } while (imms >= 0x100);
+ }
+
+ FAIL_IF(push_inst(compiler, USRA | (1 << 30) | (imms << 16) | VD(TMP_FREG1) | VN(TMP_FREG1)));
+
+ dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
+ ins = (0x1 << 16);
+
+ if (reg_size == 4 && elem_size == 0) {
+ FAIL_IF(push_inst(compiler, INS_e | (0x3 << 16) | (0x8 << 11) | VD(TMP_FREG1) | VN(TMP_FREG1)));
+ ins = (0x2 << 16);
+ }
+
+ FAIL_IF(push_inst(compiler, UMOV | ins | RD(dst_r) | VN(TMP_FREG1)));
+
+ if (dst_r == TMP_REG1)
+ return emit_op_mem(compiler, STORE | ((type & SLJIT_32) ? INT_SIZE : WORD_SIZE), TMP_REG1, dst, dstw, TMP_REG2);
+
+ return SLJIT_SUCCESS;
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_op2(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 dst_freg, sljit_s32 src1_freg, sljit_s32 src2_freg)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_ins ins = 0;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_op2(compiler, type, dst_freg, src1_freg, src2_freg));
+
+ if (reg_size != 3 && reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if ((type & SLJIT_SIMD_FLOAT) && (elem_size < 2 || elem_size > 3))
+ return SLJIT_ERR_UNSUPPORTED;
+
+ switch (SLJIT_SIMD_GET_OPCODE(type)) {
+ case SLJIT_SIMD_OP2_AND:
+ ins = AND_v;
+ break;
+ case SLJIT_SIMD_OP2_OR:
+ ins = ORR_v;
+ break;
+ case SLJIT_SIMD_OP2_XOR:
+ ins = EOR_v;
+ break;
+ }
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ if (reg_size == 4)
+ ins |= (sljit_ins)1 << 30;
+
+ return push_inst(compiler, ins | VD(dst_freg) | VN(src1_freg) | VM(src2_freg));
+}
+
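+/* With ARMv8.1 atomics the load side is a plain LDR/LDRH/LDRB, since the
+   matching store is a compare-and-swap; otherwise a load-exclusive (LDXR
+   and friends) opens the exclusive monitor section. */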
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_atomic_load(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst_reg,
+ sljit_s32 mem_reg)
+{
+ sljit_ins ins;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_atomic_load(compiler, op, dst_reg, mem_reg));
+
+#ifdef __ARM_FEATURE_ATOMICS
+ switch (GET_OPCODE(op)) {
+ case SLJIT_MOV32:
+ case SLJIT_MOV_U32:
+ ins = LDR ^ (1 << 30);
+ break;
+ case SLJIT_MOV_U16:
+ ins = LDRH;
+ break;
+ case SLJIT_MOV_U8:
+ ins = LDRB;
+ break;
+ default:
+ ins = LDR;
+ break;
+ }
+#else /* !__ARM_FEATURE_ATOMICS */
+ switch (GET_OPCODE(op)) {
+ case SLJIT_MOV32:
+ case SLJIT_MOV_U32:
+ ins = LDXR ^ (1 << 30);
+ break;
+ case SLJIT_MOV_U8:
+ ins = LDXRB;
+ break;
+ case SLJIT_MOV_U16:
+ ins = LDXRH;
+ break;
+ default:
+ ins = LDXR;
+ break;
+ }
+#endif /* __ARM_FEATURE_ATOMICS */
+ return push_inst(compiler, ins | RN(mem_reg) | RT(dst_reg));
+}
+
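+/* The store side either issues a CAS comparing against temp_reg (plus a
+   compare/CSET sequence when SLJIT_SET_ATOMIC_STORED is requested), or a
+   store-exclusive STXR whose status result lands in TMP_REG1. */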
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_atomic_store(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 src_reg,
+ sljit_s32 mem_reg,
+ sljit_s32 temp_reg)
+{
+ sljit_ins ins;
+ sljit_s32 tmp = temp_reg;
+ sljit_ins cmp = 0;
+ sljit_ins inv_bits = W_OP;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_atomic_store(compiler, op, src_reg, mem_reg, temp_reg));
+
+#ifdef __ARM_FEATURE_ATOMICS
+ if (op & SLJIT_SET_ATOMIC_STORED)
+ cmp = (SUBS ^ W_OP) | RD(TMP_ZERO);
+
+ switch (GET_OPCODE(op)) {
+ case SLJIT_MOV32:
+ case SLJIT_MOV_U32:
+ ins = CAS ^ (1 << 30);
+ break;
+ case SLJIT_MOV_U16:
+ ins = CASH;
+ break;
+ case SLJIT_MOV_U8:
+ ins = CASB;
+ break;
+ default:
+ ins = CAS;
+ inv_bits = 0;
+ if (cmp)
+ cmp ^= W_OP;
+ break;
+ }
+
+ if (cmp) {
+ FAIL_IF(push_inst(compiler, (MOV ^ inv_bits) | RM(temp_reg) | RD(TMP_REG1)));
+ tmp = TMP_REG1;
+ }
+ FAIL_IF(push_inst(compiler, ins | RM(tmp) | RN(mem_reg) | RD(src_reg)));
+ if (!cmp)
+ return SLJIT_SUCCESS;
+
+ FAIL_IF(push_inst(compiler, cmp | RM(tmp) | RN(temp_reg)));
+ FAIL_IF(push_inst(compiler, (CSET ^ inv_bits) | RD(tmp)));
+ return push_inst(compiler, cmp | RM(tmp) | RN(TMP_ZERO));
+#else /* !__ARM_FEATURE_ATOMICS */
+ SLJIT_UNUSED_ARG(tmp);
+ SLJIT_UNUSED_ARG(inv_bits);
+
+ if (op & SLJIT_SET_ATOMIC_STORED)
+ cmp = (SUBI ^ W_OP) | (1 << 29);
+
+ switch (GET_OPCODE(op)) {
+ case SLJIT_MOV32:
+ case SLJIT_MOV_U32:
+ ins = STXR ^ (1 << 30);
+ break;
+ case SLJIT_MOV_U8:
+ ins = STXRB;
+ break;
+ case SLJIT_MOV_U16:
+ ins = STXRH;
+ break;
+ default:
+ ins = STXR;
+ break;
+ }
+
+ FAIL_IF(push_inst(compiler, ins | RM(TMP_REG1) | RN(mem_reg) | RT(src_reg)));
+ return cmp ? push_inst(compiler, cmp | RD(TMP_ZERO) | RN(TMP_REG1)) : SLJIT_SUCCESS;
+#endif /* __ARM_FEATURE_ATOMICS */
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_local_base(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw offset)
@@ -1955,11 +3210,11 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_local_base(struct sljit_compiler *c
CHECK_ERROR();
CHECK(check_sljit_get_local_base(compiler, dst, dstw, offset));
-
- SLJIT_ASSERT (SLJIT_LOCALS_OFFSET_BASE == 0);
+ ADJUST_LOCAL_OFFSET(SLJIT_MEM1(SLJIT_SP), offset);
dst_reg = FAST_IS_REG(dst) ? dst : TMP_REG1;
+	/* Not all instruction forms support accessing the SP register. */
if (offset <= 0xffffff && offset >= -0xffffff) {
ins = ADDI;
if (offset < 0) {
@@ -1968,13 +3223,13 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_local_base(struct sljit_compiler *c
}
if (offset <= 0xfff)
- FAIL_IF(push_inst(compiler, ins | RD(dst_reg) | RN(SLJIT_SP) | (offset << 10)));
+ FAIL_IF(push_inst(compiler, ins | RD(dst_reg) | RN(SLJIT_SP) | (sljit_ins)(offset << 10)));
else {
- FAIL_IF(push_inst(compiler, ins | RD(dst_reg) | RN(SLJIT_SP) | ((offset & 0xfff000) >> (12 - 10)) | (1 << 22)));
+ FAIL_IF(push_inst(compiler, ins | RD(dst_reg) | RN(SLJIT_SP) | (sljit_ins)((offset & 0xfff000) >> (12 - 10)) | (1 << 22)));
offset &= 0xfff;
if (offset != 0)
- FAIL_IF(push_inst(compiler, ins | RD(dst_reg) | RN(dst_reg) | (offset << 10)));
+ FAIL_IF(push_inst(compiler, ins | RD(dst_reg) | RN(dst_reg) | (sljit_ins)(offset << 10)));
}
}
else {
@@ -2002,7 +3257,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compi
set_const(const_, compiler);
dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
- PTR_FAIL_IF(emit_imm64_const(compiler, dst_r, init_value));
+ PTR_FAIL_IF(emit_imm64_const(compiler, dst_r, (sljit_uw)init_value));
if (dst & SLJIT_MEM)
PTR_FAIL_IF(emit_op_mem(compiler, WORD_SIZE | STORE, dst_r, dst, dstw, TMP_REG2));
@@ -2034,17 +3289,17 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_put_label* sljit_emit_put_label(struct slj
SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_target, sljit_sw executable_offset)
{
sljit_ins* inst = (sljit_ins*)addr;
- sljit_s32 dst;
+ sljit_u32 dst;
SLJIT_UNUSED_ARG(executable_offset);
SLJIT_UPDATE_WX_FLAGS(inst, inst + 4, 0);
dst = inst[0] & 0x1f;
SLJIT_ASSERT((inst[0] & 0xffe00000) == MOVZ && (inst[1] & 0xffe00000) == (MOVK | (1 << 21)));
- inst[0] = MOVZ | dst | ((new_target & 0xffff) << 5);
- inst[1] = MOVK | dst | (((new_target >> 16) & 0xffff) << 5) | (1 << 21);
- inst[2] = MOVK | dst | (((new_target >> 32) & 0xffff) << 5) | (2 << 21);
- inst[3] = MOVK | dst | ((new_target >> 48) << 5) | (3 << 21);
+ inst[0] = MOVZ | dst | (((sljit_u32)new_target & 0xffff) << 5);
+ inst[1] = MOVK | dst | (((sljit_u32)(new_target >> 16) & 0xffff) << 5) | (1 << 21);
+ inst[2] = MOVK | dst | (((sljit_u32)(new_target >> 32) & 0xffff) << 5) | (2 << 21);
+ inst[3] = MOVK | dst | ((sljit_u32)(new_target >> 48) << 5) | (3 << 21);
SLJIT_UPDATE_WX_FLAGS(inst, inst + 4, 1);
inst = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
@@ -2053,5 +3308,5 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_ta
SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset)
{
- sljit_set_jump_addr(addr, new_constant, executable_offset);
+ sljit_set_jump_addr(addr, (sljit_uw)new_constant, executable_offset);
}
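The atomic pair above composes into the usual load/store-conditional retry
loop. Below is a minimal usage sketch of a 32-bit atomic increment built on
these entry points; the surrounding compiler setup is assumed, and
sljit_emit_op2, sljit_emit_label/sljit_emit_jump and the
SLJIT_ATOMIC_NOT_STORED condition come from the public sljitLir.h API rather
than from this patch:

	struct sljit_label *retry = sljit_emit_label(compiler);
	/* R0 = [R1]; opens the exclusive monitor on cores without ARMv8.1 atomics */
	sljit_emit_atomic_load(compiler, SLJIT_MOV_U32, SLJIT_R0, SLJIT_R1);
	/* R2 = R0 + 1; R0 stays intact as the expected value for the CAS path */
	sljit_emit_op2(compiler, SLJIT_ADD32, SLJIT_R2, 0, SLJIT_R0, 0, SLJIT_IMM, 1);
	/* conditionally store R2 back to [R1] and report success in the flags */
	sljit_emit_atomic_store(compiler, SLJIT_MOV_U32 | SLJIT_SET_ATOMIC_STORED,
		SLJIT_R2, SLJIT_R1, SLJIT_R0);
	sljit_set_label(sljit_emit_jump(compiler, SLJIT_ATOMIC_NOT_STORED), retry);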
diff --git a/src/3rdparty/pcre2/src/sljit/sljitNativeARM_T2_32.c b/src/3rdparty/pcre2/src/sljit/sljitNativeARM_T2_32.c
index e35dbe99b3..c27c50ddb3 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitNativeARM_T2_32.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitNativeARM_T2_32.c
@@ -49,41 +49,57 @@ static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 5] = {
0, 0, 1, 2, 3, 11, 10, 9, 8, 7, 6, 5, 4, 13, 12, 14, 15
};
-static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
- 0, 0, 1, 2, 3, 4, 5, 6, 7
+static const sljit_u8 freg_map[((SLJIT_NUMBER_OF_FLOAT_REGISTERS + 2) << 1) + 1] = {
+ 0,
+ 0, 1, 2, 3, 4, 5, 15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6,
+ 0, 1, 2, 3, 4, 5, 15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6
+};
+
+static const sljit_u8 freg_ebit_map[((SLJIT_NUMBER_OF_FLOAT_REGISTERS + 2) << 1) + 1] = {
+ 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1
};
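+/* The map is doubled: entries in the upper half name the same register with
+   its extra encoding bit set (the odd single of a double pair), which is
+   how SLJIT_F64_SECOND aliases are resolved. */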
#define COPY_BITS(src, from, to, bits) \
- ((from >= to ? (src >> (from - to)) : (src << (to - from))) & (((1 << bits) - 1) << to))
+ ((from >= to ? ((sljit_ins)(src) >> (from - to)) : ((sljit_ins)(src) << (to - from))) & (((1 << bits) - 1) << to))
+
+#define NEGATE(uimm) ((sljit_uw)-(sljit_sw)(uimm))
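+/* Two's-complement negation of an unsigned immediate, so that an
+   unencodable ADD #imm can fall back to SUB #-imm and vice versa. */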
/* Thumb16 encodings. */
-#define RD3(rd) (reg_map[rd])
-#define RN3(rn) (reg_map[rn] << 3)
-#define RM3(rm) (reg_map[rm] << 6)
-#define RDN3(rdn) (reg_map[rdn] << 8)
-#define IMM3(imm) (imm << 6)
-#define IMM8(imm) (imm)
+#define RD3(rd) ((sljit_ins)reg_map[rd])
+#define RN3(rn) ((sljit_ins)reg_map[rn] << 3)
+#define RM3(rm) ((sljit_ins)reg_map[rm] << 6)
+#define RDN3(rdn) ((sljit_ins)reg_map[rdn] << 8)
+#define IMM3(imm) ((sljit_ins)imm << 6)
+#define IMM8(imm) ((sljit_ins)imm)
/* Thumb16 helpers. */
#define SET_REGS44(rd, rn) \
- ((reg_map[rn] << 3) | (reg_map[rd] & 0x7) | ((reg_map[rd] & 0x8) << 4))
+ (((sljit_ins)reg_map[rn] << 3) | ((sljit_ins)reg_map[rd] & 0x7) | (((sljit_ins)reg_map[rd] & 0x8) << 4))
#define IS_2_LO_REGS(reg1, reg2) \
(reg_map[reg1] <= 7 && reg_map[reg2] <= 7)
#define IS_3_LO_REGS(reg1, reg2, reg3) \
(reg_map[reg1] <= 7 && reg_map[reg2] <= 7 && reg_map[reg3] <= 7)
/* Thumb32 encodings. */
-#define RD4(rd) (reg_map[rd] << 8)
-#define RN4(rn) (reg_map[rn] << 16)
-#define RM4(rm) (reg_map[rm])
-#define RT4(rt) (reg_map[rt] << 12)
-#define DD4(dd) (freg_map[dd] << 12)
-#define DN4(dn) (freg_map[dn] << 16)
-#define DM4(dm) (freg_map[dm])
+#define RM4(rm) ((sljit_ins)reg_map[rm])
+#define RD4(rd) ((sljit_ins)reg_map[rd] << 8)
+#define RT4(rt) ((sljit_ins)reg_map[rt] << 12)
+#define RN4(rn) ((sljit_ins)reg_map[rn] << 16)
+
+#define VM4(vm) (((sljit_ins)freg_map[vm]) | ((sljit_ins)freg_ebit_map[vm] << 5))
+#define VD4(vd) (((sljit_ins)freg_map[vd] << 12) | ((sljit_ins)freg_ebit_map[vd] << 22))
+#define VN4(vn) (((sljit_ins)freg_map[vn] << 16) | ((sljit_ins)freg_ebit_map[vn] << 7))
+
#define IMM5(imm) \
- (COPY_BITS(imm, 2, 12, 3) | ((imm & 0x3) << 6))
+ (COPY_BITS(imm, 2, 12, 3) | (((sljit_ins)imm & 0x3) << 6))
#define IMM12(imm) \
- (COPY_BITS(imm, 11, 26, 1) | COPY_BITS(imm, 8, 12, 3) | (imm & 0xff))
+ (COPY_BITS(imm, 11, 26, 1) | COPY_BITS(imm, 8, 12, 3) | ((sljit_ins)imm & 0xff))
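+/* Thumb32 12-bit immediates are stored split across the instruction word:
+   bit 11 of the value goes to bit 26 (i), bits 8-10 to bits 12-14 (imm3),
+   and the low byte stays in place (imm8). */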
/* --------------------------------------------------------------------- */
/* Instruction forms */
@@ -98,9 +114,9 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
#define ADDS 0x1800
#define ADDSI3 0x1c00
#define ADDSI8 0x3000
-#define ADD_W 0xeb000000
#define ADDWI 0xf2000000
-#define ADD_SP 0xb000
+#define ADD_SP 0x4485
+#define ADD_SP_I 0xb000
#define ADD_W 0xeb000000
#define ADD_WI 0xf1000000
#define ANDI 0xf0000000
@@ -126,6 +142,12 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
#define EORS 0x4040
#define EOR_W 0xea800000
#define IT 0xbf00
+#define LDR 0xf8d00000
+#define LDR_SP 0x9800
+#define LDRD 0xe9500000
+#define LDREX 0xe8500f00
+#define LDREXB 0xe8d00f4f
+#define LDREXH 0xe8d00f5f
#define LDRI 0xf8500800
#define LSLS 0x4080
#define LSLSI 0x0000
@@ -155,6 +177,14 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
#define POP_W 0xe8bd0000
#define PUSH 0xb400
#define PUSH_W 0xe92d0000
+#define REV 0xba00
+#define REV_W 0xfa90f080
+#define REV16 0xba40
+#define REV16_W 0xfa90f090
+#define RBIT 0xfa90f0a0
+#define RORS 0x41c0
+#define ROR_W 0xfa60f000
+#define ROR_WI 0xea4f0030
#define RSB_WI 0xf1c00000
#define RSBSI 0x4240
#define SBCI 0xf1600000
@@ -163,18 +193,24 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
#define SDIV 0xfb90f0f0
#define SMULL 0xfb800000
#define STR_SP 0x9000
+#define STRD 0xe9400000
+#define STREX 0xe8400000
+#define STREXB 0xe8c00f40
+#define STREXH 0xe8c00f50
#define SUBS 0x1a00
#define SUBSI3 0x1e00
#define SUBSI8 0x3800
#define SUB_W 0xeba00000
#define SUBWI 0xf2a00000
-#define SUB_SP 0xb080
+#define SUB_SP_I 0xb080
#define SUB_WI 0xf1a00000
#define SXTB 0xb240
#define SXTB_W 0xfa4ff080
#define SXTH 0xb200
#define SXTH_W 0xfa0ff080
#define TST 0x4200
+#define TSTI 0xf0000f00
+#define TST_W 0xea000f00
#define UDIV 0xfbb0f0f0
#define UMULL 0xfba00000
#define UXTB 0xb2c0
@@ -183,20 +219,57 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
#define UXTH_W 0xfa1ff080
#define VABS_F32 0xeeb00ac0
#define VADD_F32 0xee300a00
+#define VAND 0xef000110
#define VCMP_F32 0xeeb40a40
#define VCVT_F32_S32 0xeeb80ac0
+#define VCVT_F32_U32 0xeeb80a40
#define VCVT_F64_F32 0xeeb70ac0
#define VCVT_S32_F32 0xeebd0ac0
#define VDIV_F32 0xee800a00
+#define VDUP 0xee800b10
+#define VDUP_s 0xffb00c00
+#define VEOR 0xff000110
+#define VLD1 0xf9200000
+#define VLD1_r 0xf9a00c00
+#define VLD1_s 0xf9a00000
+#define VLDR_F32 0xed100a00
#define VMOV_F32 0xeeb00a40
#define VMOV 0xee000a10
#define VMOV2 0xec400a10
+#define VMOV_i 0xef800010
+#define VMOV_s 0xee000b10
+#define VMOVN 0xffb20200
#define VMRS 0xeef1fa10
#define VMUL_F32 0xee200a00
#define VNEG_F32 0xeeb10a40
+#define VORR 0xef200110
+#define VPOP 0xecbd0b00
+#define VPUSH 0xed2d0b00
+#define VSHLL 0xef800a10
+#define VSHR 0xef800010
+#define VSRA 0xef800110
+#define VST1 0xf9000000
+#define VST1_s 0xf9800000
#define VSTR_F32 0xed000a00
#define VSUB_F32 0xee300a40
+#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+
+static sljit_s32 function_check_is_freg(struct sljit_compiler *compiler, sljit_s32 fr, sljit_s32 is_32)
+{
+ if (compiler->scratches == -1)
+ return 0;
+
+ if (is_32 && fr >= SLJIT_F64_SECOND(SLJIT_FR0))
+ fr -= SLJIT_F64_SECOND(0);
+
+ return (fr >= SLJIT_FR0 && fr < (SLJIT_FR0 + compiler->fscratches))
+ || (fr > (SLJIT_FS0 - compiler->fsaveds) && fr <= SLJIT_FS0)
+ || (fr >= SLJIT_TMP_FREGISTER_BASE && fr < (SLJIT_TMP_FREGISTER_BASE + SLJIT_NUMBER_OF_TEMPORARY_FLOAT_REGISTERS));
+}
+
+#endif /* SLJIT_ARGUMENT_CHECKS */
+
static sljit_s32 push_inst16(struct sljit_compiler *compiler, sljit_ins inst)
{
sljit_u16 *ptr;
@@ -204,7 +277,7 @@ static sljit_s32 push_inst16(struct sljit_compiler *compiler, sljit_ins inst)
ptr = (sljit_u16*)ensure_buf(compiler, sizeof(sljit_u16));
FAIL_IF(!ptr);
- *ptr = inst;
+ *ptr = (sljit_u16)(inst);
compiler->size++;
return SLJIT_SUCCESS;
}
@@ -213,8 +286,8 @@ static sljit_s32 push_inst32(struct sljit_compiler *compiler, sljit_ins inst)
{
sljit_u16 *ptr = (sljit_u16*)ensure_buf(compiler, sizeof(sljit_ins));
FAIL_IF(!ptr);
- *ptr++ = inst >> 16;
- *ptr = inst;
+ *ptr++ = (sljit_u16)(inst >> 16);
+ *ptr = (sljit_u16)(inst);
compiler->size += 2;
return SLJIT_SUCCESS;
}
@@ -229,12 +302,12 @@ static SLJIT_INLINE sljit_s32 emit_imm32_const(struct sljit_compiler *compiler,
static SLJIT_INLINE void modify_imm32_const(sljit_u16 *inst, sljit_uw new_imm)
{
- sljit_s32 dst = inst[1] & 0x0f00;
+ sljit_ins dst = inst[1] & 0x0f00;
SLJIT_ASSERT(((inst[0] & 0xfbf0) == (MOVW >> 16)) && ((inst[2] & 0xfbf0) == (MOVT >> 16)) && dst == (inst[3] & 0x0f00));
- inst[0] = (MOVW >> 16) | COPY_BITS(new_imm, 12, 0, 4) | COPY_BITS(new_imm, 11, 10, 1);
- inst[1] = dst | COPY_BITS(new_imm, 8, 12, 3) | (new_imm & 0xff);
- inst[2] = (MOVT >> 16) | COPY_BITS(new_imm, 12 + 16, 0, 4) | COPY_BITS(new_imm, 11 + 16, 10, 1);
- inst[3] = dst | COPY_BITS(new_imm, 8 + 16, 12, 3) | ((new_imm & 0xff0000) >> 16);
+ inst[0] = (sljit_u16)((MOVW >> 16) | COPY_BITS(new_imm, 12, 0, 4) | COPY_BITS(new_imm, 11, 10, 1));
+ inst[1] = (sljit_u16)(dst | COPY_BITS(new_imm, 8, 12, 3) | (new_imm & 0xff));
+ inst[2] = (sljit_u16)((MOVT >> 16) | COPY_BITS(new_imm, 12 + 16, 0, 4) | COPY_BITS(new_imm, 11 + 16, 10, 1));
+ inst[3] = (sljit_u16)(dst | COPY_BITS(new_imm, 8 + 16, 12, 3) | ((new_imm & 0xff0000) >> 16));
}
static SLJIT_INLINE sljit_s32 detect_jump_type(struct sljit_jump *jump, sljit_u16 *code_ptr, sljit_u16 *code, sljit_sw executable_offset)
@@ -318,24 +391,24 @@ static SLJIT_INLINE void set_jump_instruction(struct sljit_jump *jump, sljit_sw
case 1:
/* Encoding T1 of 'B' instruction */
SLJIT_ASSERT(diff <= 127 && diff >= -128 && (jump->flags & IS_COND));
- jump_inst[0] = 0xd000 | (jump->flags & 0xf00) | (diff & 0xff);
+ jump_inst[0] = (sljit_u16)(0xd000 | (jump->flags & 0xf00) | ((sljit_ins)diff & 0xff));
return;
case 2:
/* Encoding T3 of 'B' instruction */
SLJIT_ASSERT(diff <= 524287 && diff >= -524288 && (jump->flags & IS_COND));
- jump_inst[0] = 0xf000 | COPY_BITS(jump->flags, 8, 6, 4) | COPY_BITS(diff, 11, 0, 6) | COPY_BITS(diff, 19, 10, 1);
- jump_inst[1] = 0x8000 | COPY_BITS(diff, 17, 13, 1) | COPY_BITS(diff, 18, 11, 1) | (diff & 0x7ff);
+ jump_inst[0] = (sljit_u16)(0xf000 | COPY_BITS(jump->flags, 8, 6, 4) | COPY_BITS(diff, 11, 0, 6) | COPY_BITS(diff, 19, 10, 1));
+ jump_inst[1] = (sljit_u16)(0x8000 | COPY_BITS(diff, 17, 13, 1) | COPY_BITS(diff, 18, 11, 1) | ((sljit_ins)diff & 0x7ff));
return;
case 3:
SLJIT_ASSERT(jump->flags & IS_COND);
- *jump_inst++ = IT | ((jump->flags >> 4) & 0xf0) | 0x8;
+ *jump_inst++ = (sljit_u16)(IT | ((jump->flags >> 4) & 0xf0) | 0x8);
diff--;
type = 5;
break;
case 4:
/* Encoding T2 of 'B' instruction */
SLJIT_ASSERT(diff <= 1023 && diff >= -1024 && !(jump->flags & IS_COND));
- jump_inst[0] = 0xe000 | (diff & 0x7ff);
+ jump_inst[0] = (sljit_u16)(0xe000 | (diff & 0x7ff));
return;
}
@@ -345,8 +418,8 @@ static SLJIT_INLINE void set_jump_instruction(struct sljit_jump *jump, sljit_sw
s = (diff >> 23) & 0x1;
j1 = (~(diff >> 22) ^ s) & 0x1;
j2 = (~(diff >> 21) ^ s) & 0x1;
- jump_inst[0] = 0xf000 | (s << 10) | COPY_BITS(diff, 11, 0, 10);
- jump_inst[1] = (j1 << 13) | (j2 << 11) | (diff & 0x7ff);
+ jump_inst[0] = (sljit_u16)(0xf000 | ((sljit_ins)s << 10) | COPY_BITS(diff, 11, 0, 10));
+ jump_inst[1] = (sljit_u16)((j1 << 13) | (j2 << 11) | (diff & 0x7ff));
/* The others have a common form. */
if (type == 5) /* Encoding T4 of 'B' instruction */
@@ -405,7 +478,7 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
/* These structures are ordered by their address. */
if (label && label->size == half_count) {
label->addr = ((sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset)) | 0x1;
- label->size = code_ptr - code;
+ label->size = (sljit_uw)(code_ptr - code);
label = label->next;
}
if (jump && jump->addr == half_count) {
@@ -424,8 +497,8 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
}
next_addr = compute_next_addr(label, jump, const_, put_label);
}
- code_ptr ++;
- half_count ++;
+ code_ptr++;
+ half_count++;
} while (buf_ptr < buf_end);
buf = buf->next;
@@ -433,7 +506,7 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
if (label && label->size == half_count) {
label->addr = ((sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset)) | 0x1;
- label->size = code_ptr - code;
+ label->size = (sljit_uw)(code_ptr - code);
label = label->next;
}
@@ -457,7 +530,7 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
compiler->error = SLJIT_ERR_COMPILED;
compiler->executable_offset = executable_offset;
- compiler->executable_size = (code_ptr - code) * sizeof(sljit_u16);
+ compiler->executable_size = (sljit_uw)(code_ptr - code) * sizeof(sljit_u16);
code = (sljit_u16 *)SLJIT_ADD_EXEC_OFFSET(code, executable_offset);
code_ptr = (sljit_u16 *)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
@@ -473,16 +546,25 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
{
switch (feature_type) {
case SLJIT_HAS_FPU:
+ case SLJIT_HAS_F64_AS_F32_PAIR:
+ case SLJIT_HAS_SIMD:
#ifdef SLJIT_IS_FPU_AVAILABLE
- return SLJIT_IS_FPU_AVAILABLE;
+ return (SLJIT_IS_FPU_AVAILABLE) != 0;
#else
/* Available by default. */
return 1;
#endif
+ case SLJIT_SIMD_REGS_ARE_PAIRS:
case SLJIT_HAS_CLZ:
+ case SLJIT_HAS_CTZ:
+ case SLJIT_HAS_REV:
+ case SLJIT_HAS_ROT:
case SLJIT_HAS_CMOV:
case SLJIT_HAS_PREFETCH:
+ case SLJIT_HAS_COPY_F32:
+ case SLJIT_HAS_COPY_F64:
+ case SLJIT_HAS_ATOMIC:
return 1;
default:
@@ -582,7 +664,7 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s
arg1 must be register, imm
arg2 must be register, imm */
sljit_s32 reg;
- sljit_uw imm, nimm;
+ sljit_uw imm, imm2;
if (SLJIT_UNLIKELY((flags & (ARG1_IMM | ARG2_IMM)) == (ARG1_IMM | ARG2_IMM))) {
/* Both are immediates, no temporaries are used. */
@@ -592,59 +674,64 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s
}
if (flags & (ARG1_IMM | ARG2_IMM)) {
- reg = (flags & ARG2_IMM) ? arg1 : arg2;
+ reg = (sljit_s32)((flags & ARG2_IMM) ? arg1 : arg2);
imm = (flags & ARG2_IMM) ? arg2 : arg1;
switch (flags & 0xffff) {
case SLJIT_CLZ:
+ case SLJIT_CTZ:
+ case SLJIT_REV:
+ case SLJIT_REV_U16:
+ case SLJIT_REV_S16:
+ case SLJIT_REV_U32:
+ case SLJIT_REV_S32:
case SLJIT_MUL:
/* No form with immediate operand. */
break;
case SLJIT_MOV:
SLJIT_ASSERT(!(flags & SET_FLAGS) && (flags & ARG2_IMM) && arg1 == TMP_REG2);
return load_immediate(compiler, dst, imm);
- case SLJIT_NOT:
- if (!(flags & SET_FLAGS))
- return load_immediate(compiler, dst, ~imm);
- /* Since the flags should be set, we just fallback to the register mode.
- Although some clever things could be done here, "NOT IMM" does not worth the efforts. */
- break;
case SLJIT_ADD:
- compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD_SUB;
- nimm = -(sljit_sw)imm;
+ compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD;
+ imm2 = NEGATE(imm);
if (IS_2_LO_REGS(reg, dst)) {
if (imm <= 0x7)
return push_inst16(compiler, ADDSI3 | IMM3(imm) | RD3(dst) | RN3(reg));
- if (nimm <= 0x7)
- return push_inst16(compiler, SUBSI3 | IMM3(nimm) | RD3(dst) | RN3(reg));
+ if (imm2 <= 0x7)
+ return push_inst16(compiler, SUBSI3 | IMM3(imm2) | RD3(dst) | RN3(reg));
if (reg == dst) {
if (imm <= 0xff)
return push_inst16(compiler, ADDSI8 | IMM8(imm) | RDN3(dst));
- if (nimm <= 0xff)
- return push_inst16(compiler, SUBSI8 | IMM8(nimm) | RDN3(dst));
+ if (imm2 <= 0xff)
+ return push_inst16(compiler, SUBSI8 | IMM8(imm2) | RDN3(dst));
}
}
if (!(flags & SET_FLAGS)) {
if (imm <= 0xfff)
return push_inst32(compiler, ADDWI | RD4(dst) | RN4(reg) | IMM12(imm));
- if (nimm <= 0xfff)
- return push_inst32(compiler, SUBWI | RD4(dst) | RN4(reg) | IMM12(nimm));
+ if (imm2 <= 0xfff)
+ return push_inst32(compiler, SUBWI | RD4(dst) | RN4(reg) | IMM12(imm2));
}
- nimm = get_imm(imm);
- if (nimm != INVALID_IMM)
- return push_inst32(compiler, ADD_WI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | nimm);
- nimm = get_imm(-(sljit_sw)imm);
- if (nimm != INVALID_IMM)
- return push_inst32(compiler, SUB_WI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | nimm);
+ imm2 = get_imm(imm);
+ if (imm2 != INVALID_IMM)
+ return push_inst32(compiler, ADD_WI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm2);
+ imm = get_imm(NEGATE(imm));
+ if (imm != INVALID_IMM)
+ return push_inst32(compiler, SUB_WI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm);
break;
case SLJIT_ADDC:
- imm = get_imm(imm);
- if (imm != INVALID_IMM)
- return push_inst32(compiler, ADCI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm);
+ compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD;
+ imm2 = get_imm(imm);
+ if (imm2 != INVALID_IMM)
+ return push_inst32(compiler, ADCI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm2);
+ if (flags & ARG2_IMM) {
+ imm = get_imm(~imm);
+ if (imm != INVALID_IMM)
+ return push_inst32(compiler, SBCI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm);
+ }
break;
case SLJIT_SUB:
- /* SUB operation can be replaced by ADD because of the negative carry flag. */
- compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD_SUB;
+ compiler->status_flags_state = SLJIT_CURRENT_FLAGS_SUB;
if (flags & ARG1_IMM) {
if (imm == 0 && IS_2_LO_REGS(reg, dst))
return push_inst16(compiler, RSBSI | RD3(dst) | RN3(reg));
@@ -656,73 +743,89 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s
if (flags & UNUSED_RETURN) {
if (imm <= 0xff && reg_map[reg] <= 7)
return push_inst16(compiler, CMPI | IMM8(imm) | RDN3(reg));
- nimm = get_imm(imm);
- if (nimm != INVALID_IMM)
- return push_inst32(compiler, CMPI_W | RN4(reg) | nimm);
- nimm = get_imm(-(sljit_sw)imm);
- if (nimm != INVALID_IMM)
- return push_inst32(compiler, CMNI_W | RN4(reg) | nimm);
+ imm2 = get_imm(imm);
+ if (imm2 != INVALID_IMM)
+ return push_inst32(compiler, CMPI_W | RN4(reg) | imm2);
+ imm = get_imm(NEGATE(imm));
+ if (imm != INVALID_IMM)
+ return push_inst32(compiler, CMNI_W | RN4(reg) | imm);
+ break;
}
- nimm = -(sljit_sw)imm;
+ imm2 = NEGATE(imm);
if (IS_2_LO_REGS(reg, dst)) {
if (imm <= 0x7)
return push_inst16(compiler, SUBSI3 | IMM3(imm) | RD3(dst) | RN3(reg));
- if (nimm <= 0x7)
- return push_inst16(compiler, ADDSI3 | IMM3(nimm) | RD3(dst) | RN3(reg));
+ if (imm2 <= 0x7)
+ return push_inst16(compiler, ADDSI3 | IMM3(imm2) | RD3(dst) | RN3(reg));
if (reg == dst) {
if (imm <= 0xff)
return push_inst16(compiler, SUBSI8 | IMM8(imm) | RDN3(dst));
- if (nimm <= 0xff)
- return push_inst16(compiler, ADDSI8 | IMM8(nimm) | RDN3(dst));
+ if (imm2 <= 0xff)
+ return push_inst16(compiler, ADDSI8 | IMM8(imm2) | RDN3(dst));
}
}
if (!(flags & SET_FLAGS)) {
if (imm <= 0xfff)
return push_inst32(compiler, SUBWI | RD4(dst) | RN4(reg) | IMM12(imm));
- if (nimm <= 0xfff)
- return push_inst32(compiler, ADDWI | RD4(dst) | RN4(reg) | IMM12(nimm));
+ if (imm2 <= 0xfff)
+ return push_inst32(compiler, ADDWI | RD4(dst) | RN4(reg) | IMM12(imm2));
}
- nimm = get_imm(imm);
- if (nimm != INVALID_IMM)
- return push_inst32(compiler, SUB_WI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | nimm);
- nimm = get_imm(-(sljit_sw)imm);
- if (nimm != INVALID_IMM)
- return push_inst32(compiler, ADD_WI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | nimm);
+ imm2 = get_imm(imm);
+ if (imm2 != INVALID_IMM)
+ return push_inst32(compiler, SUB_WI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm2);
+ imm = get_imm(NEGATE(imm));
+ if (imm != INVALID_IMM)
+ return push_inst32(compiler, ADD_WI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm);
break;
case SLJIT_SUBC:
+ compiler->status_flags_state = SLJIT_CURRENT_FLAGS_SUB;
if (flags & ARG1_IMM)
break;
- imm = get_imm(imm);
+ imm2 = get_imm(imm);
+ if (imm2 != INVALID_IMM)
+ return push_inst32(compiler, SBCI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm2);
+ imm = get_imm(~imm);
if (imm != INVALID_IMM)
- return push_inst32(compiler, SBCI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm);
+ return push_inst32(compiler, ADCI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm);
break;
case SLJIT_AND:
- nimm = get_imm(imm);
- if (nimm != INVALID_IMM)
- return push_inst32(compiler, ANDI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | nimm);
- imm = get_imm(imm);
+ imm2 = get_imm(imm);
+ if (imm2 != INVALID_IMM)
+ return push_inst32(compiler, ((flags & UNUSED_RETURN) ? TSTI : ANDI) | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm2);
+ imm = get_imm(~imm);
if (imm != INVALID_IMM)
return push_inst32(compiler, BICI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm);
break;
case SLJIT_OR:
- nimm = get_imm(imm);
- if (nimm != INVALID_IMM)
- return push_inst32(compiler, ORRI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | nimm);
- imm = get_imm(imm);
+ imm2 = get_imm(imm);
+ if (imm2 != INVALID_IMM)
+ return push_inst32(compiler, ORRI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm2);
+ imm = get_imm(~imm);
if (imm != INVALID_IMM)
return push_inst32(compiler, ORNI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm);
break;
case SLJIT_XOR:
+ if (imm == (sljit_uw)-1) {
+ if (IS_2_LO_REGS(dst, reg))
+ return push_inst16(compiler, MVNS | RD3(dst) | RN3(reg));
+ return push_inst32(compiler, MVN_W | (flags & SET_FLAGS) | RD4(dst) | RM4(reg));
+ }
imm = get_imm(imm);
if (imm != INVALID_IMM)
return push_inst32(compiler, EORI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm);
break;
case SLJIT_SHL:
+ case SLJIT_MSHL:
case SLJIT_LSHR:
+ case SLJIT_MLSHR:
case SLJIT_ASHR:
+ case SLJIT_MASHR:
+ case SLJIT_ROTL:
+ case SLJIT_ROTR:
if (flags & ARG1_IMM)
break;
imm &= 0x1f;
+
if (imm == 0) {
if (!(flags & SET_FLAGS))
return push_inst16(compiler, MOV | SET_REGS44(dst, reg));
@@ -730,19 +833,28 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s
return push_inst16(compiler, MOVS | RD3(dst) | RN3(reg));
return push_inst32(compiler, MOV_W | SET_FLAGS | RD4(dst) | RM4(reg));
}
+
switch (flags & 0xffff) {
case SLJIT_SHL:
+ case SLJIT_MSHL:
if (IS_2_LO_REGS(dst, reg))
return push_inst16(compiler, LSLSI | RD3(dst) | RN3(reg) | (imm << 6));
return push_inst32(compiler, LSL_WI | (flags & SET_FLAGS) | RD4(dst) | RM4(reg) | IMM5(imm));
case SLJIT_LSHR:
+ case SLJIT_MLSHR:
if (IS_2_LO_REGS(dst, reg))
return push_inst16(compiler, LSRSI | RD3(dst) | RN3(reg) | (imm << 6));
return push_inst32(compiler, LSR_WI | (flags & SET_FLAGS) | RD4(dst) | RM4(reg) | IMM5(imm));
- default: /* SLJIT_ASHR */
+ case SLJIT_ASHR:
+ case SLJIT_MASHR:
if (IS_2_LO_REGS(dst, reg))
return push_inst16(compiler, ASRSI | RD3(dst) | RN3(reg) | (imm << 6));
return push_inst32(compiler, ASR_WI | (flags & SET_FLAGS) | RD4(dst) | RM4(reg) | IMM5(imm));
+ case SLJIT_ROTL:
+ imm = (imm ^ 0x1f) + 1;
+ /* fallthrough */
+ default: /* SLJIT_ROTR */
+ return push_inst32(compiler, ROR_WI | RD4(dst) | RM4(reg) | IMM5(imm));
}
default:
SLJIT_UNREACHABLE();
@@ -752,12 +864,11 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s
if (flags & ARG2_IMM) {
imm = arg2;
arg2 = (arg1 == TMP_REG1) ? TMP_REG2 : TMP_REG1;
- FAIL_IF(load_immediate(compiler, arg2, imm));
- }
- else {
+ FAIL_IF(load_immediate(compiler, (sljit_s32)arg2, imm));
+ } else {
imm = arg1;
arg1 = (arg2 == TMP_REG1) ? TMP_REG2 : TMP_REG1;
- FAIL_IF(load_immediate(compiler, arg1, imm));
+ FAIL_IF(load_immediate(compiler, (sljit_s32)arg1, imm));
}
SLJIT_ASSERT(arg1 != arg2);
@@ -768,9 +879,10 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s
case SLJIT_MOV:
case SLJIT_MOV_U32:
case SLJIT_MOV_S32:
+ case SLJIT_MOV32:
case SLJIT_MOV_P:
SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG2);
- if (dst == arg2)
+ if (dst == (sljit_s32)arg2)
return SLJIT_SUCCESS;
return push_inst16(compiler, MOV | SET_REGS44(dst, arg2));
case SLJIT_MOV_U8:
@@ -793,28 +905,50 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s
if (IS_2_LO_REGS(dst, arg2))
return push_inst16(compiler, SXTH | RD3(dst) | RN3(arg2));
return push_inst32(compiler, SXTH_W | RD4(dst) | RM4(arg2));
- case SLJIT_NOT:
- SLJIT_ASSERT(arg1 == TMP_REG2);
- if (IS_2_LO_REGS(dst, arg2))
- return push_inst16(compiler, MVNS | RD3(dst) | RN3(arg2));
- return push_inst32(compiler, MVN_W | (flags & SET_FLAGS) | RD4(dst) | RM4(arg2));
case SLJIT_CLZ:
SLJIT_ASSERT(arg1 == TMP_REG2);
- FAIL_IF(push_inst32(compiler, CLZ | RN4(arg2) | RD4(dst) | RM4(arg2)));
- return SLJIT_SUCCESS;
+ return push_inst32(compiler, CLZ | RN4(arg2) | RD4(dst) | RM4(arg2));
+ case SLJIT_CTZ:
+ SLJIT_ASSERT(arg1 == TMP_REG2);
+ FAIL_IF(push_inst32(compiler, RBIT | RN4(arg2) | RD4(dst) | RM4(arg2)));
+ return push_inst32(compiler, CLZ | RN4(dst) | RD4(dst) | RM4(dst));
+ case SLJIT_REV:
+ case SLJIT_REV_U32:
+ case SLJIT_REV_S32:
+ SLJIT_ASSERT(arg1 == TMP_REG2);
+ if (IS_2_LO_REGS(dst, arg2))
+ return push_inst16(compiler, REV | RD3(dst) | RN3(arg2));
+ return push_inst32(compiler, REV_W | RN4(arg2) | RD4(dst) | RM4(arg2));
+ case SLJIT_REV_U16:
+ case SLJIT_REV_S16:
+ SLJIT_ASSERT(arg1 == TMP_REG2 && dst != TMP_REG2);
+
+ flags &= 0xffff;
+ if (IS_2_LO_REGS(dst, arg2))
+ FAIL_IF(push_inst16(compiler, REV16 | RD3(dst) | RN3(arg2)));
+ else
+ FAIL_IF(push_inst32(compiler, REV16_W | RN4(arg2) | RD4(dst) | RM4(arg2)));
+
+ if (dst == TMP_REG1 || (arg2 == TMP_REG1 && flags == SLJIT_REV_U16))
+ return SLJIT_SUCCESS;
+
+ if (reg_map[dst] <= 7)
+ return push_inst16(compiler, (flags == SLJIT_REV_U16 ? UXTH : SXTH) | RD3(dst) | RN3(dst));
+ return push_inst32(compiler, (flags == SLJIT_REV_U16 ? UXTH_W : SXTH_W) | RD4(dst) | RM4(dst));
case SLJIT_ADD:
- compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD_SUB;
+ compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD;
if (IS_3_LO_REGS(dst, arg1, arg2))
return push_inst16(compiler, ADDS | RD3(dst) | RN3(arg1) | RM3(arg2));
- if (dst == arg1 && !(flags & SET_FLAGS))
+ if (dst == (sljit_s32)arg1 && !(flags & SET_FLAGS))
return push_inst16(compiler, ADD | SET_REGS44(dst, arg2));
return push_inst32(compiler, ADD_W | (flags & SET_FLAGS) | RD4(dst) | RN4(arg1) | RM4(arg2));
case SLJIT_ADDC:
- if (dst == arg1 && IS_2_LO_REGS(dst, arg2))
+ compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD;
+ if (dst == (sljit_s32)arg1 && IS_2_LO_REGS(dst, arg2))
return push_inst16(compiler, ADCS | RD3(dst) | RN3(arg2));
return push_inst32(compiler, ADC_W | (flags & SET_FLAGS) | RD4(dst) | RN4(arg1) | RM4(arg2));
case SLJIT_SUB:
- compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD_SUB;
+ compiler->status_flags_state = SLJIT_CURRENT_FLAGS_SUB;
if (flags & UNUSED_RETURN) {
if (IS_2_LO_REGS(arg1, arg2))
return push_inst16(compiler, CMP | RD3(arg1) | RN3(arg2));
@@ -824,7 +958,8 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s
return push_inst16(compiler, SUBS | RD3(dst) | RN3(arg1) | RM3(arg2));
return push_inst32(compiler, SUB_W | (flags & SET_FLAGS) | RD4(dst) | RN4(arg1) | RM4(arg2));
case SLJIT_SUBC:
- if (dst == arg1 && IS_2_LO_REGS(dst, arg2))
+ compiler->status_flags_state = SLJIT_CURRENT_FLAGS_SUB;
+ if (dst == (sljit_s32)arg1 && IS_2_LO_REGS(dst, arg2))
return push_inst16(compiler, SBCS | RD3(dst) | RN3(arg2));
return push_inst32(compiler, SBC_W | (flags & SET_FLAGS) | RD4(dst) | RN4(arg1) | RM4(arg2));
case SLJIT_MUL:
@@ -836,31 +971,51 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s
/* cmp TMP_REG2, dst asr #31. */
return push_inst32(compiler, CMP_W | RN4(TMP_REG2) | 0x70e0 | RM4(dst));
case SLJIT_AND:
- if (dst == arg1 && IS_2_LO_REGS(dst, arg2))
+ if (dst == (sljit_s32)arg1 && IS_2_LO_REGS(dst, arg2))
return push_inst16(compiler, ANDS | RD3(dst) | RN3(arg2));
if ((flags & UNUSED_RETURN) && IS_2_LO_REGS(arg1, arg2))
return push_inst16(compiler, TST | RD3(arg1) | RN3(arg2));
- return push_inst32(compiler, AND_W | (flags & SET_FLAGS) | RD4(dst) | RN4(arg1) | RM4(arg2));
+ return push_inst32(compiler, ((flags & UNUSED_RETURN) ? TST_W : AND_W) | (flags & SET_FLAGS) | RD4(dst) | RN4(arg1) | RM4(arg2));
case SLJIT_OR:
- if (dst == arg1 && IS_2_LO_REGS(dst, arg2))
+ if (dst == (sljit_s32)arg1 && IS_2_LO_REGS(dst, arg2))
return push_inst16(compiler, ORRS | RD3(dst) | RN3(arg2));
return push_inst32(compiler, ORR_W | (flags & SET_FLAGS) | RD4(dst) | RN4(arg1) | RM4(arg2));
case SLJIT_XOR:
- if (dst == arg1 && IS_2_LO_REGS(dst, arg2))
+ if (dst == (sljit_s32)arg1 && IS_2_LO_REGS(dst, arg2))
return push_inst16(compiler, EORS | RD3(dst) | RN3(arg2));
return push_inst32(compiler, EOR_W | (flags & SET_FLAGS) | RD4(dst) | RN4(arg1) | RM4(arg2));
+ case SLJIT_MSHL:
+ FAIL_IF(push_inst32(compiler, ANDI | RD4(TMP_REG2) | RN4(arg2) | 0x1f));
+ arg2 = TMP_REG2;
+ /* fallthrough */
case SLJIT_SHL:
- if (dst == arg1 && IS_2_LO_REGS(dst, arg2))
+ if (dst == (sljit_s32)arg1 && IS_2_LO_REGS(dst, arg2))
return push_inst16(compiler, LSLS | RD3(dst) | RN3(arg2));
return push_inst32(compiler, LSL_W | (flags & SET_FLAGS) | RD4(dst) | RN4(arg1) | RM4(arg2));
+ case SLJIT_MLSHR:
+ FAIL_IF(push_inst32(compiler, ANDI | RD4(TMP_REG2) | RN4(arg2) | 0x1f));
+ arg2 = TMP_REG2;
+ /* fallthrough */
case SLJIT_LSHR:
- if (dst == arg1 && IS_2_LO_REGS(dst, arg2))
+ if (dst == (sljit_s32)arg1 && IS_2_LO_REGS(dst, arg2))
return push_inst16(compiler, LSRS | RD3(dst) | RN3(arg2));
return push_inst32(compiler, LSR_W | (flags & SET_FLAGS) | RD4(dst) | RN4(arg1) | RM4(arg2));
+ case SLJIT_MASHR:
+ FAIL_IF(push_inst32(compiler, ANDI | RD4(TMP_REG2) | RN4(arg2) | 0x1f));
+ arg2 = TMP_REG2;
+ /* fallthrough */
case SLJIT_ASHR:
- if (dst == arg1 && IS_2_LO_REGS(dst, arg2))
+ if (dst == (sljit_s32)arg1 && IS_2_LO_REGS(dst, arg2))
return push_inst16(compiler, ASRS | RD3(dst) | RN3(arg2));
return push_inst32(compiler, ASR_W | (flags & SET_FLAGS) | RD4(dst) | RN4(arg1) | RM4(arg2));
+ case SLJIT_ROTL:
+ FAIL_IF(push_inst32(compiler, RSB_WI | RD4(TMP_REG2) | RN4(arg2) | 0));
+ arg2 = TMP_REG2;
+ /* fallthrough */
+ case SLJIT_ROTR:
+ if (dst == (sljit_s32)arg1 && IS_2_LO_REGS(dst, arg2))
+ return push_inst16(compiler, RORS | RD3(dst) | RN3(arg2));
+ return push_inst32(compiler, ROR_W | RD4(dst) | RN4(arg1) | RM4(arg2));
}
SLJIT_UNREACHABLE();
@@ -875,8 +1030,8 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s
#define HALF_SIZE 0x08
#define PRELOAD 0x0c
-#define IS_WORD_SIZE(flags) (!(flags & (BYTE_SIZE | HALF_SIZE)))
-#define OFFSET_CHECK(imm, shift) (!(argw & ~(imm << shift)))
+#define IS_WORD_SIZE(flags) (!((flags) & (BYTE_SIZE | HALF_SIZE)))
+#define ALIGN_CHECK(argw, imm, shift) (!((argw) & ~((imm) << (shift))))
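+/* ALIGN_CHECK accepts offsets that are a multiple of (1 << shift) and fit
+   into the scaled (imm << shift) range of the 16-bit instruction forms. */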
/*
1st letter:
@@ -951,20 +1106,22 @@ static const sljit_ins sljit_mem32[13] = {
/* Helper function. Dst should be reg + value, using at most 1 instruction; flags are not set. */
static sljit_s32 emit_set_delta(struct sljit_compiler *compiler, sljit_s32 dst, sljit_s32 reg, sljit_sw value)
{
+ sljit_uw imm;
+
if (value >= 0) {
if (value <= 0xfff)
return push_inst32(compiler, ADDWI | RD4(dst) | RN4(reg) | IMM12(value));
- value = get_imm(value);
- if (value != INVALID_IMM)
- return push_inst32(compiler, ADD_WI | RD4(dst) | RN4(reg) | value);
+ imm = get_imm((sljit_uw)value);
+ if (imm != INVALID_IMM)
+ return push_inst32(compiler, ADD_WI | RD4(dst) | RN4(reg) | imm);
}
else {
value = -value;
if (value <= 0xfff)
return push_inst32(compiler, SUBWI | RD4(dst) | RN4(reg) | IMM12(value));
- value = get_imm(value);
- if (value != INVALID_IMM)
- return push_inst32(compiler, SUB_WI | RD4(dst) | RN4(reg) | value);
+ imm = get_imm((sljit_uw)value);
+ if (imm != INVALID_IMM)
+ return push_inst32(compiler, SUB_WI | RD4(dst) | RN4(reg) | imm);
}
return SLJIT_ERR_UNSUPPORTED;
}
@@ -973,20 +1130,19 @@ static SLJIT_INLINE sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit
sljit_s32 arg, sljit_sw argw, sljit_s32 tmp_reg)
{
sljit_s32 other_r;
- sljit_uw tmp;
+ sljit_uw imm, tmp;
SLJIT_ASSERT(arg & SLJIT_MEM);
- SLJIT_ASSERT((arg & REG_MASK) != tmp_reg);
- arg &= ~SLJIT_MEM;
+ SLJIT_ASSERT((arg & REG_MASK) != tmp_reg || (arg == SLJIT_MEM1(tmp_reg) && argw >= -0xff && argw <= 0xfff));
if (SLJIT_UNLIKELY(!(arg & REG_MASK))) {
- tmp = get_imm(argw & ~0xfff);
- if (tmp != INVALID_IMM) {
- FAIL_IF(push_inst32(compiler, MOV_WI | RD4(tmp_reg) | tmp));
+ imm = get_imm((sljit_uw)argw & ~(sljit_uw)0xfff);
+ if (imm != INVALID_IMM) {
+ FAIL_IF(push_inst32(compiler, MOV_WI | RD4(tmp_reg) | imm));
return push_inst32(compiler, sljit_mem32[flags] | MEM_IMM12 | RT4(reg) | RN4(tmp_reg) | (argw & 0xfff));
}
- FAIL_IF(load_immediate(compiler, tmp_reg, argw));
+ FAIL_IF(load_immediate(compiler, tmp_reg, (sljit_uw)argw));
if (IS_2_LO_REGS(reg, tmp_reg) && sljit_mem16_imm5[flags])
return push_inst16(compiler, sljit_mem16_imm5[flags] | RD3(reg) | RN3(tmp_reg));
return push_inst32(compiler, sljit_mem32[flags] | MEM_IMM12 | RT4(reg) | RN4(tmp_reg));
@@ -995,68 +1151,79 @@ static SLJIT_INLINE sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit
if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) {
argw &= 0x3;
other_r = OFFS_REG(arg);
- arg &= 0xf;
+ arg &= REG_MASK;
if (!argw && IS_3_LO_REGS(reg, arg, other_r))
return push_inst16(compiler, sljit_mem16[flags] | RD3(reg) | RN3(arg) | RM3(other_r));
- return push_inst32(compiler, sljit_mem32[flags] | RT4(reg) | RN4(arg) | RM4(other_r) | (argw << 4));
+ return push_inst32(compiler, sljit_mem32[flags] | RT4(reg) | RN4(arg) | RM4(other_r) | ((sljit_ins)argw << 4));
}
+ arg &= REG_MASK;
+
if (argw > 0xfff) {
- tmp = get_imm(argw & ~0xfff);
- if (tmp != INVALID_IMM) {
- push_inst32(compiler, ADD_WI | RD4(tmp_reg) | RN4(arg) | tmp);
+ imm = get_imm((sljit_uw)(argw & ~0xfff));
+ if (imm != INVALID_IMM) {
+ push_inst32(compiler, ADD_WI | RD4(tmp_reg) | RN4(arg) | imm);
arg = tmp_reg;
argw = argw & 0xfff;
}
}
else if (argw < -0xff) {
- tmp = get_imm(-argw & ~0xff);
- if (tmp != INVALID_IMM) {
- push_inst32(compiler, SUB_WI | RD4(tmp_reg) | RN4(arg) | tmp);
+ tmp = (sljit_uw)((-argw + 0xfff) & ~0xfff);
+ SLJIT_ASSERT(tmp >= (sljit_uw)-argw);
+ imm = get_imm(tmp);
+
+ if (imm != INVALID_IMM) {
+ push_inst32(compiler, SUB_WI | RD4(tmp_reg) | RN4(arg) | imm);
arg = tmp_reg;
- argw = -(-argw & 0xff);
+ argw += (sljit_sw)tmp;
+
+ SLJIT_ASSERT(argw >= 0 && argw <= 0xfff);
}
}
+ /* 16 bit instruction forms. */
if (IS_2_LO_REGS(reg, arg) && sljit_mem16_imm5[flags]) {
tmp = 3;
if (IS_WORD_SIZE(flags)) {
- if (OFFSET_CHECK(0x1f, 2))
+ if (ALIGN_CHECK(argw, 0x1f, 2))
tmp = 2;
}
else if (flags & BYTE_SIZE)
{
- if (OFFSET_CHECK(0x1f, 0))
+ if (ALIGN_CHECK(argw, 0x1f, 0))
tmp = 0;
}
else {
SLJIT_ASSERT(flags & HALF_SIZE);
- if (OFFSET_CHECK(0x1f, 1))
+ if (ALIGN_CHECK(argw, 0x1f, 1))
tmp = 1;
}
if (tmp < 3)
- return push_inst16(compiler, sljit_mem16_imm5[flags] | RD3(reg) | RN3(arg) | (argw << (6 - tmp)));
+ return push_inst16(compiler, sljit_mem16_imm5[flags] | RD3(reg) | RN3(arg) | ((sljit_ins)argw << (6 - tmp)));
}
- else if (SLJIT_UNLIKELY(arg == SLJIT_SP) && IS_WORD_SIZE(flags) && OFFSET_CHECK(0xff, 2) && reg_map[reg] <= 7) {
+ else if (SLJIT_UNLIKELY(arg == SLJIT_SP) && IS_WORD_SIZE(flags) && ALIGN_CHECK(argw, 0xff, 2) && reg_map[reg] <= 7) {
/* SP based immediate. */
- return push_inst16(compiler, STR_SP | ((flags & STORE) ? 0 : 0x800) | RDN3(reg) | (argw >> 2));
+ return push_inst16(compiler, STR_SP | (sljit_ins)((flags & STORE) ? 0 : 0x800) | RDN3(reg) | ((sljit_ins)argw >> 2));
}
if (argw >= 0 && argw <= 0xfff)
- return push_inst32(compiler, sljit_mem32[flags] | MEM_IMM12 | RT4(reg) | RN4(arg) | argw);
+ return push_inst32(compiler, sljit_mem32[flags] | MEM_IMM12 | RT4(reg) | RN4(arg) | (sljit_ins)argw);
else if (argw < 0 && argw >= -0xff)
- return push_inst32(compiler, sljit_mem32[flags] | MEM_IMM8 | RT4(reg) | RN4(arg) | -argw);
+ return push_inst32(compiler, sljit_mem32[flags] | MEM_IMM8 | RT4(reg) | RN4(arg) | (sljit_ins)-argw);
SLJIT_ASSERT(arg != tmp_reg);
- FAIL_IF(load_immediate(compiler, tmp_reg, argw));
+ FAIL_IF(load_immediate(compiler, tmp_reg, (sljit_uw)argw));
if (IS_3_LO_REGS(reg, arg, tmp_reg))
return push_inst16(compiler, sljit_mem16[flags] | RD3(reg) | RN3(arg) | RM3(tmp_reg));
return push_inst32(compiler, sljit_mem32[flags] | RT4(reg) | RN4(arg) | RM4(tmp_reg));
}
+#undef ALIGN_CHECK
+#undef IS_WORD_SIZE
+
/* --------------------------------------------------------------------- */
/* Entry, exit */
/* --------------------------------------------------------------------- */
@@ -1065,114 +1232,208 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
- sljit_s32 args, size, i, tmp;
- sljit_ins push = 0;
-#ifdef _WIN32
- sljit_uw imm;
+ sljit_s32 size, i, tmp, word_arg_count;
+ sljit_s32 saved_arg_count = SLJIT_KEPT_SAVEDS_COUNT(options);
+ sljit_uw offset;
+ sljit_uw imm = 0;
+#ifdef __SOFTFP__
+ sljit_u32 float_arg_count;
+#else
+ sljit_u32 old_offset, f32_offset;
+ sljit_u32 remap[3];
+ sljit_u32 *remap_ptr = remap;
#endif
CHECK_ERROR();
CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
- tmp = saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - saveds) : SLJIT_FIRST_SAVED_REG;
- for (i = SLJIT_S0; i >= tmp; i--)
- push |= 1 << reg_map[i];
+ tmp = SLJIT_S0 - saveds;
+ for (i = SLJIT_S0 - saved_arg_count; i > tmp; i--)
+ imm |= (sljit_uw)1 << reg_map[i];
for (i = scratches; i >= SLJIT_FIRST_SAVED_REG; i--)
- push |= 1 << reg_map[i];
+ imm |= (sljit_uw)1 << reg_map[i];
- FAIL_IF((push & 0xff00)
- ? push_inst32(compiler, PUSH_W | (1 << 14) | push)
- : push_inst16(compiler, PUSH | (1 << 8) | push));
+	/* At least two registers must be set for PUSH_W and one for the PUSH instruction. */
+ FAIL_IF((imm & 0xff00)
+ ? push_inst32(compiler, PUSH_W | (1 << 14) | imm)
+ : push_inst16(compiler, PUSH | (1 << 8) | imm));
/* Stack must be aligned to 8 bytes: (LR, R4) */
- size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1);
- local_size = ((size + local_size + 7) & ~7) - size;
+ size = GET_SAVED_REGISTERS_SIZE(scratches, saveds - saved_arg_count, 1);
+
+ if (fsaveds > 0 || fscratches >= SLJIT_FIRST_SAVED_FLOAT_REG) {
+ if ((size & SSIZE_OF(sw)) != 0) {
+ FAIL_IF(push_inst16(compiler, SUB_SP_I | (sizeof(sljit_sw) >> 2)));
+ size += SSIZE_OF(sw);
+ }
+
+ if (fsaveds + fscratches >= SLJIT_NUMBER_OF_FLOAT_REGISTERS) {
+ FAIL_IF(push_inst32(compiler, VPUSH | VD4(SLJIT_FS0) | ((sljit_uw)SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS << 1)));
+ } else {
+ if (fsaveds > 0)
+ FAIL_IF(push_inst32(compiler, VPUSH | VD4(SLJIT_FS0) | ((sljit_uw)fsaveds << 1)));
+ if (fscratches >= SLJIT_FIRST_SAVED_FLOAT_REG)
+ FAIL_IF(push_inst32(compiler, VPUSH | VD4(fscratches) | ((sljit_uw)(fscratches - (SLJIT_FIRST_SAVED_FLOAT_REG - 1)) << 1)));
+ }
+ }
+
+ local_size = ((size + local_size + 0x7) & ~0x7) - size;
compiler->local_size = local_size;
-#ifdef _WIN32
- if (local_size >= 256) {
- if (local_size > 4096)
- imm = get_imm(4096);
- else
- imm = get_imm(local_size & ~0xff);
+ if (options & SLJIT_ENTER_REG_ARG)
+ arg_types = 0;
- SLJIT_ASSERT(imm != INVALID_IMM);
- FAIL_IF(push_inst32(compiler, SUB_WI | RD4(TMP_REG1) | RN4(SLJIT_SP) | imm));
+ arg_types >>= SLJIT_ARG_SHIFT;
+ word_arg_count = 0;
+ saved_arg_count = 0;
+#ifdef __SOFTFP__
+ SLJIT_COMPILE_ASSERT(SLJIT_FR0 == 1, float_register_index_start);
+
+ offset = 0;
+ float_arg_count = 0;
+
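+	/* Softfp: arguments arrive in r0-r3 and on the stack; copy each into
+	   its target core or VFP register, reading stack words relative to the
+	   frame just created (offset + size - 4 * sizeof(sljit_sw)). */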
+ while (arg_types) {
+ switch (arg_types & SLJIT_ARG_MASK) {
+ case SLJIT_ARG_TYPE_F64:
+ if (offset & 0x7)
+ offset += sizeof(sljit_sw);
+
+ if (offset < 4 * sizeof(sljit_sw))
+ FAIL_IF(push_inst32(compiler, VMOV2 | (offset << 10) | ((offset + sizeof(sljit_sw)) << 14) | float_arg_count));
+ else
+ FAIL_IF(push_inst32(compiler, VLDR_F32 | 0x800100 | RN4(SLJIT_SP)
+ | (float_arg_count << 12) | ((offset + (sljit_uw)size - 4 * sizeof(sljit_sw)) >> 2)));
+ float_arg_count++;
+ offset += sizeof(sljit_f64) - sizeof(sljit_sw);
+ break;
+ case SLJIT_ARG_TYPE_F32:
+ if (offset < 4 * sizeof(sljit_sw))
+ FAIL_IF(push_inst32(compiler, VMOV | (float_arg_count << 16) | (offset << 10)));
+ else
+ FAIL_IF(push_inst32(compiler, VLDR_F32 | 0x800000 | RN4(SLJIT_SP)
+ | (float_arg_count << 12) | ((offset + (sljit_uw)size - 4 * sizeof(sljit_sw)) >> 2)));
+ float_arg_count++;
+ break;
+ default:
+ word_arg_count++;
+
+ if (!(arg_types & SLJIT_ARG_TYPE_SCRATCH_REG)) {
+ tmp = SLJIT_S0 - saved_arg_count;
+ saved_arg_count++;
+ } else if (word_arg_count - 1 != (sljit_s32)(offset >> 2))
+ tmp = word_arg_count;
+ else
+ break;
+
+ if (offset < 4 * sizeof(sljit_sw))
+ FAIL_IF(push_inst16(compiler, MOV | ((sljit_ins)reg_map[tmp] & 0x7) | (((sljit_ins)reg_map[tmp] & 0x8) << 4) | (offset << 1)));
+ else if (reg_map[tmp] <= 7)
+ FAIL_IF(push_inst16(compiler, LDR_SP | RDN3(tmp)
+ | ((offset + (sljit_uw)size - 4 * sizeof(sljit_sw)) >> 2)));
+ else
+ FAIL_IF(push_inst32(compiler, LDR | RT4(tmp) | RN4(SLJIT_SP)
+ | ((offset + (sljit_uw)size - 4 * sizeof(sljit_sw)))));
+ break;
+ }
+
+ offset += sizeof(sljit_sw);
+ arg_types >>= SLJIT_ARG_SHIFT;
}
+
+ compiler->args_size = offset;
#else
- if (local_size > 0) {
- if (local_size <= (127 << 2))
- FAIL_IF(push_inst16(compiler, SUB_SP | (local_size >> 2)));
- else
- FAIL_IF(emit_op_imm(compiler, SLJIT_SUB | ARG2_IMM, SLJIT_SP, SLJIT_SP, local_size));
+ offset = SLJIT_FR0;
+ old_offset = SLJIT_FR0;
+ f32_offset = 0;
+
+ while (arg_types) {
+ switch (arg_types & SLJIT_ARG_MASK) {
+ case SLJIT_ARG_TYPE_F64:
+ if (offset != old_offset)
+ *remap_ptr++ = VMOV_F32 | SLJIT_32 | VD4(offset) | VM4(old_offset);
+ old_offset++;
+ offset++;
+ break;
+ case SLJIT_ARG_TYPE_F32:
+ if (f32_offset != 0) {
+ *remap_ptr++ = VMOV_F32 | 0x20 | VD4(offset) | VM4(f32_offset);
+ f32_offset = 0;
+ } else {
+ if (offset != old_offset)
+ *remap_ptr++ = VMOV_F32 | VD4(offset) | VM4(old_offset);
+ f32_offset = old_offset;
+ old_offset++;
+ }
+ offset++;
+ break;
+ default:
+ if (!(arg_types & SLJIT_ARG_TYPE_SCRATCH_REG)) {
+ FAIL_IF(push_inst16(compiler, MOV | SET_REGS44(SLJIT_S0 - saved_arg_count, SLJIT_R0 + word_arg_count)));
+ saved_arg_count++;
+ }
+
+ word_arg_count++;
+ break;
+ }
+ arg_types >>= SLJIT_ARG_SHIFT;
}
-#endif
- args = get_arg_count(arg_types);
+ SLJIT_ASSERT((sljit_uw)(remap_ptr - remap) <= sizeof(remap));
- if (args >= 1)
- FAIL_IF(push_inst16(compiler, MOV | SET_REGS44(SLJIT_S0, SLJIT_R0)));
- if (args >= 2)
- FAIL_IF(push_inst16(compiler, MOV | SET_REGS44(SLJIT_S1, SLJIT_R1)));
- if (args >= 3)
- FAIL_IF(push_inst16(compiler, MOV | SET_REGS44(SLJIT_S2, SLJIT_R2)));
+ while (remap_ptr > remap)
+ FAIL_IF(push_inst32(compiler, *(--remap_ptr)));
+#endif
#ifdef _WIN32
- if (local_size >= 256) {
- if (local_size > 4096) {
- imm = get_imm(4096);
- SLJIT_ASSERT(imm != INVALID_IMM);
-
- if (local_size < 4 * 4096) {
- if (local_size > 2 * 4096) {
- FAIL_IF(push_inst32(compiler, LDRI | 0x400 | RT4(TMP_REG2) | RN4(TMP_REG1)));
- FAIL_IF(push_inst32(compiler, SUB_WI | RD4(TMP_REG1) | RN4(TMP_REG1) | imm));
- local_size -= 4096;
- }
+ if (local_size >= 4096) {
+ imm = get_imm(4096);
+ SLJIT_ASSERT(imm != INVALID_IMM);
- if (local_size > 2 * 4096) {
- FAIL_IF(push_inst32(compiler, LDRI | 0x400 | RT4(TMP_REG2) | RN4(TMP_REG1)));
- FAIL_IF(push_inst32(compiler, SUB_WI | RD4(TMP_REG1) | RN4(TMP_REG1) | imm));
- local_size -= 4096;
- }
+ FAIL_IF(push_inst32(compiler, SUB_WI | RD4(SLJIT_SP) | RN4(SLJIT_SP) | imm));
- FAIL_IF(push_inst32(compiler, LDRI | 0x400 | RT4(TMP_REG2) | RN4(TMP_REG1)));
- local_size -= 4096;
+ if (local_size < 4 * 4096) {
+ if (local_size > 2 * 4096) {
+ if (local_size > 3 * 4096) {
+ FAIL_IF(push_inst32(compiler, LDRI | 0x400 | RT4(TMP_REG1) | RN4(SLJIT_SP)));
+ FAIL_IF(push_inst32(compiler, SUB_WI | RD4(SLJIT_SP) | RN4(SLJIT_SP) | imm));
+ }
- SLJIT_ASSERT(local_size > 0);
- }
- else {
- FAIL_IF(load_immediate(compiler, SLJIT_R3, (local_size >> 12) - 1));
- FAIL_IF(push_inst32(compiler, LDRI | 0x400 | RT4(TMP_REG2) | RN4(TMP_REG1)));
- FAIL_IF(push_inst32(compiler, SUB_WI | RD4(TMP_REG1) | RN4(TMP_REG1) | imm));
- SLJIT_ASSERT(reg_map[SLJIT_R3] < 7);
- FAIL_IF(push_inst16(compiler, SUBSI8 | RDN3(SLJIT_R3) | 1));
- FAIL_IF(push_inst16(compiler, BCC | (0x1 << 8) /* not-equal */ | (-7 & 0xff)));
-
- local_size &= 0xfff;
-
- if (local_size != 0)
- FAIL_IF(push_inst32(compiler, LDRI | 0x400 | RT4(TMP_REG2) | RN4(TMP_REG1)));
+ FAIL_IF(push_inst32(compiler, LDRI | 0x400 | RT4(TMP_REG1) | RN4(SLJIT_SP)));
+ FAIL_IF(push_inst32(compiler, SUB_WI | RD4(SLJIT_SP) | RN4(SLJIT_SP) | imm));
}
+ } else {
+ FAIL_IF(load_immediate(compiler, TMP_REG2, ((sljit_uw)local_size >> 12) - 1));
+ FAIL_IF(push_inst32(compiler, LDRI | 0x400 | RT4(TMP_REG1) | RN4(SLJIT_SP)));
+ FAIL_IF(push_inst32(compiler, SUB_WI | RD4(SLJIT_SP) | RN4(SLJIT_SP) | imm));
+ FAIL_IF(push_inst32(compiler, SUB_WI | SET_FLAGS | RD4(TMP_REG2) | RN4(TMP_REG2) | 1));
+ FAIL_IF(push_inst16(compiler, BCC | (0x1 << 8) /* not-equal */ | (-8 & 0xff)));
+ }
- if (local_size >= 256) {
- imm = get_imm(local_size & ~0xff);
- SLJIT_ASSERT(imm != INVALID_IMM);
+ FAIL_IF(push_inst32(compiler, LDRI | 0x400 | RT4(TMP_REG1) | RN4(SLJIT_SP)));
+ local_size &= 0xfff;
+ }
- FAIL_IF(push_inst32(compiler, SUB_WI | RD4(TMP_REG1) | RN4(TMP_REG1) | imm));
- }
- }
+ if (local_size >= 256) {
+ SLJIT_ASSERT(local_size < 4096);
- local_size &= 0xff;
- FAIL_IF(push_inst32(compiler, LDRI | 0x400 | (local_size > 0 ? 0x100 : 0) | RT4(TMP_REG2) | RN4(TMP_REG1) | local_size));
+ if (local_size <= (127 << 2))
+ FAIL_IF(push_inst16(compiler, SUB_SP_I | ((sljit_uw)local_size >> 2)));
+ else
+ FAIL_IF(emit_op_imm(compiler, SLJIT_SUB | ARG2_IMM, SLJIT_SP, SLJIT_SP, (sljit_uw)local_size));
- FAIL_IF(push_inst16(compiler, MOV | SET_REGS44(SLJIT_SP, TMP_REG1)));
+ FAIL_IF(push_inst32(compiler, LDRI | 0x400 | RT4(TMP_REG1) | RN4(SLJIT_SP)));
+ } else if (local_size > 0)
+ FAIL_IF(push_inst32(compiler, LDRI | 0x500 | RT4(TMP_REG1) | RN4(SLJIT_SP) | (sljit_uw)local_size));
+#else /* !_WIN32 */
+ if (local_size > 0) {
+ if (local_size <= (127 << 2))
+ FAIL_IF(push_inst16(compiler, SUB_SP_I | ((sljit_uw)local_size >> 2)));
+ else
+ FAIL_IF(emit_op_imm(compiler, SLJIT_SUB | ARG2_IMM, SLJIT_SP, SLJIT_SP, (sljit_uw)local_size));
}
- else if (local_size > 0)
- FAIL_IF(push_inst32(compiler, LDRI | 0x500 | RT4(TMP_REG1) | RN4(SLJIT_SP) | local_size));
-#endif
+#endif /* _WIN32 */
return SLJIT_SUCCESS;
}
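
The prologue above rounds the saved-register area plus the requested locals up to the 8-byte stack alignment required by the AAPCS, then subtracts the register area again, so compiler->local_size ends up holding only the SP adjustment that still has to be made. A minimal standalone sketch of that computation (not part of the patch):

    #include <assert.h>

    /* Mirrors: local_size = ((size + local_size + 0x7) & ~0x7) - size; */
    static int aligned_local_size(int saved_area, int locals)
    {
        int total = (saved_area + locals + 0x7) & ~0x7;
        return total - saved_area;
    }

    int main(void)
    {
        /* 20 bytes of saved registers plus 10 bytes of locals: the total
           rounds up from 30 to 32, so 12 bytes of locals are allocated. */
        assert(aligned_local_size(20, 10) == 12);
        return 0;
    }
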
@@ -1187,38 +1448,210 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *comp
CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
- size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1);
- compiler->local_size = ((size + local_size + 7) & ~7) - size;
+ size = GET_SAVED_REGISTERS_SIZE(scratches, saveds - SLJIT_KEPT_SAVEDS_COUNT(options), 1);
+
+ /* Doubles are saved, so alignment is unaffected. */
+ if ((size & SSIZE_OF(sw)) != 0 && (fsaveds > 0 || fscratches >= SLJIT_FIRST_SAVED_FLOAT_REG))
+ size += SSIZE_OF(sw);
+
+ compiler->local_size = ((size + local_size + 0x7) & ~0x7) - size;
return SLJIT_SUCCESS;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw)
+static sljit_s32 emit_add_sp(struct sljit_compiler *compiler, sljit_uw imm)
{
- sljit_s32 i, tmp;
- sljit_ins pop = 0;
+ sljit_uw imm2;
- CHECK_ERROR();
- CHECK(check_sljit_emit_return(compiler, op, src, srcw));
+ /* The TMP_REG1 register must keep its value. */
+ if (imm <= (127u << 2))
+ return push_inst16(compiler, ADD_SP_I | (imm >> 2));
- FAIL_IF(emit_mov_before_return(compiler, op, src, srcw));
+ if (imm <= 0xfff)
+ return push_inst32(compiler, ADDWI | RD4(SLJIT_SP) | RN4(SLJIT_SP) | IMM12(imm));
- if (compiler->local_size > 0) {
- if (compiler->local_size <= (127 << 2))
- FAIL_IF(push_inst16(compiler, ADD_SP | (compiler->local_size >> 2)));
- else
- FAIL_IF(emit_op_imm(compiler, SLJIT_ADD | ARG2_IMM, SLJIT_SP, SLJIT_SP, compiler->local_size));
+ imm2 = get_imm(imm);
+
+ if (imm2 != INVALID_IMM)
+ return push_inst32(compiler, ADD_WI | RD4(SLJIT_SP) | RN4(SLJIT_SP) | imm2);
+
+ FAIL_IF(load_immediate(compiler, TMP_REG2, imm));
+ return push_inst16(compiler, ADD_SP | RN3(TMP_REG2));
+}
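
The new emit_add_sp helper picks the shortest Thumb-2 encoding that can express the adjustment, falling back to a temporary register (TMP_REG2, since the comment notes TMP_REG1 must be preserved) when no immediate form fits. A hedged sketch of the same range test; the form names are illustrative, not the real emitters:

    typedef unsigned int uw; /* stand-in for sljit_uw; imm is a multiple of 4 */

    enum add_sp_form { FORM_ADD_SP_I, FORM_ADDW, FORM_MODIFIED_IMM, FORM_VIA_TMP };

    /* has_modified_imm stands in for get_imm(imm) != INVALID_IMM, i.e. imm is
       a pattern the Thumb-2 modified-immediate encoder can represent. */
    static enum add_sp_form pick_add_sp_form(uw imm, int has_modified_imm)
    {
        if (imm <= (127u << 2))
            return FORM_ADD_SP_I;     /* 16-bit ADD SP, #imm: 7 bits, scaled by 4 */
        if (imm <= 0xfff)
            return FORM_ADDW;         /* 32-bit ADDW: plain 12-bit immediate */
        if (has_modified_imm)
            return FORM_MODIFIED_IMM; /* 32-bit ADD.W: rotated 8-bit pattern */
        return FORM_VIA_TMP;          /* load_immediate + ADD SP, Rm */
    }
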
+
+static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler, sljit_s32 frame_size)
+{
+ sljit_s32 local_size, fscratches, fsaveds, i, tmp;
+ sljit_s32 restored_reg = 0;
+ sljit_s32 lr_dst = TMP_PC;
+ sljit_uw reg_list = 0;
+
+ SLJIT_ASSERT(reg_map[TMP_REG2] == 14 && frame_size <= 128);
+
+ local_size = compiler->local_size;
+ fscratches = compiler->fscratches;
+ fsaveds = compiler->fsaveds;
+
+ if (fsaveds > 0 || fscratches >= SLJIT_FIRST_SAVED_FLOAT_REG) {
+ if (local_size > 0)
+ FAIL_IF(emit_add_sp(compiler, (sljit_uw)local_size));
+
+ if (fsaveds + fscratches >= SLJIT_NUMBER_OF_FLOAT_REGISTERS) {
+ FAIL_IF(push_inst32(compiler, VPOP | VD4(SLJIT_FS0) | ((sljit_uw)SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS << 1)));
+ } else {
+ if (fscratches >= SLJIT_FIRST_SAVED_FLOAT_REG)
+ FAIL_IF(push_inst32(compiler, VPOP | VD4(fscratches) | ((sljit_uw)(fscratches - (SLJIT_FIRST_SAVED_FLOAT_REG - 1)) << 1)));
+ if (fsaveds > 0)
+ FAIL_IF(push_inst32(compiler, VPOP | VD4(SLJIT_FS0) | ((sljit_uw)fsaveds << 1)));
+ }
+
+ local_size = GET_SAVED_REGISTERS_SIZE(compiler->scratches, compiler->saveds, 1) & 0x7;
}
- tmp = compiler->saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - compiler->saveds) : SLJIT_FIRST_SAVED_REG;
- for (i = SLJIT_S0; i >= tmp; i--)
- pop |= 1 << reg_map[i];
+ if (frame_size < 0) {
+ lr_dst = TMP_REG2;
+ frame_size = 0;
+ } else if (frame_size > 0) {
+ SLJIT_ASSERT(frame_size == 1 || (frame_size & 0x7) == 0);
+ lr_dst = 0;
+ frame_size &= ~0x7;
+ }
- for (i = compiler->scratches; i >= SLJIT_FIRST_SAVED_REG; i--)
- pop |= 1 << reg_map[i];
+ tmp = SLJIT_S0 - compiler->saveds;
+ i = SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options);
+ if (tmp < i) {
+ restored_reg = i;
+ do {
+ reg_list |= (sljit_uw)1 << reg_map[i];
+ } while (--i > tmp);
+ }
+
+ i = compiler->scratches;
+ if (i >= SLJIT_FIRST_SAVED_REG) {
+ restored_reg = i;
+ do {
+ reg_list |= (sljit_uw)1 << reg_map[i];
+ } while (--i >= SLJIT_FIRST_SAVED_REG);
+ }
+
+ if (lr_dst == TMP_REG2 && reg_list == 0) {
+ reg_list |= (sljit_uw)1 << reg_map[TMP_REG2];
+ restored_reg = TMP_REG2;
+ lr_dst = 0;
+ }
+
+ if (lr_dst == 0 && (reg_list & (reg_list - 1)) == 0) {
+ /* The local_size does not include the saved registers. */
+ tmp = 0;
+ if (reg_list != 0) {
+ tmp = 2;
+ if (local_size <= 0xfff) {
+ if (local_size == 0) {
+ SLJIT_ASSERT(restored_reg != TMP_REG2);
+ if (frame_size == 0)
+ return push_inst32(compiler, LDRI | RT4(restored_reg) | RN4(SLJIT_SP) | 0x308);
+ if (frame_size > 2 * SSIZE_OF(sw))
+ return push_inst32(compiler, LDRI | RT4(restored_reg) | RN4(SLJIT_SP) | 0x100 | (sljit_ins)(frame_size - (2 * SSIZE_OF(sw))));
+ }
+
+ if (reg_map[restored_reg] <= 7 && local_size <= 0x3fc)
+ FAIL_IF(push_inst16(compiler, STR_SP | 0x800 | RDN3(restored_reg) | (sljit_ins)(local_size >> 2)));
+ else
+ FAIL_IF(push_inst32(compiler, LDR | RT4(restored_reg) | RN4(SLJIT_SP) | (sljit_ins)local_size));
+ tmp = 1;
+ } else if (frame_size == 0) {
+ frame_size = (restored_reg == TMP_REG2) ? SSIZE_OF(sw) : 2 * SSIZE_OF(sw);
+ tmp = 3;
+ }
+
+ /* Place for the saved register. */
+ if (restored_reg != TMP_REG2)
+ local_size += SSIZE_OF(sw);
+ }
+
+ /* Place for the lr register. */
+ local_size += SSIZE_OF(sw);
+
+ if (frame_size > local_size)
+ FAIL_IF(push_inst16(compiler, SUB_SP_I | ((sljit_ins)(frame_size - local_size) >> 2)));
+ else if (frame_size < local_size)
+ FAIL_IF(emit_add_sp(compiler, (sljit_uw)(local_size - frame_size)));
+
+ if (tmp <= 1)
+ return SLJIT_SUCCESS;
- return (pop & 0xff00)
- ? push_inst32(compiler, POP_W | (1 << 15) | pop)
- : push_inst16(compiler, POP | (1 << 8) | pop);
+ if (tmp == 2) {
+ frame_size -= SSIZE_OF(sw);
+ if (restored_reg != TMP_REG2)
+ frame_size -= SSIZE_OF(sw);
+
+ if (reg_map[restored_reg] <= 7)
+ return push_inst16(compiler, STR_SP | 0x800 | RDN3(restored_reg) | (sljit_ins)(frame_size >> 2));
+
+ return push_inst32(compiler, LDR | RT4(restored_reg) | RN4(SLJIT_SP) | (sljit_ins)frame_size);
+ }
+
+ tmp = (restored_reg == TMP_REG2) ? 0x304 : 0x308;
+ return push_inst32(compiler, LDRI | RT4(restored_reg) | RN4(SLJIT_SP) | (sljit_ins)tmp);
+ }
+
+ if (local_size > 0)
+ FAIL_IF(emit_add_sp(compiler, (sljit_uw)local_size));
+
+ if (!(reg_list & 0xff00) && lr_dst != TMP_REG2) {
+ if (lr_dst == TMP_PC)
+ reg_list |= 1u << 8;
+
+ /* At least one register must be set for POP instruction. */
+ SLJIT_ASSERT(reg_list != 0);
+
+ FAIL_IF(push_inst16(compiler, POP | reg_list));
+ } else {
+ if (lr_dst != 0)
+ reg_list |= (sljit_uw)1 << reg_map[lr_dst];
+
+ /* At least two registers must be set for POP_W instruction. */
+ SLJIT_ASSERT((reg_list & (reg_list - 1)) != 0);
+
+ FAIL_IF(push_inst32(compiler, POP_W | reg_list));
+ }
+
+ if (frame_size > 0)
+ return push_inst16(compiler, SUB_SP_I | (((sljit_ins)frame_size - sizeof(sljit_sw)) >> 2));
+
+ if (lr_dst != 0)
+ return SLJIT_SUCCESS;
+
+ return push_inst16(compiler, ADD_SP_I | 1);
+}
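
The epilogue chooses between the two POP encodings: the 16-bit POP can only name r0-r7 plus PC (bit 8), so any high register in the restore list, or restoring LR into a register rather than popping into PC, forces the 32-bit POP.W. A small sketch of that predicate, assuming the same one-bit-per-register list layout used above:

    /* reg_list holds one bit per architectural register, as in the patch. */
    static int needs_pop_w(unsigned int reg_list, int restore_lr_to_reg)
    {
        /* r8-r12 and lr (bits 8..14) are not encodable in 16-bit POP,
           and loading LR as a plain register also needs the wide form. */
        return (reg_list & 0xff00) != 0 || restore_lr_to_reg;
    }
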
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler *compiler)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_return_void(compiler));
+
+ return emit_stack_frame_release(compiler, 0);
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_to(struct sljit_compiler *compiler,
+ sljit_s32 src, sljit_sw srcw)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_return_to(compiler, src, srcw));
+
+ if (src & SLJIT_MEM) {
+ FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG1, src, srcw, TMP_REG1));
+ src = TMP_REG1;
+ srcw = 0;
+ } else if (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) {
+ FAIL_IF(push_inst16(compiler, MOV | SET_REGS44(TMP_REG1, src)));
+ src = TMP_REG1;
+ srcw = 0;
+ }
+
+ FAIL_IF(emit_stack_frame_release(compiler, 1));
+
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_ijump(compiler, SLJIT_JUMP, src, srcw);
}
/* --------------------------------------------------------------------- */
@@ -1250,8 +1683,8 @@ extern int __aeabi_idivmod(int numerator, int denominator);
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op)
{
#if !(defined __ARM_FEATURE_IDIV) && !(defined __ARM_ARCH_EXT_IDIV__)
- sljit_sw saved_reg_list[3];
- sljit_sw saved_reg_count;
+ sljit_uw saved_reg_list[3];
+ sljit_uw saved_reg_count;
#endif
CHECK_ERROR();
@@ -1266,10 +1699,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
case SLJIT_LMUL_UW:
case SLJIT_LMUL_SW:
return push_inst32(compiler, (op == SLJIT_LMUL_UW ? UMULL : SMULL)
- | (reg_map[SLJIT_R1] << 8)
- | (reg_map[SLJIT_R0] << 12)
- | (reg_map[SLJIT_R0] << 16)
- | reg_map[SLJIT_R1]);
+ | RD4(SLJIT_R1) | RT4(SLJIT_R0) | RN4(SLJIT_R0) | RM4(SLJIT_R1));
#if (defined __ARM_FEATURE_IDIV) || (defined __ARM_ARCH_EXT_IDIV__)
case SLJIT_DIVMOD_UW:
case SLJIT_DIVMOD_SW:
@@ -1314,10 +1744,10 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
FAIL_IF(push_inst16(compiler, MOV | SET_REGS44(SLJIT_R0, SLJIT_R1)));
FAIL_IF(push_inst16(compiler, MOV | SET_REGS44(SLJIT_R1, TMP_REG1)));
FAIL_IF(sljit_emit_ijump(compiler, SLJIT_FAST_CALL, SLJIT_IMM,
- ((op | 0x2) == SLJIT_DIV_UW ? SLJIT_FUNC_OFFSET(__rt_udiv) : SLJIT_FUNC_OFFSET(__rt_sdiv))));
+ ((op | 0x2) == SLJIT_DIV_UW ? SLJIT_FUNC_ADDR(__rt_udiv) : SLJIT_FUNC_ADDR(__rt_sdiv))));
#elif defined(__GNUC__)
FAIL_IF(sljit_emit_ijump(compiler, SLJIT_FAST_CALL, SLJIT_IMM,
- ((op | 0x2) == SLJIT_DIV_UW ? SLJIT_FUNC_OFFSET(__aeabi_uidivmod) : SLJIT_FUNC_OFFSET(__aeabi_idivmod))));
+ ((op | 0x2) == SLJIT_DIV_UW ? SLJIT_FUNC_ADDR(__aeabi_uidivmod) : SLJIT_FUNC_ADDR(__aeabi_idivmod))));
#else
#error "Software divmod functions are needed"
#endif
@@ -1356,7 +1786,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
ADJUST_LOCAL_OFFSET(dst, dstw);
ADJUST_LOCAL_OFFSET(src, srcw);
- dst_r = SLOW_IS_REG(dst) ? dst : TMP_REG1;
+ dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
op = GET_OPCODE(op);
if (op >= SLJIT_MOV && op <= SLJIT_MOV_P) {
@@ -1364,27 +1794,28 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
case SLJIT_MOV:
case SLJIT_MOV_U32:
case SLJIT_MOV_S32:
+ case SLJIT_MOV32:
case SLJIT_MOV_P:
flags = WORD_SIZE;
break;
case SLJIT_MOV_U8:
flags = BYTE_SIZE;
- if (src & SLJIT_IMM)
+ if (src == SLJIT_IMM)
srcw = (sljit_u8)srcw;
break;
case SLJIT_MOV_S8:
flags = BYTE_SIZE | SIGNED;
- if (src & SLJIT_IMM)
+ if (src == SLJIT_IMM)
srcw = (sljit_s8)srcw;
break;
case SLJIT_MOV_U16:
flags = HALF_SIZE;
- if (src & SLJIT_IMM)
+ if (src == SLJIT_IMM)
srcw = (sljit_u16)srcw;
break;
case SLJIT_MOV_S16:
flags = HALF_SIZE | SIGNED;
- if (src & SLJIT_IMM)
+ if (src == SLJIT_IMM)
srcw = (sljit_s16)srcw;
break;
default:
@@ -1393,13 +1824,13 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
break;
}
- if (src & SLJIT_IMM)
- FAIL_IF(emit_op_imm(compiler, SLJIT_MOV | ARG2_IMM, dst_r, TMP_REG2, srcw));
+ if (src == SLJIT_IMM)
+ FAIL_IF(emit_op_imm(compiler, SLJIT_MOV | ARG2_IMM, dst_r, TMP_REG2, (sljit_uw)srcw));
else if (src & SLJIT_MEM) {
FAIL_IF(emit_op_mem(compiler, flags, dst_r, src, srcw, TMP_REG1));
} else {
if (dst_r != TMP_REG1)
- return emit_op_imm(compiler, op, dst_r, TMP_REG2, src);
+ return emit_op_imm(compiler, op, dst_r, TMP_REG2, (sljit_uw)src);
dst_r = src;
}
@@ -1409,22 +1840,18 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
return emit_op_mem(compiler, flags | STORE, dst_r, dst, dstw, TMP_REG2);
}
- if (op == SLJIT_NEG) {
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
- || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->skip_checks = 1;
-#endif
- return sljit_emit_op2(compiler, SLJIT_SUB | op_flags, dst, dstw, SLJIT_IMM, 0, src, srcw);
- }
-
+ SLJIT_COMPILE_ASSERT(WORD_SIZE == 0, word_size_must_be_0);
flags = HAS_FLAGS(op_flags) ? SET_FLAGS : 0;
+ if (op == SLJIT_REV_U16 || op == SLJIT_REV_S16)
+ flags |= HALF_SIZE;
+
if (src & SLJIT_MEM) {
- FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG1, src, srcw, TMP_REG1));
+ FAIL_IF(emit_op_mem(compiler, flags, TMP_REG1, src, srcw, TMP_REG1));
src = TMP_REG1;
}
- emit_op_imm(compiler, flags | op, dst_r, TMP_REG2, src);
+ emit_op_imm(compiler, flags | op, dst_r, TMP_REG2, (sljit_uw)src);
if (SLJIT_UNLIKELY(dst & SLJIT_MEM))
return emit_op_mem(compiler, flags | STORE, dst_r, dst, dstw, TMP_REG2);
@@ -1439,18 +1866,18 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
sljit_s32 dst_reg, flags, src2_reg;
CHECK_ERROR();
- CHECK(check_sljit_emit_op2(compiler, op, dst, dstw, src1, src1w, src2, src2w));
+ CHECK(check_sljit_emit_op2(compiler, op, 0, dst, dstw, src1, src1w, src2, src2w));
ADJUST_LOCAL_OFFSET(dst, dstw);
ADJUST_LOCAL_OFFSET(src1, src1w);
ADJUST_LOCAL_OFFSET(src2, src2w);
- if (dst == SLJIT_UNUSED && !HAS_FLAGS(op))
- return SLJIT_SUCCESS;
-
- dst_reg = SLOW_IS_REG(dst) ? dst : TMP_REG1;
+ dst_reg = FAST_IS_REG(dst) ? dst : TMP_REG1;
flags = HAS_FLAGS(op) ? SET_FLAGS : 0;
- if (src1 & SLJIT_IMM)
+ if (dst == TMP_REG1)
+ flags |= UNUSED_RETURN;
+
+ if (src1 == SLJIT_IMM)
flags |= ARG1_IMM;
else if (src1 & SLJIT_MEM) {
emit_op_mem(compiler, WORD_SIZE, TMP_REG1, src1, src1w, TMP_REG1);
@@ -1459,7 +1886,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
else
src1w = src1;
- if (src2 & SLJIT_IMM)
+ if (src2 == SLJIT_IMM)
flags |= ARG2_IMM;
else if (src2 & SLJIT_MEM) {
src2_reg = (!(flags & ARG1_IMM) && (src1w == TMP_REG1)) ? TMP_REG2 : TMP_REG1;
@@ -1469,16 +1896,81 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
else
src2w = src2;
- if (dst == SLJIT_UNUSED)
- flags |= UNUSED_RETURN;
-
- emit_op_imm(compiler, flags | GET_OPCODE(op), dst_reg, src1w, src2w);
+ emit_op_imm(compiler, flags | GET_OPCODE(op), dst_reg, (sljit_uw)src1w, (sljit_uw)src2w);
if (!(dst & SLJIT_MEM))
return SLJIT_SUCCESS;
return emit_op_mem(compiler, WORD_SIZE | STORE, dst_reg, dst, dstw, TMP_REG2);
}
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2u(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2, sljit_sw src2w)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_op2(compiler, op, 1, 0, 0, src1, src1w, src2, src2w));
+
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_op2(compiler, op, TMP_REG1, 0, src1, src1w, src2, src2w);
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_shift_into(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst_reg,
+ sljit_s32 src1_reg,
+ sljit_s32 src2_reg,
+ sljit_s32 src3, sljit_sw src3w)
+{
+ sljit_s32 is_left;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_shift_into(compiler, op, dst_reg, src1_reg, src2_reg, src3, src3w));
+
+ op = GET_OPCODE(op);
+ is_left = (op == SLJIT_SHL || op == SLJIT_MSHL);
+
+ if (src1_reg == src2_reg) {
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_op2(compiler, is_left ? SLJIT_ROTL : SLJIT_ROTR, dst_reg, 0, src1_reg, 0, src3, src3w);
+ }
+
+ ADJUST_LOCAL_OFFSET(src3, src3w);
+
+ if (src3 == SLJIT_IMM) {
+ src3w &= 0x1f;
+
+ if (src3w == 0)
+ return SLJIT_SUCCESS;
+
+ if (IS_2_LO_REGS(dst_reg, src1_reg))
+ FAIL_IF(push_inst16(compiler, (is_left ? LSLSI : LSRSI) | RD3(dst_reg) | RN3(src1_reg) | ((sljit_ins)src3w << 6)));
+ else
+ FAIL_IF(push_inst32(compiler, (is_left ? LSL_WI : LSR_WI) | RD4(dst_reg) | RM4(src1_reg) | IMM5(src3w)));
+
+ src3w = (src3w ^ 0x1f) + 1;
+ return push_inst32(compiler, ORR_W | RD4(dst_reg) | RN4(dst_reg) | RM4(src2_reg) | (is_left ? 0x10 : 0x0) | IMM5(src3w));
+ }
+
+ if (src3 & SLJIT_MEM) {
+ FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG2, src3, src3w, TMP_REG2));
+ src3 = TMP_REG2;
+ }
+
+ if (op == SLJIT_MSHL || op == SLJIT_MLSHR || dst_reg == src3) {
+ FAIL_IF(push_inst32(compiler, ANDI | RD4(TMP_REG2) | RN4(src3) | 0x1f));
+ src3 = TMP_REG2;
+ }
+
+ if (dst_reg == src1_reg && IS_2_LO_REGS(dst_reg, src3))
+ FAIL_IF(push_inst16(compiler, (is_left ? LSLS : LSRS) | RD3(dst_reg) | RN3(src3)));
+ else
+ FAIL_IF(push_inst32(compiler, (is_left ? LSL_W : LSR_W) | RD4(dst_reg) | RN4(src1_reg) | RM4(src3)));
+
+ FAIL_IF(push_inst32(compiler, (is_left ? LSR_WI : LSL_WI) | RD4(TMP_REG1) | RM4(src2_reg) | (1 << 6)));
+ FAIL_IF(push_inst32(compiler, EORI | RD4(TMP_REG2) | RN4(src3) | 0x1f));
+ FAIL_IF(push_inst32(compiler, (is_left ? LSR_W : LSL_W) | RD4(TMP_REG1) | RN4(TMP_REG1) | RM4(TMP_REG2)));
+ return push_inst32(compiler, ORR_W | RD4(dst_reg) | RN4(dst_reg) | RM4(TMP_REG1));
+}
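
sljit_emit_shift_into implements a funnel shift: the result keeps the bits shifted out of src1 and fills the vacated positions from src2. For SLJIT_SHL with an immediate count n the sequence above computes dst = (src1 << n) | (src2 >> (32 - n)), written as (n ^ 0x1f) + 1 to stay inside the 5-bit immediate field; the register path instead shifts src2 right by 1 and then by n ^ 31, so a zero count never produces an unencodable shift by 32. Reference semantics in plain C (valid for 0 < n < 32):

    #include <stdint.h>

    static uint32_t funnel_shl(uint32_t src1, uint32_t src2, unsigned n)
    {
        return (src1 << n) | (src2 >> ((n ^ 31) + 1)); /* (n ^ 31) + 1 == 32 - n */
    }
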
+
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 src, sljit_sw srcw)
{
@@ -1508,20 +2000,64 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *comp
return SLJIT_SUCCESS;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_dst(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw)
{
- CHECK_REG_INDEX(check_sljit_get_register_index(reg));
- return reg_map[reg];
+ sljit_s32 size, dst_r;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_op_dst(compiler, op, dst, dstw));
+ ADJUST_LOCAL_OFFSET(dst, dstw);
+
+ switch (op) {
+ case SLJIT_FAST_ENTER:
+ SLJIT_ASSERT(reg_map[TMP_REG2] == 14);
+
+ if (FAST_IS_REG(dst))
+ return push_inst16(compiler, MOV | SET_REGS44(dst, TMP_REG2));
+ break;
+ case SLJIT_GET_RETURN_ADDRESS:
+ size = GET_SAVED_REGISTERS_SIZE(compiler->scratches, compiler->saveds - SLJIT_KEPT_SAVEDS_COUNT(compiler->options), 0);
+
+ if (compiler->fsaveds > 0 || compiler->fscratches >= SLJIT_FIRST_SAVED_FLOAT_REG) {
+ /* The size of pc is not added above. */
+ if ((size & SSIZE_OF(sw)) == 0)
+ size += SSIZE_OF(sw);
+
+ size += GET_SAVED_FLOAT_REGISTERS_SIZE(compiler->fscratches, compiler->fsaveds, f64);
+ }
+
+ SLJIT_ASSERT(((compiler->local_size + size + SSIZE_OF(sw)) & 0x7) == 0);
+
+ dst_r = FAST_IS_REG(dst) ? dst : TMP_REG2;
+ FAIL_IF(emit_op_mem(compiler, WORD_SIZE, dst_r, SLJIT_MEM1(SLJIT_SP), compiler->local_size + size, TMP_REG1));
+ break;
+ }
+
+ if (dst & SLJIT_MEM)
+ return emit_op_mem(compiler, WORD_SIZE | STORE, TMP_REG2, dst, dstw, TMP_REG1);
+
+ return SLJIT_SUCCESS;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 type, sljit_s32 reg)
{
- CHECK_REG_INDEX(check_sljit_get_float_register_index(reg));
- return (freg_map[reg] << 1);
+ CHECK_REG_INDEX(check_sljit_get_register_index(type, reg));
+
+ if (type == SLJIT_GP_REGISTER)
+ return reg_map[reg];
+
+ if (type == SLJIT_FLOAT_REGISTER || type == SLJIT_SIMD_REG_64)
+ return freg_map[reg];
+
+ if (type != SLJIT_SIMD_REG_128)
+ return freg_map[reg] & ~0x1;
+
+ return -1;
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler,
- void *instruction, sljit_s32 size)
+ void *instruction, sljit_u32 size)
{
CHECK_ERROR();
CHECK(check_sljit_emit_op_custom(compiler, instruction, size));
@@ -1540,112 +2076,132 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *c
static sljit_s32 emit_fop_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw)
{
sljit_uw imm;
- sljit_sw inst = VSTR_F32 | (flags & (SLJIT_F32_OP | FPU_LOAD));
+ sljit_ins inst = VSTR_F32 | (flags & (SLJIT_32 | FPU_LOAD));
SLJIT_ASSERT(arg & SLJIT_MEM);
/* Fast loads and stores. */
if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) {
- FAIL_IF(push_inst32(compiler, ADD_W | RD4(TMP_REG1) | RN4(arg & REG_MASK) | RM4(OFFS_REG(arg)) | ((argw & 0x3) << 6)));
+ FAIL_IF(push_inst32(compiler, ADD_W | RD4(TMP_REG1) | RN4(arg & REG_MASK) | RM4(OFFS_REG(arg)) | (((sljit_uw)argw & 0x3) << 6)));
arg = SLJIT_MEM | TMP_REG1;
argw = 0;
}
if ((arg & REG_MASK) && (argw & 0x3) == 0) {
if (!(argw & ~0x3fc))
- return push_inst32(compiler, inst | 0x800000 | RN4(arg & REG_MASK) | DD4(reg) | (argw >> 2));
+ return push_inst32(compiler, inst | 0x800000 | RN4(arg & REG_MASK) | VD4(reg) | ((sljit_uw)argw >> 2));
if (!(-argw & ~0x3fc))
- return push_inst32(compiler, inst | RN4(arg & REG_MASK) | DD4(reg) | (-argw >> 2));
+ return push_inst32(compiler, inst | RN4(arg & REG_MASK) | VD4(reg) | ((sljit_uw)-argw >> 2));
}
if (arg & REG_MASK) {
if (emit_set_delta(compiler, TMP_REG1, arg & REG_MASK, argw) != SLJIT_ERR_UNSUPPORTED) {
FAIL_IF(compiler->error);
- return push_inst32(compiler, inst | 0x800000 | RN4(TMP_REG1) | DD4(reg));
+ return push_inst32(compiler, inst | 0x800000 | RN4(TMP_REG1) | VD4(reg));
}
- imm = get_imm(argw & ~0x3fc);
+
+ imm = get_imm((sljit_uw)argw & ~(sljit_uw)0x3fc);
if (imm != INVALID_IMM) {
FAIL_IF(push_inst32(compiler, ADD_WI | RD4(TMP_REG1) | RN4(arg & REG_MASK) | imm));
- return push_inst32(compiler, inst | 0x800000 | RN4(TMP_REG1) | DD4(reg) | ((argw & 0x3fc) >> 2));
+ return push_inst32(compiler, inst | 0x800000 | RN4(TMP_REG1) | VD4(reg) | (((sljit_uw)argw & 0x3fc) >> 2));
}
- imm = get_imm(-argw & ~0x3fc);
+
+ imm = get_imm((sljit_uw)-argw & ~(sljit_uw)0x3fc);
if (imm != INVALID_IMM) {
argw = -argw;
FAIL_IF(push_inst32(compiler, SUB_WI | RD4(TMP_REG1) | RN4(arg & REG_MASK) | imm));
- return push_inst32(compiler, inst | RN4(TMP_REG1) | DD4(reg) | ((argw & 0x3fc) >> 2));
+ return push_inst32(compiler, inst | RN4(TMP_REG1) | VD4(reg) | (((sljit_uw)argw & 0x3fc) >> 2));
}
}
- FAIL_IF(load_immediate(compiler, TMP_REG1, argw));
+ FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_uw)argw));
if (arg & REG_MASK)
FAIL_IF(push_inst16(compiler, ADD | SET_REGS44(TMP_REG1, (arg & REG_MASK))));
- return push_inst32(compiler, inst | 0x800000 | RN4(TMP_REG1) | DD4(reg));
+ return push_inst32(compiler, inst | 0x800000 | RN4(TMP_REG1) | VD4(reg));
}
static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
{
- op ^= SLJIT_F32_OP;
+ op ^= SLJIT_32;
if (src & SLJIT_MEM) {
- FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG1, src, srcw));
+ FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_32) | FPU_LOAD, TMP_FREG1, src, srcw));
src = TMP_FREG1;
}
- FAIL_IF(push_inst32(compiler, VCVT_S32_F32 | (op & SLJIT_F32_OP) | DD4(TMP_FREG1) | DM4(src)));
+ FAIL_IF(push_inst32(compiler, VCVT_S32_F32 | (op & SLJIT_32) | VD4(TMP_FREG1) | VM4(src)));
if (FAST_IS_REG(dst))
- return push_inst32(compiler, VMOV | (1 << 20) | RT4(dst) | DN4(TMP_FREG1));
+ return push_inst32(compiler, VMOV | (1 << 20) | RT4(dst) | VN4(TMP_FREG1));
/* Store the integer value from a VFP register. */
return emit_fop_mem(compiler, 0, TMP_FREG1, dst, dstw);
}
-static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
+static sljit_s32 sljit_emit_fop1_conv_f64_from_w(struct sljit_compiler *compiler, sljit_ins ins,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
{
sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
- op ^= SLJIT_F32_OP;
-
if (FAST_IS_REG(src))
- FAIL_IF(push_inst32(compiler, VMOV | RT4(src) | DN4(TMP_FREG1)));
+ FAIL_IF(push_inst32(compiler, VMOV | RT4(src) | VN4(TMP_FREG1)));
else if (src & SLJIT_MEM) {
/* Load the integer value into a VFP register. */
FAIL_IF(emit_fop_mem(compiler, FPU_LOAD, TMP_FREG1, src, srcw));
}
else {
- FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
- FAIL_IF(push_inst32(compiler, VMOV | RT4(TMP_REG1) | DN4(TMP_FREG1)));
+ FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_uw)srcw));
+ FAIL_IF(push_inst32(compiler, VMOV | RT4(TMP_REG1) | VN4(TMP_FREG1)));
}
- FAIL_IF(push_inst32(compiler, VCVT_F32_S32 | (op & SLJIT_F32_OP) | DD4(dst_r) | DM4(TMP_FREG1)));
+ FAIL_IF(push_inst32(compiler, ins | VD4(dst_r) | VM4(TMP_FREG1)));
if (dst & SLJIT_MEM)
- return emit_fop_mem(compiler, (op & SLJIT_F32_OP), TMP_FREG1, dst, dstw);
+ return emit_fop_mem(compiler, (ins & SLJIT_32), TMP_FREG1, dst, dstw);
return SLJIT_SUCCESS;
}
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 src, sljit_sw srcw)
+{
+ return sljit_emit_fop1_conv_f64_from_w(compiler, VCVT_F32_S32 | (~op & SLJIT_32), dst, dstw, src, srcw);
+}
+
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_uw(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 src, sljit_sw srcw)
+{
+ return sljit_emit_fop1_conv_f64_from_w(compiler, VCVT_F32_U32 | (~op & SLJIT_32), dst, dstw, src, srcw);
+}
+
static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w)
{
- op ^= SLJIT_F32_OP;
+ op ^= SLJIT_32;
if (src1 & SLJIT_MEM) {
- emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG1, src1, src1w);
+ FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_32) | FPU_LOAD, TMP_FREG1, src1, src1w));
src1 = TMP_FREG1;
}
if (src2 & SLJIT_MEM) {
- emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG2, src2, src2w);
+ FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_32) | FPU_LOAD, TMP_FREG2, src2, src2w));
src2 = TMP_FREG2;
}
- FAIL_IF(push_inst32(compiler, VCMP_F32 | (op & SLJIT_F32_OP) | DD4(src1) | DM4(src2)));
- return push_inst32(compiler, VMRS);
+ FAIL_IF(push_inst32(compiler, VCMP_F32 | (op & SLJIT_32) | VD4(src1) | VM4(src2)));
+ FAIL_IF(push_inst32(compiler, VMRS));
+
+ if (GET_FLAG_TYPE(op) != SLJIT_UNORDERED_OR_EQUAL)
+ return SLJIT_SUCCESS;
+
+ FAIL_IF(push_inst16(compiler, IT | (0x6 << 4) | 0x8));
+ return push_inst16(compiler, CMP /* Rm, Rn = r0 */);
}
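
The tail of sljit_emit_fop1_cmp handles SLJIT_UNORDERED_OR_EQUAL, which no single ARM condition code covers after VMRS moves the FP flags into APSR: an IT VS block conditionally executes CMP r0, r0, forcing Z=1 exactly when the compare was unordered, so a following EQ test matches both cases. A truth-table model of the idea:

    /* z, v: APSR flags after VCMP + VMRS. */
    static int unordered_or_equal(int z, int v)
    {
        if (v)       /* IT VS: runs only when the operands were unordered */
            z = 1;   /* CMP r0, r0 always sets Z */
        return z;    /* the later EQ condition now covers both cases */
    }
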
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op,
@@ -1656,16 +2212,16 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
CHECK_ERROR();
- SLJIT_COMPILE_ASSERT((SLJIT_F32_OP == 0x100), float_transfer_bit_error);
+ SLJIT_COMPILE_ASSERT((SLJIT_32 == 0x100), float_transfer_bit_error);
SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw);
dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
if (GET_OPCODE(op) != SLJIT_CONV_F64_FROM_F32)
- op ^= SLJIT_F32_OP;
+ op ^= SLJIT_32;
if (src & SLJIT_MEM) {
- emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, dst_r, src, srcw);
+ FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_32) | FPU_LOAD, dst_r, src, srcw));
src = dst_r;
}
@@ -1673,25 +2229,25 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
case SLJIT_MOV_F64:
if (src != dst_r) {
if (dst_r != TMP_FREG1)
- FAIL_IF(push_inst32(compiler, VMOV_F32 | (op & SLJIT_F32_OP) | DD4(dst_r) | DM4(src)));
+ FAIL_IF(push_inst32(compiler, VMOV_F32 | (op & SLJIT_32) | VD4(dst_r) | VM4(src)));
else
dst_r = src;
}
break;
case SLJIT_NEG_F64:
- FAIL_IF(push_inst32(compiler, VNEG_F32 | (op & SLJIT_F32_OP) | DD4(dst_r) | DM4(src)));
+ FAIL_IF(push_inst32(compiler, VNEG_F32 | (op & SLJIT_32) | VD4(dst_r) | VM4(src)));
break;
case SLJIT_ABS_F64:
- FAIL_IF(push_inst32(compiler, VABS_F32 | (op & SLJIT_F32_OP) | DD4(dst_r) | DM4(src)));
+ FAIL_IF(push_inst32(compiler, VABS_F32 | (op & SLJIT_32) | VD4(dst_r) | VM4(src)));
break;
case SLJIT_CONV_F64_FROM_F32:
- FAIL_IF(push_inst32(compiler, VCVT_F64_F32 | (op & SLJIT_F32_OP) | DD4(dst_r) | DM4(src)));
- op ^= SLJIT_F32_OP;
+ FAIL_IF(push_inst32(compiler, VCVT_F64_F32 | (op & SLJIT_32) | VD4(dst_r) | VM4(src)));
+ op ^= SLJIT_32;
break;
}
if (dst & SLJIT_MEM)
- return emit_fop_mem(compiler, (op & SLJIT_F32_OP), dst_r, dst, dstw);
+ return emit_fop_mem(compiler, (op & SLJIT_32), dst_r, dst, dstw);
return SLJIT_SUCCESS;
}
@@ -1708,57 +2264,137 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compil
ADJUST_LOCAL_OFFSET(src1, src1w);
ADJUST_LOCAL_OFFSET(src2, src2w);
- op ^= SLJIT_F32_OP;
+ op ^= SLJIT_32;
dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
if (src1 & SLJIT_MEM) {
- emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG1, src1, src1w);
+ FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_32) | FPU_LOAD, TMP_FREG1, src1, src1w));
src1 = TMP_FREG1;
}
if (src2 & SLJIT_MEM) {
- emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG2, src2, src2w);
+ FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_32) | FPU_LOAD, TMP_FREG2, src2, src2w));
src2 = TMP_FREG2;
}
switch (GET_OPCODE(op)) {
case SLJIT_ADD_F64:
- FAIL_IF(push_inst32(compiler, VADD_F32 | (op & SLJIT_F32_OP) | DD4(dst_r) | DN4(src1) | DM4(src2)));
+ FAIL_IF(push_inst32(compiler, VADD_F32 | (op & SLJIT_32) | VD4(dst_r) | VN4(src1) | VM4(src2)));
break;
case SLJIT_SUB_F64:
- FAIL_IF(push_inst32(compiler, VSUB_F32 | (op & SLJIT_F32_OP) | DD4(dst_r) | DN4(src1) | DM4(src2)));
+ FAIL_IF(push_inst32(compiler, VSUB_F32 | (op & SLJIT_32) | VD4(dst_r) | VN4(src1) | VM4(src2)));
break;
case SLJIT_MUL_F64:
- FAIL_IF(push_inst32(compiler, VMUL_F32 | (op & SLJIT_F32_OP) | DD4(dst_r) | DN4(src1) | DM4(src2)));
+ FAIL_IF(push_inst32(compiler, VMUL_F32 | (op & SLJIT_32) | VD4(dst_r) | VN4(src1) | VM4(src2)));
break;
case SLJIT_DIV_F64:
- FAIL_IF(push_inst32(compiler, VDIV_F32 | (op & SLJIT_F32_OP) | DD4(dst_r) | DN4(src1) | DM4(src2)));
+ FAIL_IF(push_inst32(compiler, VDIV_F32 | (op & SLJIT_32) | VD4(dst_r) | VN4(src1) | VM4(src2)));
break;
+ case SLJIT_COPYSIGN_F64:
+ FAIL_IF(push_inst32(compiler, VMOV | (1 << 20) | VN4(src2) | RT4(TMP_REG1) | ((op & SLJIT_32) ? (1 << 7) : 0)));
+ FAIL_IF(push_inst32(compiler, VABS_F32 | (op & SLJIT_32) | VD4(dst_r) | VM4(src1)));
+ FAIL_IF(push_inst32(compiler, CMPI_W | RN4(TMP_REG1) | 0));
+ FAIL_IF(push_inst16(compiler, IT | (0xb << 4) | 0x8));
+ return push_inst32(compiler, VNEG_F32 | (op & SLJIT_32) | VD4(dst_r) | VM4(dst_r));
}
if (!(dst & SLJIT_MEM))
return SLJIT_SUCCESS;
- return emit_fop_mem(compiler, (op & SLJIT_F32_OP), TMP_FREG1, dst, dstw);
+ return emit_fop_mem(compiler, (op & SLJIT_32), TMP_FREG1, dst, dstw);
}
-#undef FPU_LOAD
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fset32(struct sljit_compiler *compiler,
+ sljit_s32 freg, sljit_f32 value)
+{
+#if defined(__ARM_NEON) && __ARM_NEON
+ sljit_u32 exp;
+ sljit_ins ins;
+#endif /* NEON */
+ union {
+ sljit_u32 imm;
+ sljit_f32 value;
+ } u;
-/* --------------------------------------------------------------------- */
-/* Other instructions */
-/* --------------------------------------------------------------------- */
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fset32(compiler, freg, value));
+
+ u.value = value;
+
+#if defined(__ARM_NEON) && __ARM_NEON
+ if ((u.imm << (32 - 19)) == 0) {
+ exp = (u.imm >> (23 + 2)) & 0x3f;
+
+ if (exp == 0x20 || exp == 0x1f) {
+ ins = ((u.imm >> 24) & 0x80) | ((u.imm >> 19) & 0x7f);
+ return push_inst32(compiler, (VMOV_F32 ^ (1 << 6)) | ((ins & 0xf0) << 12) | VD4(freg) | (ins & 0xf));
+ }
+ }
+#endif /* NEON */
+
+ FAIL_IF(load_immediate(compiler, TMP_REG1, u.imm));
+ return push_inst32(compiler, VMOV | VN4(freg) | RT4(TMP_REG1));
+}
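
The NEON fast path in sljit_emit_fset32 uses VMOV (immediate), which can encode only floats with a 4-bit mantissa and a narrow exponent: the low 19 mantissa bits must be zero and exponent bits 30:25 must read 0b100000 or 0b011111. A standalone sketch of the same encodability test, mirroring the checks above:

    #include <stdint.h>
    #include <string.h>

    /* Returns nonzero if value fits the VMOV.F32 8-bit immediate form. */
    static int is_vmov_f32_imm(float value)
    {
        uint32_t bits, exp;

        memcpy(&bits, &value, sizeof(bits));
        if ((bits << (32 - 19)) != 0)   /* low 19 mantissa bits must be 0 */
            return 0;
        exp = (bits >> 25) & 0x3f;      /* exponent bits 30:25 */
        return exp == 0x20 || exp == 0x1f;
    }

For example, 0.5f, 1.0f and 31.0f pass this test, while 0.1f does not and takes the load_immediate path instead.
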
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fset64(struct sljit_compiler *compiler,
+ sljit_s32 freg, sljit_f64 value)
{
+#if defined(__ARM_NEON) && __ARM_NEON
+ sljit_u32 exp;
+ sljit_ins ins;
+#endif /* NEON */
+ union {
+ sljit_u32 imm[2];
+ sljit_f64 value;
+ } u;
+
CHECK_ERROR();
- CHECK(check_sljit_emit_fast_enter(compiler, dst, dstw));
- ADJUST_LOCAL_OFFSET(dst, dstw);
+ CHECK(check_sljit_emit_fset64(compiler, freg, value));
- SLJIT_ASSERT(reg_map[TMP_REG2] == 14);
+ u.value = value;
- if (FAST_IS_REG(dst))
- return push_inst16(compiler, MOV | SET_REGS44(dst, TMP_REG2));
+#if defined(__ARM_NEON) && __ARM_NEON
+ if (u.imm[0] == 0 && (u.imm[1] << (64 - 48)) == 0) {
+ exp = (u.imm[1] >> ((52 - 32) + 2)) & 0x1ff;
+
+ if (exp == 0x100 || exp == 0xff) {
+ ins = ((u.imm[1] >> (56 - 32)) & 0x80) | ((u.imm[1] >> (48 - 32)) & 0x7f);
+ return push_inst32(compiler, (VMOV_F32 ^ (1 << 6)) | (1 << 8) | ((ins & 0xf0) << 12) | VD4(freg) | (ins & 0xf));
+ }
+ }
+#endif /* NEON */
- /* Memory. */
- return emit_op_mem(compiler, WORD_SIZE | STORE, TMP_REG2, dst, dstw, TMP_REG1);
+ FAIL_IF(load_immediate(compiler, TMP_REG1, u.imm[0]));
+ if (u.imm[0] == u.imm[1])
+ return push_inst32(compiler, VMOV2 | RN4(TMP_REG1) | RT4(TMP_REG1) | VM4(freg));
+
+ FAIL_IF(load_immediate(compiler, TMP_REG2, u.imm[1]));
+ return push_inst32(compiler, VMOV2 | RN4(TMP_REG2) | RT4(TMP_REG1) | VM4(freg));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fcopy(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 freg, sljit_s32 reg)
+{
+ sljit_s32 reg2;
+ sljit_ins inst;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fcopy(compiler, op, freg, reg));
+
+ if (reg & REG_PAIR_MASK) {
+ reg2 = REG_PAIR_SECOND(reg);
+ reg = REG_PAIR_FIRST(reg);
+
+ inst = VMOV2 | RN4(reg) | RT4(reg2) | VM4(freg);
+ } else {
+ inst = VMOV | VN4(freg) | RT4(reg);
+
+ if (!(op & SLJIT_32))
+ inst |= 1 << 7;
+ }
+
+ if (GET_OPCODE(op) == SLJIT_COPY_FROM_F64)
+ inst |= 1 << 20;
+
+ return push_inst32(compiler, inst);
}
/* --------------------------------------------------------------------- */
@@ -1769,55 +2405,85 @@ static sljit_uw get_cc(struct sljit_compiler *compiler, sljit_s32 type)
{
switch (type) {
case SLJIT_EQUAL:
- case SLJIT_EQUAL_F64:
+ case SLJIT_ATOMIC_STORED:
+ case SLJIT_F_EQUAL:
+ case SLJIT_ORDERED_EQUAL:
+ case SLJIT_UNORDERED_OR_EQUAL:
return 0x0;
case SLJIT_NOT_EQUAL:
- case SLJIT_NOT_EQUAL_F64:
+ case SLJIT_ATOMIC_NOT_STORED:
+ case SLJIT_F_NOT_EQUAL:
+ case SLJIT_UNORDERED_OR_NOT_EQUAL:
+ case SLJIT_ORDERED_NOT_EQUAL:
return 0x1;
+ case SLJIT_CARRY:
+ if (compiler->status_flags_state & SLJIT_CURRENT_FLAGS_ADD)
+ return 0x2;
+ /* fallthrough */
+
case SLJIT_LESS:
- case SLJIT_LESS_F64:
return 0x3;
+ case SLJIT_NOT_CARRY:
+ if (compiler->status_flags_state & SLJIT_CURRENT_FLAGS_ADD)
+ return 0x3;
+ /* fallthrough */
+
case SLJIT_GREATER_EQUAL:
- case SLJIT_GREATER_EQUAL_F64:
return 0x2;
case SLJIT_GREATER:
- case SLJIT_GREATER_F64:
+ case SLJIT_UNORDERED_OR_GREATER:
return 0x8;
case SLJIT_LESS_EQUAL:
- case SLJIT_LESS_EQUAL_F64:
+ case SLJIT_F_LESS_EQUAL:
+ case SLJIT_ORDERED_LESS_EQUAL:
return 0x9;
case SLJIT_SIG_LESS:
+ case SLJIT_UNORDERED_OR_LESS:
return 0xb;
case SLJIT_SIG_GREATER_EQUAL:
+ case SLJIT_F_GREATER_EQUAL:
+ case SLJIT_ORDERED_GREATER_EQUAL:
return 0xa;
case SLJIT_SIG_GREATER:
+ case SLJIT_F_GREATER:
+ case SLJIT_ORDERED_GREATER:
return 0xc;
case SLJIT_SIG_LESS_EQUAL:
+ case SLJIT_UNORDERED_OR_LESS_EQUAL:
return 0xd;
case SLJIT_OVERFLOW:
- if (!(compiler->status_flags_state & SLJIT_CURRENT_FLAGS_ADD_SUB))
+ if (!(compiler->status_flags_state & (SLJIT_CURRENT_FLAGS_ADD | SLJIT_CURRENT_FLAGS_SUB)))
return 0x1;
+ /* fallthrough */
- case SLJIT_UNORDERED_F64:
+ case SLJIT_UNORDERED:
return 0x6;
case SLJIT_NOT_OVERFLOW:
- if (!(compiler->status_flags_state & SLJIT_CURRENT_FLAGS_ADD_SUB))
+ if (!(compiler->status_flags_state & (SLJIT_CURRENT_FLAGS_ADD | SLJIT_CURRENT_FLAGS_SUB)))
return 0x0;
+ /* fallthrough */
- case SLJIT_ORDERED_F64:
+ case SLJIT_ORDERED:
return 0x7;
+ case SLJIT_F_LESS:
+ case SLJIT_ORDERED_LESS:
+ return 0x4;
+
+ case SLJIT_UNORDERED_OR_GREATER_EQUAL:
+ return 0x5;
+
default: /* SLJIT_JUMP */
SLJIT_UNREACHABLE();
return 0xe;
@@ -1874,113 +2540,126 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile
#ifdef __SOFTFP__
-static sljit_s32 softfloat_call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *src)
+static sljit_s32 softfloat_call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *src, sljit_u32 *extra_space)
{
- sljit_s32 stack_offset = 0;
- sljit_s32 arg_count = 0;
- sljit_s32 word_arg_offset = 0;
- sljit_s32 float_arg_count = 0;
+ sljit_u32 is_tail_call = *extra_space & SLJIT_CALL_RETURN;
+ sljit_u32 offset = 0;
+ sljit_u32 word_arg_offset = 0;
+ sljit_u32 float_arg_count = 0;
sljit_s32 types = 0;
- sljit_s32 src_offset = 4 * sizeof(sljit_sw);
+ sljit_u32 src_offset = 4 * sizeof(sljit_sw);
sljit_u8 offsets[4];
+ sljit_u8 *offset_ptr = offsets;
if (src && FAST_IS_REG(*src))
- src_offset = reg_map[*src] * sizeof(sljit_sw);
+ src_offset = (sljit_u32)reg_map[*src] * sizeof(sljit_sw);
- arg_types >>= SLJIT_DEF_SHIFT;
+ arg_types >>= SLJIT_ARG_SHIFT;
while (arg_types) {
- types = (types << SLJIT_DEF_SHIFT) | (arg_types & SLJIT_DEF_MASK);
+ types = (types << SLJIT_ARG_SHIFT) | (arg_types & SLJIT_ARG_MASK);
- switch (arg_types & SLJIT_DEF_MASK) {
- case SLJIT_ARG_TYPE_F32:
- offsets[arg_count] = (sljit_u8)stack_offset;
- stack_offset += sizeof(sljit_f32);
- arg_count++;
+ switch (arg_types & SLJIT_ARG_MASK) {
+ case SLJIT_ARG_TYPE_F64:
+ if (offset & 0x7)
+ offset += sizeof(sljit_sw);
+ *offset_ptr++ = (sljit_u8)offset;
+ offset += sizeof(sljit_f64);
float_arg_count++;
break;
- case SLJIT_ARG_TYPE_F64:
- if (stack_offset & 0x7)
- stack_offset += sizeof(sljit_sw);
- offsets[arg_count] = (sljit_u8)stack_offset;
- stack_offset += sizeof(sljit_f64);
- arg_count++;
+ case SLJIT_ARG_TYPE_F32:
+ *offset_ptr++ = (sljit_u8)offset;
+ offset += sizeof(sljit_f32);
float_arg_count++;
break;
default:
- offsets[arg_count] = (sljit_u8)stack_offset;
- stack_offset += sizeof(sljit_sw);
- arg_count++;
+ *offset_ptr++ = (sljit_u8)offset;
+ offset += sizeof(sljit_sw);
word_arg_offset += sizeof(sljit_sw);
break;
}
- arg_types >>= SLJIT_DEF_SHIFT;
+ arg_types >>= SLJIT_ARG_SHIFT;
}
- if (stack_offset > 16)
- FAIL_IF(push_inst16(compiler, SUB_SP | (((stack_offset - 16) + 0x7) & ~0x7) >> 2));
+ if (offset > 4 * sizeof(sljit_sw) && (!is_tail_call || offset > compiler->args_size)) {
+ /* Keep lr register on the stack. */
+ if (is_tail_call)
+ offset += sizeof(sljit_sw);
+
+ offset = ((offset - 4 * sizeof(sljit_sw)) + 0x7) & ~(sljit_uw)0x7;
+
+ *extra_space = offset;
+
+ if (is_tail_call)
+ FAIL_IF(emit_stack_frame_release(compiler, (sljit_s32)offset));
+ else
+ FAIL_IF(push_inst16(compiler, SUB_SP_I | (offset >> 2)));
+ } else {
+ if (is_tail_call)
+ FAIL_IF(emit_stack_frame_release(compiler, -1));
+ *extra_space = 0;
+ }
SLJIT_ASSERT(reg_map[TMP_REG1] == 12);
/* Process arguments in reversed direction. */
while (types) {
- switch (types & SLJIT_DEF_MASK) {
- case SLJIT_ARG_TYPE_F32:
- arg_count--;
+ switch (types & SLJIT_ARG_MASK) {
+ case SLJIT_ARG_TYPE_F64:
float_arg_count--;
- stack_offset = offsets[arg_count];
+ offset = *(--offset_ptr);
- if (stack_offset < 16) {
- if (src_offset == stack_offset) {
+ SLJIT_ASSERT((offset & 0x7) == 0);
+
+ if (offset < 4 * sizeof(sljit_sw)) {
+ if (src_offset == offset || src_offset == offset + sizeof(sljit_sw)) {
FAIL_IF(push_inst16(compiler, MOV | (src_offset << 1) | 4 | (1 << 7)));
*src = TMP_REG1;
}
- FAIL_IF(push_inst32(compiler, VMOV | 0x100000 | (float_arg_count << 16) | (stack_offset << 10)));
+ FAIL_IF(push_inst32(compiler, VMOV2 | 0x100000 | (offset << 10) | ((offset + sizeof(sljit_sw)) << 14) | float_arg_count));
} else
- FAIL_IF(push_inst32(compiler, VSTR_F32 | 0x800000 | RN4(SLJIT_SP) | (float_arg_count << 12) | ((stack_offset - 16) >> 2)));
+ FAIL_IF(push_inst32(compiler, VSTR_F32 | 0x800100 | RN4(SLJIT_SP)
+ | (float_arg_count << 12) | ((offset - 4 * sizeof(sljit_sw)) >> 2)));
break;
- case SLJIT_ARG_TYPE_F64:
- arg_count--;
+ case SLJIT_ARG_TYPE_F32:
float_arg_count--;
- stack_offset = offsets[arg_count];
-
- SLJIT_ASSERT((stack_offset & 0x7) == 0);
+ offset = *(--offset_ptr);
- if (stack_offset < 16) {
- if (src_offset == stack_offset || src_offset == stack_offset + sizeof(sljit_sw)) {
+ if (offset < 4 * sizeof(sljit_sw)) {
+ if (src_offset == offset) {
FAIL_IF(push_inst16(compiler, MOV | (src_offset << 1) | 4 | (1 << 7)));
*src = TMP_REG1;
}
- FAIL_IF(push_inst32(compiler, VMOV2 | 0x100000 | (stack_offset << 10) | ((stack_offset + sizeof(sljit_sw)) << 14) | float_arg_count));
+ FAIL_IF(push_inst32(compiler, VMOV | 0x100000 | (float_arg_count << 16) | (offset << 10)));
} else
- FAIL_IF(push_inst32(compiler, VSTR_F32 | 0x800100 | RN4(SLJIT_SP) | (float_arg_count << 12) | ((stack_offset - 16) >> 2)));
+ FAIL_IF(push_inst32(compiler, VSTR_F32 | 0x800000 | RN4(SLJIT_SP)
+ | (float_arg_count << 12) | ((offset - 4 * sizeof(sljit_sw)) >> 2)));
break;
default:
- arg_count--;
word_arg_offset -= sizeof(sljit_sw);
- stack_offset = offsets[arg_count];
+ offset = *(--offset_ptr);
- SLJIT_ASSERT(stack_offset >= word_arg_offset);
+ SLJIT_ASSERT(offset >= word_arg_offset);
- if (stack_offset != word_arg_offset) {
- if (stack_offset < 16) {
- if (src_offset == stack_offset) {
+ if (offset != word_arg_offset) {
+ if (offset < 4 * sizeof(sljit_sw)) {
+ if (src_offset == offset) {
FAIL_IF(push_inst16(compiler, MOV | (src_offset << 1) | 4 | (1 << 7)));
*src = TMP_REG1;
}
else if (src_offset == word_arg_offset) {
- *src = 1 + (stack_offset >> 2);
- src_offset = stack_offset;
+ *src = (sljit_s32)(1 + (offset >> 2));
+ src_offset = offset;
}
- FAIL_IF(push_inst16(compiler, MOV | (stack_offset >> 2) | (word_arg_offset << 1)));
+ FAIL_IF(push_inst16(compiler, MOV | (offset >> 2) | (word_arg_offset << 1)));
} else
- FAIL_IF(push_inst16(compiler, STR_SP | (word_arg_offset << 6) | ((stack_offset - 16) >> 2)));
+ FAIL_IF(push_inst16(compiler, STR_SP | (word_arg_offset << 6) | ((offset - 4 * sizeof(sljit_sw)) >> 2)));
}
break;
}
- types >>= SLJIT_DEF_SHIFT;
+ types >>= SLJIT_ARG_SHIFT;
}
return SLJIT_SUCCESS;
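
softfloat_call_with_args follows the soft-float AAPCS: the first four argument words travel in r0-r3, doubles are aligned to an even word, everything past 16 bytes spills to the stack, and a tail call reserves one extra word so LR survives the frame release. A hedged sketch of the offset assignment alone, leaving out the instruction emission:

    #include <stddef.h>

    enum arg_kind { ARG_WORD, ARG_F32, ARG_F64 };

    /* Offsets below 16 map onto r0-r3; the rest become stack slots.
       Returns the total argument area in bytes. */
    static size_t assign_offsets(const enum arg_kind *args, size_t n, size_t *offs)
    {
        size_t offset = 0;

        for (size_t i = 0; i < n; i++) {
            if (args[i] == ARG_F64 && (offset & 0x7) != 0)
                offset += 4;    /* align doubles to 8 bytes */
            offs[i] = offset;
            offset += (args[i] == ARG_F64) ? 8 : 4;
        }
        return offset;
    }
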
@@ -1988,83 +2667,48 @@ static sljit_s32 softfloat_call_with_args(struct sljit_compiler *compiler, sljit
static sljit_s32 softfloat_post_call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types)
{
- sljit_s32 stack_size = 0;
-
- if ((arg_types & SLJIT_DEF_MASK) == SLJIT_ARG_TYPE_F32)
- FAIL_IF(push_inst32(compiler, VMOV | (0 << 16) | (0 << 12)));
- if ((arg_types & SLJIT_DEF_MASK) == SLJIT_ARG_TYPE_F64)
+ if ((arg_types & SLJIT_ARG_MASK) == SLJIT_ARG_TYPE_F64)
FAIL_IF(push_inst32(compiler, VMOV2 | (1 << 16) | (0 << 12) | 0));
+ if ((arg_types & SLJIT_ARG_MASK) == SLJIT_ARG_TYPE_F32)
+ FAIL_IF(push_inst32(compiler, VMOV | (0 << 16) | (0 << 12)));
- arg_types >>= SLJIT_DEF_SHIFT;
-
- while (arg_types) {
- switch (arg_types & SLJIT_DEF_MASK) {
- case SLJIT_ARG_TYPE_F32:
- stack_size += sizeof(sljit_f32);
- break;
- case SLJIT_ARG_TYPE_F64:
- if (stack_size & 0x7)
- stack_size += sizeof(sljit_sw);
- stack_size += sizeof(sljit_f64);
- break;
- default:
- stack_size += sizeof(sljit_sw);
- break;
- }
-
- arg_types >>= SLJIT_DEF_SHIFT;
- }
-
- if (stack_size <= 16)
- return SLJIT_SUCCESS;
-
- return push_inst16(compiler, ADD_SP | ((((stack_size - 16) + 0x7) & ~0x7) >> 2));
+ return SLJIT_SUCCESS;
}
#else
static sljit_s32 hardfloat_call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types)
{
- sljit_u32 remap = 0;
- sljit_u32 offset = 0;
- sljit_u32 new_offset, mask;
+ sljit_u32 offset = SLJIT_FR0;
+ sljit_u32 new_offset = SLJIT_FR0;
+ sljit_u32 f32_offset = 0;
/* Remove return value. */
- arg_types >>= SLJIT_DEF_SHIFT;
+ arg_types >>= SLJIT_ARG_SHIFT;
while (arg_types) {
- if ((arg_types & SLJIT_DEF_MASK) == SLJIT_ARG_TYPE_F32) {
- new_offset = 0;
- mask = 1;
-
- while (remap & mask) {
- new_offset++;
- mask <<= 1;
- }
- remap |= mask;
-
+ switch (arg_types & SLJIT_ARG_MASK) {
+ case SLJIT_ARG_TYPE_F64:
if (offset != new_offset)
- FAIL_IF(push_inst32(compiler, VMOV_F32 | DD4((new_offset >> 1) + 1)
- | ((new_offset & 0x1) ? 0x400000 : 0) | DM4((offset >> 1) + 1)));
-
- offset += 2;
- }
- else if ((arg_types & SLJIT_DEF_MASK) == SLJIT_ARG_TYPE_F64) {
- new_offset = 0;
- mask = 3;
+ FAIL_IF(push_inst32(compiler, VMOV_F32 | SLJIT_32 | VD4(new_offset) | VM4(offset)));
- while (remap & mask) {
- new_offset += 2;
- mask <<= 2;
+ new_offset++;
+ offset++;
+ break;
+ case SLJIT_ARG_TYPE_F32:
+ if (f32_offset != 0) {
+ FAIL_IF(push_inst32(compiler, VMOV_F32 | 0x400000 | VD4(f32_offset) | VM4(offset)));
+ f32_offset = 0;
+ } else {
+ if (offset != new_offset)
+ FAIL_IF(push_inst32(compiler, VMOV_F32 | 0x400000 | VD4(new_offset) | VM4(offset)));
+ f32_offset = new_offset;
+ new_offset++;
}
- remap |= mask;
-
- if (offset != new_offset)
- FAIL_IF(push_inst32(compiler, VMOV_F32 | SLJIT_F32_OP | DD4((new_offset >> 1) + 1) | DM4((offset >> 1) + 1)));
-
- offset += 2;
+ offset++;
+ break;
}
- arg_types >>= SLJIT_DEF_SHIFT;
+ arg_types >>= SLJIT_ARG_SHIFT;
}
return SLJIT_SUCCESS;
@@ -2077,34 +2721,56 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compile
{
#ifdef __SOFTFP__
struct sljit_jump *jump;
+ sljit_u32 extra_space = (sljit_u32)type;
#endif
CHECK_ERROR_PTR();
CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types));
#ifdef __SOFTFP__
- PTR_FAIL_IF(softfloat_call_with_args(compiler, arg_types, NULL));
+ if ((type & 0xff) != SLJIT_CALL_REG_ARG) {
+ PTR_FAIL_IF(softfloat_call_with_args(compiler, arg_types, NULL, &extra_space));
+ SLJIT_ASSERT((extra_space & 0x7) == 0);
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
- || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->skip_checks = 1;
-#endif
+ if ((type & SLJIT_CALL_RETURN) && extra_space == 0)
+ type = SLJIT_JUMP | (type & SLJIT_REWRITABLE_JUMP);
- jump = sljit_emit_jump(compiler, type);
- PTR_FAIL_IF(jump == NULL);
+ SLJIT_SKIP_CHECKS(compiler);
+ jump = sljit_emit_jump(compiler, type);
+ PTR_FAIL_IF(jump == NULL);
- PTR_FAIL_IF(softfloat_post_call_with_args(compiler, arg_types));
- return jump;
-#else
- PTR_FAIL_IF(hardfloat_call_with_args(compiler, arg_types));
+ if (extra_space > 0) {
+ if (type & SLJIT_CALL_RETURN)
+ PTR_FAIL_IF(push_inst32(compiler, LDR | RT4(TMP_REG2)
+ | RN4(SLJIT_SP) | (extra_space - sizeof(sljit_sw))));
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
- || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->skip_checks = 1;
-#endif
+ PTR_FAIL_IF(push_inst16(compiler, ADD_SP_I | (extra_space >> 2)));
+
+ if (type & SLJIT_CALL_RETURN) {
+ PTR_FAIL_IF(push_inst16(compiler, BX | RN3(TMP_REG2)));
+ return jump;
+ }
+ }
+
+ SLJIT_ASSERT(!(type & SLJIT_CALL_RETURN));
+ PTR_FAIL_IF(softfloat_post_call_with_args(compiler, arg_types));
+ return jump;
+ }
+#endif /* __SOFTFP__ */
+ if (type & SLJIT_CALL_RETURN) {
+ /* ldmia sp!, {..., lr} */
+ PTR_FAIL_IF(emit_stack_frame_release(compiler, -1));
+ type = SLJIT_JUMP | (type & SLJIT_REWRITABLE_JUMP);
+ }
+
+#ifndef __SOFTFP__
+ if ((type & 0xff) != SLJIT_CALL_REG_ARG)
+ PTR_FAIL_IF(hardfloat_call_with_args(compiler, arg_types));
+#endif /* !__SOFTFP__ */
+
+ SLJIT_SKIP_CHECKS(compiler);
return sljit_emit_jump(compiler, type);
-#endif
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw)
@@ -2117,7 +2783,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi
SLJIT_ASSERT(reg_map[TMP_REG1] != 14);
- if (!(src & SLJIT_IMM)) {
+ if (src != SLJIT_IMM) {
if (FAST_IS_REG(src)) {
SLJIT_ASSERT(reg_map[src] != 14);
return push_inst16(compiler, (type <= SLJIT_JUMP ? BX : BLX) | RN3(src));
@@ -2132,7 +2798,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi
jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
FAIL_IF(!jump);
set_jump(jump, compiler, JUMP_ADDR | ((type >= SLJIT_FAST_CALL) ? IS_BL : 0));
- jump->u.target = srcw;
+ jump->u.target = (sljit_uw)srcw;
FAIL_IF(emit_imm32_const(compiler, TMP_REG1, 0));
jump->addr = compiler->size;
@@ -2143,37 +2809,92 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compi
sljit_s32 arg_types,
sljit_s32 src, sljit_sw srcw)
{
+#ifdef __SOFTFP__
+ sljit_u32 extra_space = (sljit_u32)type;
+#endif
+
CHECK_ERROR();
CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw));
-#ifdef __SOFTFP__
if (src & SLJIT_MEM) {
FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG1, src, srcw, TMP_REG1));
src = TMP_REG1;
}
- FAIL_IF(softfloat_call_with_args(compiler, arg_types, &src));
+ if ((type & SLJIT_CALL_RETURN) && (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options)))) {
+ FAIL_IF(push_inst16(compiler, MOV | SET_REGS44(TMP_REG1, src)));
+ src = TMP_REG1;
+ }
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
- || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->skip_checks = 1;
-#endif
+#ifdef __SOFTFP__
+ if ((type & 0xff) != SLJIT_CALL_REG_ARG) {
+ FAIL_IF(softfloat_call_with_args(compiler, arg_types, &src, &extra_space));
+ SLJIT_ASSERT((extra_space & 0x7) == 0);
- FAIL_IF(sljit_emit_ijump(compiler, type, src, srcw));
+ if ((type & SLJIT_CALL_RETURN) && extra_space == 0)
+ type = SLJIT_JUMP;
- return softfloat_post_call_with_args(compiler, arg_types);
-#else /* !__SOFTFP__ */
- FAIL_IF(hardfloat_call_with_args(compiler, arg_types));
+ SLJIT_SKIP_CHECKS(compiler);
+ FAIL_IF(sljit_emit_ijump(compiler, type, src, srcw));
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
- || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->skip_checks = 1;
-#endif
+ if (extra_space > 0) {
+ if (type & SLJIT_CALL_RETURN)
+ FAIL_IF(push_inst32(compiler, LDR | RT4(TMP_REG2)
+ | RN4(SLJIT_SP) | (extra_space - sizeof(sljit_sw))));
- return sljit_emit_ijump(compiler, type, src, srcw);
+ FAIL_IF(push_inst16(compiler, ADD_SP_I | (extra_space >> 2)));
+
+ if (type & SLJIT_CALL_RETURN)
+ return push_inst16(compiler, BX | RN3(TMP_REG2));
+ }
+
+ SLJIT_ASSERT(!(type & SLJIT_CALL_RETURN));
+ return softfloat_post_call_with_args(compiler, arg_types);
+ }
#endif /* __SOFTFP__ */
+
+ if (type & SLJIT_CALL_RETURN) {
+ /* ldmia sp!, {..., lr} */
+ FAIL_IF(emit_stack_frame_release(compiler, -1));
+ type = SLJIT_JUMP;
+ }
+
+#ifndef __SOFTFP__
+ if ((type & 0xff) != SLJIT_CALL_REG_ARG)
+ FAIL_IF(hardfloat_call_with_args(compiler, arg_types));
+#endif /* !__SOFTFP__ */
+
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_ijump(compiler, type, src, srcw);
+}
+
+#ifdef __SOFTFP__
+
+static SLJIT_INLINE sljit_s32 emit_fmov_before_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw)
+{
+ if (compiler->options & SLJIT_ENTER_REG_ARG) {
+ if (src == SLJIT_FR0)
+ return SLJIT_SUCCESS;
+
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_fop1(compiler, op, SLJIT_RETURN_FREG, 0, src, srcw);
+ }
+
+ if (FAST_IS_REG(src)) {
+ if (op & SLJIT_32)
+ return push_inst32(compiler, VMOV | (1 << 20) | VN4(src) | RT4(SLJIT_R0));
+ return push_inst32(compiler, VMOV2 | (1 << 20) | VM4(src) | RT4(SLJIT_R0) | RN4(SLJIT_R1));
+ }
+
+ SLJIT_SKIP_CHECKS(compiler);
+
+ if (op & SLJIT_32)
+ return sljit_emit_op1(compiler, SLJIT_MOV, SLJIT_R0, 0, src, srcw);
+ return sljit_emit_mem(compiler, SLJIT_MOV, SLJIT_REG_PAIR(SLJIT_R0, SLJIT_R1), src, srcw);
}
+#endif /* __SOFTFP__ */
+
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 type)
@@ -2186,7 +2907,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
ADJUST_LOCAL_OFFSET(dst, dstw);
op = GET_OPCODE(op);
- cc = get_cc(compiler, type & 0xff);
+ cc = get_cc(compiler, type);
dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
if (op < SLJIT_ADD) {
@@ -2227,25 +2948,47 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
return push_inst32(compiler, MOV_W | SET_FLAGS | RD4(TMP_REG1) | RM4(dst_r));
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compiler, sljit_s32 type,
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_select(struct sljit_compiler *compiler, sljit_s32 type,
sljit_s32 dst_reg,
- sljit_s32 src, sljit_sw srcw)
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2_reg)
{
sljit_uw cc, tmp;
CHECK_ERROR();
- CHECK(check_sljit_emit_cmov(compiler, type, dst_reg, src, srcw));
+ CHECK(check_sljit_emit_select(compiler, type, dst_reg, src1, src1w, src2_reg));
+
+ ADJUST_LOCAL_OFFSET(src1, src1w);
+
+ if (src2_reg != dst_reg && src1 == dst_reg) {
+ src1 = src2_reg;
+ src1w = 0;
+ src2_reg = dst_reg;
+ type ^= 0x1;
+ }
- dst_reg &= ~SLJIT_I32_OP;
+ if (src1 & SLJIT_MEM) {
+ FAIL_IF(emit_op_mem(compiler, WORD_SIZE, (src2_reg != dst_reg) ? dst_reg : TMP_REG1, src1, src1w, TMP_REG2));
+
+ if (src2_reg != dst_reg) {
+ src1 = src2_reg;
+ src1w = 0;
+ type ^= 0x1;
+ } else {
+ src1 = TMP_REG1;
+ src1w = 0;
+ }
+ } else if (dst_reg != src2_reg)
+ FAIL_IF(push_inst16(compiler, MOV | SET_REGS44(dst_reg, src2_reg)));
- cc = get_cc(compiler, type & 0xff);
+ cc = get_cc(compiler, type & ~SLJIT_32);
- if (!(src & SLJIT_IMM)) {
+ if (src1 != SLJIT_IMM) {
FAIL_IF(push_inst16(compiler, IT | (cc << 4) | 0x8));
- return push_inst16(compiler, MOV | SET_REGS44(dst_reg, src));
+ return push_inst16(compiler, MOV | SET_REGS44(dst_reg, src1));
}
- tmp = (sljit_uw) srcw;
+ tmp = (sljit_uw)src1w;
if (tmp < 0x10000) {
/* set low 16 bits, set hi 16 bits to 0. */
@@ -2254,13 +2997,13 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compil
| COPY_BITS(tmp, 12, 16, 4) | COPY_BITS(tmp, 11, 26, 1) | COPY_BITS(tmp, 8, 12, 3) | (tmp & 0xff));
}
- tmp = get_imm(srcw);
+ tmp = get_imm((sljit_uw)src1w);
if (tmp != INVALID_IMM) {
FAIL_IF(push_inst16(compiler, IT | (cc << 4) | 0x8));
return push_inst32(compiler, MOV_WI | RD4(dst_reg) | tmp);
}
- tmp = get_imm(~srcw);
+ tmp = get_imm(~(sljit_uw)src1w);
if (tmp != INVALID_IMM) {
FAIL_IF(push_inst16(compiler, IT | (cc << 4) | 0x8));
return push_inst32(compiler, MVN_WI | RD4(dst_reg) | tmp);
@@ -2268,23 +3011,228 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compil
FAIL_IF(push_inst16(compiler, IT | (cc << 4) | ((cc & 0x1) << 3) | 0x4));
- tmp = (sljit_uw) srcw;
+ tmp = (sljit_uw)src1w;
FAIL_IF(push_inst32(compiler, MOVW | RD4(dst_reg)
| COPY_BITS(tmp, 12, 16, 4) | COPY_BITS(tmp, 11, 26, 1) | COPY_BITS(tmp, 8, 12, 3) | (tmp & 0xff)));
return push_inst32(compiler, MOVT | RD4(dst_reg)
| COPY_BITS(tmp, 12 + 16, 16, 4) | COPY_BITS(tmp, 11 + 16, 26, 1) | COPY_BITS(tmp, 8 + 16, 12, 3) | ((tmp & 0xff0000) >> 16));
}
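+
+/* Worst-case sketch for the immediate path above: when src1w fits neither
+   a modified immediate nor its complement, the emitted sequence is
+   roughly
+
+     itt  <cc>
+     movw dst, #(imm & 0xffff)
+     movt dst, #(imm >> 16)
+
+   so dst receives src1w only when <cc> holds and otherwise keeps the
+   src2 value moved in earlier. */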
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fselect(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 dst_freg,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2_freg)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fselect(compiler, type, dst_freg, src1, src1w, src2_freg));
+
+ ADJUST_LOCAL_OFFSET(src1, src1w);
+
+ type ^= SLJIT_32;
+
+ if (dst_freg != src2_freg) {
+ if (dst_freg == src1) {
+ src1 = src2_freg;
+ src1w = 0;
+ type ^= 0x1;
+ } else
+ FAIL_IF(push_inst32(compiler, VMOV_F32 | (type & SLJIT_32) | VD4(dst_freg) | VM4(src2_freg)));
+ }
+
+ if (src1 & SLJIT_MEM) {
+ FAIL_IF(emit_fop_mem(compiler, (type & SLJIT_32) | FPU_LOAD, TMP_FREG1, src1, src1w));
+ src1 = TMP_FREG1;
+ }
+
+ FAIL_IF(push_inst16(compiler, IT | (get_cc(compiler, type & ~SLJIT_32) << 4) | 0x8));
+ return push_inst32(compiler, VMOV_F32 | (type & SLJIT_32) | VD4(dst_freg) | VM4(src1));
+}
+
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type,
sljit_s32 reg,
sljit_s32 mem, sljit_sw memw)
{
sljit_s32 flags;
- sljit_ins inst;
+ sljit_uw imm, tmp;
CHECK_ERROR();
CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw));
+ if (!(reg & REG_PAIR_MASK))
+ return sljit_emit_mem_unaligned(compiler, type, reg, mem, memw);
+
+ if (type & (SLJIT_MEM_UNALIGNED | SLJIT_MEM_ALIGNED_16 | SLJIT_MEM_ALIGNED_32)) {
+ if ((mem & REG_MASK) == 0) {
+ if ((memw & 0xfff) >= (0x1000 - SSIZE_OF(sw))) {
+ imm = get_imm((sljit_uw)((memw + 0x1000) & ~0xfff));
+
+ if (imm != INVALID_IMM)
+ memw = (memw & 0xfff) - 0x1000;
+ } else {
+ imm = get_imm((sljit_uw)(memw & ~0xfff));
+
+ if (imm != INVALID_IMM)
+ memw &= 0xfff;
+ }
+
+ if (imm == INVALID_IMM) {
+ FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_uw)memw));
+ memw = 0;
+ } else
+ FAIL_IF(push_inst32(compiler, MOV_WI | RD4(TMP_REG1) | imm));
+
+ mem = SLJIT_MEM1(TMP_REG1);
+ } else if (mem & OFFS_REG_MASK) {
+ FAIL_IF(push_inst32(compiler, ADD_W | RD4(TMP_REG1) | RN4(mem & REG_MASK) | RM4(OFFS_REG(mem)) | ((sljit_uw)(memw & 0x3) << 6)));
+ memw = 0;
+ mem = SLJIT_MEM1(TMP_REG1);
+ } else if (memw < -0xff) {
+ /* A zero (-memw & 0xfff) value is also covered by the first case. */
+ if ((-memw & 0xfff) <= SSIZE_OF(sw))
+ tmp = (sljit_uw)((-memw + 0x7ff) & ~0x7ff);
+ else
+ tmp = (sljit_uw)((-memw + 0xfff) & ~0xfff);
+
+ SLJIT_ASSERT(tmp >= (sljit_uw)-memw);
+ imm = get_imm(tmp);
+
+ if (imm != INVALID_IMM) {
+ FAIL_IF(push_inst32(compiler, SUB_WI | RD4(TMP_REG1) | RN4(mem & REG_MASK) | imm));
+ memw += (sljit_sw)tmp;
+ SLJIT_ASSERT(memw >= 0 && memw <= 0xfff - SSIZE_OF(sw));
+ } else {
+ FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_uw)memw));
+ FAIL_IF(push_inst16(compiler, ADD | SET_REGS44(TMP_REG1, mem & REG_MASK)));
+ memw = 0;
+ }
+
+ mem = SLJIT_MEM1(TMP_REG1);
+ } else if (memw >= (0x1000 - SSIZE_OF(sw))) {
+ if ((memw & 0xfff) >= (0x1000 - SSIZE_OF(sw))) {
+ imm = get_imm((sljit_uw)((memw + 0x1000) & ~0xfff));
+
+ if (imm != INVALID_IMM)
+ memw = (memw & 0xfff) - 0x1000;
+ } else {
+ imm = get_imm((sljit_uw)(memw & ~0xfff));
+
+ if (imm != INVALID_IMM)
+ memw &= 0xfff;
+ }
+
+ if (imm != INVALID_IMM) {
+ SLJIT_ASSERT(memw >= -0xff && memw <= 0xfff);
+ FAIL_IF(push_inst32(compiler, ADD_WI | RD4(TMP_REG1) | RN4(mem & REG_MASK) | imm));
+ } else {
+ FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_uw)memw));
+ FAIL_IF(push_inst16(compiler, ADD | SET_REGS44(TMP_REG1, mem & REG_MASK)));
+ memw = 0;
+ }
+
+ mem = SLJIT_MEM1(TMP_REG1);
+ }
+
+ flags = WORD_SIZE;
+
+ SLJIT_ASSERT(memw <= 0xfff - SSIZE_OF(sw) && memw >= -0xff);
+
+ if (type & SLJIT_MEM_STORE) {
+ flags |= STORE;
+ } else if (REG_PAIR_FIRST(reg) == (mem & REG_MASK)) {
+ FAIL_IF(emit_op_mem(compiler, WORD_SIZE, REG_PAIR_SECOND(reg), mem, memw + SSIZE_OF(sw), TMP_REG2));
+ return emit_op_mem(compiler, WORD_SIZE, REG_PAIR_FIRST(reg), mem, memw, TMP_REG2);
+ }
+
+ FAIL_IF(emit_op_mem(compiler, flags, REG_PAIR_FIRST(reg), mem, memw, TMP_REG2));
+ return emit_op_mem(compiler, flags, REG_PAIR_SECOND(reg), mem, memw + SSIZE_OF(sw), TMP_REG2);
+ }
+
+ flags = 1 << 23;
+
+ if ((mem & REG_MASK) == 0) {
+ tmp = (sljit_uw)(memw & 0x7fc);
+ imm = get_imm((sljit_uw)((memw + (tmp <= 0x400 ? 0 : 0x400)) & ~0x3fc));
+
+ if (imm == INVALID_IMM) {
+ FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_uw)memw));
+ memw = 0;
+ } else {
+ FAIL_IF(push_inst32(compiler, MOV_WI | RD4(TMP_REG1) | imm));
+ memw = (memw & 0x3fc) >> 2;
+
+ if (tmp > 0x400) {
+ memw = 0x100 - memw;
+ flags = 0;
+ }
+
+ SLJIT_ASSERT(memw >= 0 && memw <= 0xff);
+ }
+
+ mem = SLJIT_MEM1(TMP_REG1);
+ } else if (mem & OFFS_REG_MASK) {
+ FAIL_IF(push_inst32(compiler, ADD_W | RD4(TMP_REG1) | RN4(mem & REG_MASK) | RM4(OFFS_REG(mem)) | ((sljit_uw)(memw & 0x3) << 6)));
+ memw = 0;
+ mem = SLJIT_MEM1(TMP_REG1);
+ } else if (memw < 0) {
+ if ((-memw & ~0x3fc) == 0) {
+ flags = 0;
+ memw = -memw >> 2;
+ } else {
+ tmp = (sljit_uw)(-memw & 0x7fc);
+ imm = get_imm((sljit_uw)((-memw + (tmp <= 0x400 ? 0 : 0x400)) & ~0x3fc));
+
+ if (imm != INVALID_IMM) {
+ FAIL_IF(push_inst32(compiler, SUB_WI | RD4(TMP_REG1) | RN4(mem & REG_MASK) | imm));
+ memw = (-memw & 0x3fc) >> 2;
+
+ if (tmp <= 0x400)
+ flags = 0;
+ else
+ memw = 0x100 - memw;
+ } else {
+ FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_uw)memw));
+ FAIL_IF(push_inst16(compiler, ADD | SET_REGS44(TMP_REG1, mem & REG_MASK)));
+ memw = 0;
+ }
+
+ mem = SLJIT_MEM1(TMP_REG1);
+ }
+ } else if ((memw & ~0x3fc) != 0) {
+ tmp = (sljit_uw)(memw & 0x7fc);
+ imm = get_imm((sljit_uw)((memw + (tmp <= 0x400 ? 0 : 0x400)) & ~0x3fc));
+
+ if (imm != INVALID_IMM) {
+ FAIL_IF(push_inst32(compiler, ADD_WI | RD4(TMP_REG1) | RN4(mem & REG_MASK) | imm));
+ memw = (memw & 0x3fc) >> 2;
+
+ if (tmp > 0x400) {
+ memw = 0x100 - memw;
+ flags = 0;
+ }
+ } else {
+ FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_uw)memw));
+ FAIL_IF(push_inst16(compiler, ADD | SET_REGS44(TMP_REG1, mem & REG_MASK)));
+ memw = 0;
+ }
+
+ mem = SLJIT_MEM1(TMP_REG1);
+ } else
+ memw >>= 2;
+
+ SLJIT_ASSERT(memw >= 0 && memw <= 0xff);
+ return push_inst32(compiler, ((type & SLJIT_MEM_STORE) ? STRD : LDRD) | (sljit_ins)flags | RN4(mem & REG_MASK) | RT4(REG_PAIR_FIRST(reg)) | RD4(REG_PAIR_SECOND(reg)) | (sljit_ins)memw);
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem_update(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 reg,
+ sljit_s32 mem, sljit_sw memw)
+{
+ sljit_s32 flags;
+ sljit_ins inst;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_mem_update(compiler, type, reg, mem, memw));
+
if ((mem & OFFS_REG_MASK) || (memw > 255 || memw < -255))
return SLJIT_ERR_UNSUPPORTED;
@@ -2295,6 +3243,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compile
case SLJIT_MOV:
case SLJIT_MOV_U32:
case SLJIT_MOV_S32:
+ case SLJIT_MOV32:
case SLJIT_MOV_P:
flags = WORD_SIZE;
break;
@@ -2321,7 +3270,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compile
inst = sljit_mem32[flags] | 0x900;
- if (type & SLJIT_MEM_PRE)
+ if (!(type & SLJIT_MEM_POST))
inst |= 0x400;
if (memw >= 0)
@@ -2329,7 +3278,811 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compile
else
memw = -memw;
- return push_inst32(compiler, inst | RT4(reg) | RN4(mem & REG_MASK) | memw);
+ return push_inst32(compiler, inst | RT4(reg) | RN4(mem & REG_MASK) | (sljit_ins)memw);
+}
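+
+/* In the encoding above, 0x900 selects the writeback immediate form of
+   the Thumb-2 load/store; bit 10 (0x400) requests pre-indexing, so a
+   post-indexed access is the same encoding with that bit clear, and the
+   sign of memw is folded into the add/subtract (U) bit separately. */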
+
+static sljit_s32 update_mem_addr(struct sljit_compiler *compiler, sljit_s32 *mem, sljit_sw *memw, sljit_s32 max_offset)
+{
+ sljit_s32 arg = *mem;
+ sljit_sw argw = *memw;
+ sljit_uw imm;
+
+ *mem = TMP_REG1;
+
+ if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) {
+ *memw = 0;
+ return push_inst32(compiler, ADD_W | RD4(TMP_REG1) | RN4(arg & REG_MASK) | RM4(OFFS_REG(arg)) | ((sljit_uw)(argw & 0x3) << 6));
+ }
+
+ arg &= REG_MASK;
+
+ if (arg) {
+ if (argw <= max_offset && argw >= -0xff) {
+ *mem = arg;
+ return SLJIT_SUCCESS;
+ }
+
+ if (argw < 0) {
+ imm = get_imm((sljit_uw)(-argw & ~0xff));
+
+ if (imm) {
+ *memw = -(-argw & 0xff);
+ return push_inst32(compiler, SUB_WI | RD4(TMP_REG1) | RN4(arg) | imm);
+ }
+ } else if ((argw & 0xfff) <= max_offset) {
+ imm = get_imm((sljit_uw)(argw & ~0xfff));
+
+ if (imm) {
+ *memw = argw & 0xfff;
+ return push_inst32(compiler, ADD_WI | RD4(TMP_REG1) | RN4(arg) | imm);
+ }
+ } else {
+ imm = get_imm((sljit_uw)((argw | 0xfff) + 1));
+
+ if (imm) {
+ *memw = (argw & 0xfff) - 0x1000;
+ return push_inst32(compiler, ADD_WI | RD4(TMP_REG1) | RN4(arg) | imm);
+ }
+ }
+ }
+
+ imm = (sljit_uw)(argw & ~0xfff);
+
+ if ((argw & 0xfff) > max_offset) {
+ imm += 0x1000;
+ *memw = (argw & 0xfff) - 0x1000;
+ } else
+ *memw = argw & 0xfff;
+
+ FAIL_IF(load_immediate(compiler, TMP_REG1, imm));
+
+ if (arg == 0)
+ return SLJIT_SUCCESS;
+
+ return push_inst16(compiler, ADD | SET_REGS44(TMP_REG1, arg));
+}
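+
+/* update_mem_addr rewrites (mem, memw) so the remaining offset fits the
+   [-0xff, max_offset] window of a single Thumb-2 memory access. It first
+   tries to fold the out-of-range part into one ADD/SUB with a modified
+   immediate and only falls back to a full load_immediate when no such
+   encoding exists. */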
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 mem, sljit_sw memw)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fmem(compiler, type, freg, mem, memw));
+
+ if (type & SLJIT_MEM_ALIGNED_32)
+ return emit_fop_mem(compiler, ((type ^ SLJIT_32) & SLJIT_32) | ((type & SLJIT_MEM_STORE) ? 0 : FPU_LOAD), freg, mem, memw);
+
+ if (type & SLJIT_MEM_STORE) {
+ FAIL_IF(push_inst32(compiler, VMOV | (1 << 20) | VN4(freg) | RT4(TMP_REG2)));
+
+ if (type & SLJIT_32)
+ return emit_op_mem(compiler, WORD_SIZE | STORE, TMP_REG2, mem, memw, TMP_REG1);
+
+ FAIL_IF(update_mem_addr(compiler, &mem, &memw, 0xfff - 4));
+ mem |= SLJIT_MEM;
+
+ FAIL_IF(emit_op_mem(compiler, WORD_SIZE | STORE, TMP_REG2, mem, memw, TMP_REG1));
+ FAIL_IF(push_inst32(compiler, VMOV | (1 << 20) | VN4(freg) | 0x80 | RT4(TMP_REG2)));
+ return emit_op_mem(compiler, WORD_SIZE | STORE, TMP_REG2, mem, memw + 4, TMP_REG1);
+ }
+
+ if (type & SLJIT_32) {
+ FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG2, mem, memw, TMP_REG1));
+ return push_inst32(compiler, VMOV | VN4(freg) | RT4(TMP_REG2));
+ }
+
+ FAIL_IF(update_mem_addr(compiler, &mem, &memw, 0xfff - 4));
+ mem |= SLJIT_MEM;
+
+ FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG2, mem, memw, TMP_REG1));
+ FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG1, mem, memw + 4, TMP_REG1));
+ return push_inst32(compiler, VMOV2 | VM4(freg) | RT4(TMP_REG2) | RN4(TMP_REG1));
+}
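+
+/* VLDR/VSTR are not safe on unaligned addresses, so the fallback path
+   above splits the access into word-sized integer transfers and shuttles
+   the halves through VMOV/VMOV2; update_mem_addr keeps memw small enough
+   that the second access at memw + 4 still fits the addressing mode. */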
+
+static sljit_s32 sljit_emit_simd_mem_offset(struct sljit_compiler *compiler, sljit_s32 *mem_ptr, sljit_sw memw)
+{
+ sljit_uw imm;
+ sljit_s32 mem = *mem_ptr;
+
+ if (SLJIT_UNLIKELY(mem & OFFS_REG_MASK)) {
+ *mem_ptr = TMP_REG1;
+ return push_inst32(compiler, ADD_W | RD4(TMP_REG1) | RN4(mem & REG_MASK) | RM4(OFFS_REG(mem)) | ((sljit_uw)(memw & 0x3) << 6));
+ }
+
+ if (SLJIT_UNLIKELY(!(mem & REG_MASK))) {
+ *mem_ptr = TMP_REG1;
+ return load_immediate(compiler, TMP_REG1, (sljit_uw)memw);
+ }
+
+ mem &= REG_MASK;
+
+ if (memw == 0) {
+ *mem_ptr = mem;
+ return SLJIT_SUCCESS;
+ }
+
+ *mem_ptr = TMP_REG1;
+ imm = get_imm((sljit_uw)(memw < 0 ? -memw : memw));
+
+ if (imm != INVALID_IMM)
+ return push_inst32(compiler, ((memw < 0) ? SUB_WI : ADD_WI) | RD4(TMP_REG1) | RN4(mem) | imm);
+
+ FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_uw)memw));
+ return push_inst16(compiler, ADD | SET_REGS44(TMP_REG1, mem));
+}
+
+static SLJIT_INLINE sljit_s32 simd_get_quad_reg_index(sljit_s32 freg)
+{
+ freg += freg & 0x1;
+
+ SLJIT_ASSERT((freg_map[freg] & 0x1) == (freg <= SLJIT_NUMBER_OF_SCRATCH_FLOAT_REGISTERS));
+
+ if (freg <= SLJIT_NUMBER_OF_SCRATCH_FLOAT_REGISTERS)
+ freg--;
+
+ return freg;
+}
+
+#define SLJIT_QUAD_OTHER_HALF(freg) ((((freg) & 0x1) << 1) - 1)
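+
+/* A quad (Q) register is handled here as a pair of consecutive double
+   (D) registers. SLJIT_QUAD_OTHER_HALF expands to ((freg & 1) * 2) - 1,
+   i.e. +1 for an odd index and -1 for an even one, so adding it to freg
+   toggles between the two halves of the pair. */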
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_mov(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 srcdst, sljit_sw srcdstw)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_s32 alignment = SLJIT_SIMD_GET_ELEM2_SIZE(type);
+ sljit_ins ins;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_mov(compiler, type, freg, srcdst, srcdstw));
+
+ ADJUST_LOCAL_OFFSET(srcdst, srcdstw);
+
+ if (reg_size != 3 && reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if ((type & SLJIT_SIMD_FLOAT) && (elem_size < 2 || elem_size > 3))
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ if (reg_size == 4)
+ freg = simd_get_quad_reg_index(freg);
+
+ if (!(srcdst & SLJIT_MEM)) {
+ if (reg_size == 4)
+ srcdst = simd_get_quad_reg_index(srcdst);
+
+ if (type & SLJIT_SIMD_STORE)
+ ins = VD4(srcdst) | VN4(freg) | VM4(freg);
+ else
+ ins = VD4(freg) | VN4(srcdst) | VM4(srcdst);
+
+ if (reg_size == 4)
+ ins |= (sljit_ins)1 << 6;
+
+ return push_inst32(compiler, VORR | ins);
+ }
+
+ FAIL_IF(sljit_emit_simd_mem_offset(compiler, &srcdst, srcdstw));
+
+ if (elem_size > 3)
+ elem_size = 3;
+
+ ins = ((type & SLJIT_SIMD_STORE) ? VST1 : VLD1) | VD4(freg)
+ | (sljit_ins)((reg_size == 3) ? (0x7 << 8) : (0xa << 8));
+
+ SLJIT_ASSERT(reg_size >= alignment);
+
+ if (alignment == 3)
+ ins |= 0x10;
+ else if (alignment >= 4)
+ ins |= 0x20;
+
+ return push_inst32(compiler, ins | RN4(srcdst) | ((sljit_ins)elem_size) << 6 | 0xf);
+}
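+
+/* Register-to-register SIMD moves are emitted as VORR with identical
+   sources (the canonical NEON register move); bit 6 widens it to a quad
+   operation. For memory operands the VLD1/VST1 alignment hint lives in
+   the bits set above (0x10 for 64-bit, 0x20 for 128-bit alignment), and
+   the assertion ensures the hint never exceeds the register size. */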
+
+static sljit_ins simd_get_imm(sljit_s32 elem_size, sljit_uw value)
+{
+ sljit_ins result;
+
+ if (elem_size > 1 && (sljit_u16)value == (value >> 16)) {
+ elem_size = 1;
+ value = (sljit_u16)value;
+ }
+
+ if (elem_size == 1 && (sljit_u8)value == (value >> 8)) {
+ elem_size = 0;
+ value = (sljit_u8)value;
+ }
+
+ switch (elem_size) {
+ case 0:
+ SLJIT_ASSERT(value <= 0xff);
+ result = 0xe00;
+ break;
+ case 1:
+ SLJIT_ASSERT(value <= 0xffff);
+ result = 0;
+
+ while (1) {
+ if (value <= 0xff) {
+ result |= 0x800;
+ break;
+ }
+
+ if ((value & 0xff) == 0) {
+ value >>= 8;
+ result |= 0xa00;
+ break;
+ }
+
+ if (result != 0)
+ return ~(sljit_ins)0;
+
+ value ^= (sljit_uw)0xffff;
+ result = (1 << 5);
+ }
+ break;
+ default:
+ SLJIT_ASSERT(value <= 0xffffffff);
+ result = 0;
+
+ while (1) {
+ if (value <= 0xff) {
+ result |= 0x000;
+ break;
+ }
+
+ if ((value & ~(sljit_uw)0xff00) == 0) {
+ value >>= 8;
+ result |= 0x200;
+ break;
+ }
+
+ if ((value & ~(sljit_uw)0xff0000) == 0) {
+ value >>= 16;
+ result |= 0x400;
+ break;
+ }
+
+ if ((value & ~(sljit_uw)0xff000000) == 0) {
+ value >>= 24;
+ result |= 0x600;
+ break;
+ }
+
+ if ((value & (sljit_uw)0xff) == 0xff && (value >> 16) == 0) {
+ value >>= 8;
+ result |= 0xc00;
+ break;
+ }
+
+ if ((value & (sljit_uw)0xffff) == 0xffff && (value >> 24) == 0) {
+ value >>= 16;
+ result |= 0xd00;
+ break;
+ }
+
+ if (result != 0)
+ return ~(sljit_ins)0;
+
+ value = ~value;
+ result = (1 << 5);
+ }
+ break;
+ }
+
+ return ((sljit_ins)value & 0xf) | (((sljit_ins)value & 0x70) << 12) | (((sljit_ins)value & 0x80) << 21) | result;
+}
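+
+/* simd_get_imm tries to express a replicated constant as a NEON modified
+   immediate: an 8-bit payload plus a cmode pattern describing how the
+   hardware expands it, with the op bit (1 << 5) used when only the
+   complement fits. The final line scatters the payload into the split
+   i:imm3:imm4 fields of the 32-bit Thumb encoding; a return value of ~0
+   means no encoding exists and the caller must go through a core
+   register. */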
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_replicate(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 src, sljit_sw srcw)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_ins ins, imm;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_replicate(compiler, type, freg, src, srcw));
+
+ ADJUST_LOCAL_OFFSET(src, srcw);
+
+ if (reg_size != 3 && reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if ((type & SLJIT_SIMD_FLOAT) ? (elem_size < 2 || elem_size > 3) : (elem_size > 2))
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ if (reg_size == 4)
+ freg = simd_get_quad_reg_index(freg);
+
+ if (src == SLJIT_IMM && srcw == 0)
+ return push_inst32(compiler, VMOV_i | ((reg_size == 4) ? (1 << 6) : 0) | VD4(freg));
+
+ if (SLJIT_UNLIKELY(elem_size == 3)) {
+ SLJIT_ASSERT(type & SLJIT_SIMD_FLOAT);
+
+ if (src & SLJIT_MEM) {
+ FAIL_IF(emit_fop_mem(compiler, FPU_LOAD | SLJIT_32, freg, src, srcw));
+ src = freg;
+ } else if (freg != src)
+ FAIL_IF(push_inst32(compiler, VORR | VD4(freg) | VN4(src) | VM4(src)));
+
+ freg += SLJIT_QUAD_OTHER_HALF(freg);
+
+ if (freg != src)
+ return push_inst32(compiler, VORR | VD4(freg) | VN4(src) | VM4(src));
+ return SLJIT_SUCCESS;
+ }
+
+ if (src & SLJIT_MEM) {
+ FAIL_IF(sljit_emit_simd_mem_offset(compiler, &src, srcw));
+
+ ins = (sljit_ins)(elem_size << 6);
+
+ if (reg_size == 4)
+ ins |= 1 << 5;
+
+ return push_inst32(compiler, VLD1_r | ins | VD4(freg) | RN4(src) | 0xf);
+ }
+
+ if (type & SLJIT_SIMD_FLOAT) {
+ SLJIT_ASSERT(elem_size == 2);
+ ins = ((sljit_ins)freg_ebit_map[src] << (16 + 2 + 1)) | ((sljit_ins)1 << (16 + 2));
+
+ if (reg_size == 4)
+ ins |= (sljit_ins)1 << 6;
+
+ return push_inst32(compiler, VDUP_s | ins | VD4(freg) | (sljit_ins)freg_map[src]);
+ }
+
+ if (src == SLJIT_IMM) {
+ if (elem_size < 2)
+ srcw &= ((sljit_sw)1 << (((sljit_sw)1 << elem_size) << 3)) - 1;
+
+ imm = simd_get_imm(elem_size, (sljit_uw)srcw);
+
+ if (imm != ~(sljit_ins)0) {
+ if (reg_size == 4)
+ imm |= (sljit_ins)1 << 6;
+
+ return push_inst32(compiler, VMOV_i | imm | VD4(freg));
+ }
+
+ FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_uw)srcw));
+ src = TMP_REG1;
+ }
+
+ switch (elem_size) {
+ case 0:
+ ins = 1 << 22;
+ break;
+ case 1:
+ ins = 1 << 5;
+ break;
+ default:
+ ins = 0;
+ break;
+ }
+
+ if (reg_size == 4)
+ ins |= (sljit_ins)1 << 21;
+
+ return push_inst32(compiler, VDUP | ins | VN4(freg) | RT4(src));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_lane_mov(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg, sljit_s32 lane_index,
+ sljit_s32 srcdst, sljit_sw srcdstw)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_ins ins;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_lane_mov(compiler, type, freg, lane_index, srcdst, srcdstw));
+
+ ADJUST_LOCAL_OFFSET(srcdst, srcdstw);
+
+ if (reg_size != 3 && reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if ((type & SLJIT_SIMD_FLOAT) ? (elem_size < 2 || elem_size > 3) : (elem_size > 2))
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ if (reg_size == 4)
+ freg = simd_get_quad_reg_index(freg);
+
+ if (type & SLJIT_SIMD_LANE_ZERO) {
+ ins = (reg_size == 3) ? 0 : ((sljit_ins)1 << 6);
+
+ if (type & SLJIT_SIMD_FLOAT) {
+ if (elem_size == 3 && !(srcdst & SLJIT_MEM)) {
+ if (lane_index == 1)
+ freg += SLJIT_QUAD_OTHER_HALF(freg);
+
+ if (srcdst != freg)
+ FAIL_IF(push_inst32(compiler, VORR | VD4(freg) | VN4(srcdst) | VM4(srcdst)));
+
+ freg += SLJIT_QUAD_OTHER_HALF(freg);
+ return push_inst32(compiler, VMOV_i | VD4(freg));
+ }
+
+ if (srcdst == freg || (elem_size == 3 && srcdst == (freg + SLJIT_QUAD_OTHER_HALF(freg)))) {
+ FAIL_IF(push_inst32(compiler, VORR | ins | VD4(TMP_FREG2) | VN4(freg) | VM4(freg)));
+ srcdst = TMP_FREG2;
+ srcdstw = 0;
+ }
+ }
+
+ FAIL_IF(push_inst32(compiler, VMOV_i | ins | VD4(freg)));
+ }
+
+ if (reg_size == 4 && lane_index >= (0x8 >> elem_size)) {
+ lane_index -= (0x8 >> elem_size);
+ freg += SLJIT_QUAD_OTHER_HALF(freg);
+ }
+
+ if (srcdst & SLJIT_MEM) {
+ if (elem_size == 3)
+ return emit_fop_mem(compiler, ((type & SLJIT_SIMD_STORE) ? 0 : FPU_LOAD) | SLJIT_32, freg, srcdst, srcdstw);
+
+ FAIL_IF(sljit_emit_simd_mem_offset(compiler, &srcdst, srcdstw));
+
+ lane_index = lane_index << elem_size;
+ ins = (sljit_ins)((elem_size << 10) | (lane_index << 5));
+ return push_inst32(compiler, ((type & SLJIT_SIMD_STORE) ? VST1_s : VLD1_s) | ins | VD4(freg) | RN4(srcdst) | 0xf);
+ }
+
+ if (type & SLJIT_SIMD_FLOAT) {
+ if (elem_size == 3) {
+ if (type & SLJIT_SIMD_STORE)
+ return push_inst32(compiler, VORR | VD4(srcdst) | VN4(freg) | VM4(freg));
+ return push_inst32(compiler, VMOV_F32 | SLJIT_32 | VD4(freg) | VM4(srcdst));
+ }
+
+ if (type & SLJIT_SIMD_STORE) {
+ if (freg_ebit_map[freg] == 0) {
+ if (lane_index == 1)
+ freg = SLJIT_F64_SECOND(freg);
+
+ return push_inst32(compiler, VMOV_F32 | VD4(srcdst) | VM4(freg));
+ }
+
+ FAIL_IF(push_inst32(compiler, VMOV_s | (1 << 20) | ((sljit_ins)lane_index << 21) | VN4(freg) | RT4(TMP_REG1)));
+ return push_inst32(compiler, VMOV | VN4(srcdst) | RT4(TMP_REG1));
+ }
+
+ FAIL_IF(push_inst32(compiler, VMOV | (1 << 20) | VN4(srcdst) | RT4(TMP_REG1)));
+ return push_inst32(compiler, VMOV_s | ((sljit_ins)lane_index << 21) | VN4(freg) | RT4(TMP_REG1));
+ }
+
+ if (srcdst == SLJIT_IMM) {
+ if (elem_size < 2)
+ srcdstw &= ((sljit_sw)1 << (((sljit_sw)1 << elem_size) << 3)) - 1;
+
+ FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_uw)srcdstw));
+ srcdst = TMP_REG1;
+ }
+
+ if (elem_size == 0)
+ ins = 0x400000;
+ else if (elem_size == 1)
+ ins = 0x20;
+ else
+ ins = 0;
+
+ lane_index = lane_index << elem_size;
+ ins |= (sljit_ins)(((lane_index & 0x4) << 19) | ((lane_index & 0x3) << 5));
+
+ if (type & SLJIT_SIMD_STORE) {
+ ins |= (1 << 20);
+
+ if (elem_size < 2 && !(type & SLJIT_SIMD_LANE_SIGNED))
+ ins |= (1 << 23);
+ }
+
+ return push_inst32(compiler, VMOV_s | ins | VN4(freg) | RT4(srcdst));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_lane_replicate(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 src, sljit_s32 src_lane_index)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_ins ins;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_lane_replicate(compiler, type, freg, src, src_lane_index));
+
+ if (reg_size != 3 && reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if ((type & SLJIT_SIMD_FLOAT) && (elem_size < 2 || elem_size > 3))
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ if (reg_size == 4) {
+ freg = simd_get_quad_reg_index(freg);
+ src = simd_get_quad_reg_index(src);
+
+ if (src_lane_index >= (0x8 >> elem_size)) {
+ src_lane_index -= (0x8 >> elem_size);
+ src += SLJIT_QUAD_OTHER_HALF(src);
+ }
+ }
+
+ if (elem_size == 3) {
+ if (freg != src)
+ FAIL_IF(push_inst32(compiler, VORR | VD4(freg) | VN4(src) | VM4(src)));
+
+ freg += SLJIT_QUAD_OTHER_HALF(freg);
+
+ if (freg != src)
+ return push_inst32(compiler, VORR | VD4(freg) | VN4(src) | VM4(src));
+ return SLJIT_SUCCESS;
+ }
+
+ ins = ((((sljit_ins)src_lane_index << 1) | 1) << (16 + elem_size));
+
+ if (reg_size == 4)
+ ins |= (sljit_ins)1 << 6;
+
+ return push_inst32(compiler, VDUP_s | ins | VD4(freg) | VM4(src));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_extend(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 src, sljit_sw srcw)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_s32 elem2_size = SLJIT_SIMD_GET_ELEM2_SIZE(type);
+ sljit_s32 dst_reg;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_extend(compiler, type, freg, src, srcw));
+
+ ADJUST_LOCAL_OFFSET(src, srcw);
+
+ if (reg_size != 3 && reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if ((type & SLJIT_SIMD_FLOAT) && (elem_size != 2 || elem2_size != 3))
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ if (reg_size == 4)
+ freg = simd_get_quad_reg_index(freg);
+
+ if (src & SLJIT_MEM) {
+ FAIL_IF(sljit_emit_simd_mem_offset(compiler, &src, srcw));
+ if (reg_size == 4 && elem2_size - elem_size == 1)
+ FAIL_IF(push_inst32(compiler, VLD1 | (0x7 << 8) | VD4(freg) | RN4(src) | 0xf));
+ else
+ FAIL_IF(push_inst32(compiler, VLD1_s | (sljit_ins)((reg_size - elem2_size + elem_size) << 10) | VD4(freg) | RN4(src) | 0xf));
+ src = freg;
+ } else if (reg_size == 4)
+ src = simd_get_quad_reg_index(src);
+
+ if (!(type & SLJIT_SIMD_FLOAT)) {
+ dst_reg = (reg_size == 4) ? freg : TMP_FREG2;
+
+ do {
+ FAIL_IF(push_inst32(compiler, VSHLL | ((type & SLJIT_SIMD_EXTEND_SIGNED) ? 0 : (1 << 28))
+ | ((sljit_ins)1 << (19 + elem_size)) | VD4(dst_reg) | VM4(src)));
+ src = dst_reg;
+ } while (++elem_size < elem2_size);
+
+ if (dst_reg == TMP_FREG2)
+ return push_inst32(compiler, VORR | VD4(freg) | VN4(TMP_FREG2) | VM4(TMP_FREG2));
+ return SLJIT_SUCCESS;
+ }
+
+ /* No SIMD variant, must use VFP instead. */
+ SLJIT_ASSERT(reg_size == 4);
+
+ if (freg == src) {
+ freg += SLJIT_QUAD_OTHER_HALF(freg);
+ FAIL_IF(push_inst32(compiler, VCVT_F64_F32 | VD4(freg) | VM4(src) | 0x20));
+ freg += SLJIT_QUAD_OTHER_HALF(freg);
+ return push_inst32(compiler, VCVT_F64_F32 | VD4(freg) | VM4(src));
+ }
+
+ FAIL_IF(push_inst32(compiler, VCVT_F64_F32 | VD4(freg) | VM4(src)));
+ freg += SLJIT_QUAD_OTHER_HALF(freg);
+ return push_inst32(compiler, VCVT_F64_F32 | VD4(freg) | VM4(src) | 0x20);
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_sign(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 dst, sljit_sw dstw)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_ins ins, imms;
+ sljit_s32 dst_r;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_sign(compiler, type, freg, dst, dstw));
+
+ ADJUST_LOCAL_OFFSET(dst, dstw);
+
+ if (reg_size != 3 && reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if ((type & SLJIT_SIMD_FLOAT) && (elem_size < 2 || elem_size > 3))
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ switch (elem_size) {
+ case 0:
+ imms = 0x243219;
+ ins = VSHR | (1 << 28) | (0x9 << 16);
+ break;
+ case 1:
+ imms = (reg_size == 4) ? 0x243219 : 0x2231;
+ ins = VSHR | (1 << 28) | (0x11 << 16);
+ break;
+ case 2:
+ imms = (reg_size == 4) ? 0x2231 : 0x21;
+ ins = VSHR | (1 << 28) | (0x21 << 16);
+ break;
+ default:
+ imms = 0x21;
+ ins = VSHR | (1 << 28) | (0x1 << 16) | (1 << 7);
+ break;
+ }
+
+ if (reg_size == 4) {
+ freg = simd_get_quad_reg_index(freg);
+ ins |= (sljit_ins)1 << 6;
+ }
+
+ SLJIT_ASSERT((freg_map[TMP_FREG2] & 0x1) == 0);
+ FAIL_IF(push_inst32(compiler, ins | VD4(TMP_FREG2) | VM4(freg)));
+
+ if (reg_size == 4 && elem_size > 0)
+ FAIL_IF(push_inst32(compiler, VMOVN | ((sljit_ins)(elem_size - 1) << 18) | VD4(TMP_FREG2) | VM4(TMP_FREG2)));
+
+ ins = (reg_size == 4 && elem_size == 0) ? (1 << 6) : 0;
+
+ while (imms >= 0x100) {
+ FAIL_IF(push_inst32(compiler, VSRA | (1 << 28) | ins | ((imms & 0xff) << 16) | VD4(TMP_FREG2) | VM4(TMP_FREG2)));
+ imms >>= 8;
+ }
+
+ FAIL_IF(push_inst32(compiler, VSRA | (1 << 28) | ins | (1 << 7) | (imms << 16) | VD4(TMP_FREG2) | VM4(TMP_FREG2)));
+
+ dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
+ FAIL_IF(push_inst32(compiler, VMOV_s | (1 << 20) | (1 << 23) | (0x2 << 21) | RT4(dst_r) | VN4(TMP_FREG2)));
+
+ if (reg_size == 4 && elem_size == 0) {
+ SLJIT_ASSERT(freg_map[TMP_FREG2] + 1 == freg_map[TMP_FREG1]);
+ FAIL_IF(push_inst32(compiler, VMOV_s | (1 << 20) | (1 << 23) | (0x2 << 21) | RT4(TMP_REG2) | VN4(TMP_FREG1)));
+ FAIL_IF(push_inst32(compiler, ORR_W | RD4(dst_r) | RN4(dst_r) | RM4(TMP_REG2) | (0x2 << 12)));
+ }
+
+ if (dst_r == TMP_REG1)
+ return emit_op_mem(compiler, STORE | WORD_SIZE, TMP_REG1, dst, dstw, TMP_REG2);
+
+ return SLJIT_SUCCESS;
+}
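+
+/* Sign extraction: an unsigned VSHR first moves each lane's sign bit to
+   the bottom of the lane, then a chain of shift-accumulate (VSRA) steps
+   folds the per-lane bits together; imms packs that shift schedule one
+   byte per step. The packed mask is read back with VMOV_s, using a
+   second transfer plus an OR for the sixteen-lane byte case that spans
+   both halves of the quad register. */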
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_op2(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 dst_freg, sljit_s32 src1_freg, sljit_s32 src2_freg)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_ins ins = 0;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_op2(compiler, type, dst_freg, src1_freg, src2_freg));
+
+ if (reg_size != 3 && reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if ((type & SLJIT_SIMD_FLOAT) && (elem_size < 2 || elem_size > 3))
+ return SLJIT_ERR_UNSUPPORTED;
+
+ switch (SLJIT_SIMD_GET_OPCODE(type)) {
+ case SLJIT_SIMD_OP2_AND:
+ ins = VAND;
+ break;
+ case SLJIT_SIMD_OP2_OR:
+ ins = VORR;
+ break;
+ case SLJIT_SIMD_OP2_XOR:
+ ins = VEOR;
+ break;
+ }
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ if (reg_size == 4) {
+ dst_freg = simd_get_quad_reg_index(dst_freg);
+ src1_freg = simd_get_quad_reg_index(src1_freg);
+ src2_freg = simd_get_quad_reg_index(src2_freg);
+ ins |= (sljit_ins)1 << 6;
+ }
+
+ return push_inst32(compiler, ins | VD4(dst_freg) | VN4(src1_freg) | VM4(src2_freg));
+}
+
+#undef FPU_LOAD
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_atomic_load(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst_reg,
+ sljit_s32 mem_reg)
+{
+ sljit_ins ins;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_atomic_load(compiler, op, dst_reg, mem_reg));
+
+ switch (GET_OPCODE(op)) {
+ case SLJIT_MOV_U8:
+ ins = LDREXB;
+ break;
+ case SLJIT_MOV_U16:
+ ins = LDREXH;
+ break;
+ default:
+ ins = LDREX;
+ break;
+ }
+
+ return push_inst32(compiler, ins | RN4(mem_reg) | RT4(dst_reg));
+}
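+
+/* LDREX/LDREXB/LDREXH tag the address for exclusive monitoring; the load
+   itself behaves like a plain load. The reservation only has an effect
+   when the matching sljit_emit_atomic_store below targets the same
+   address. */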
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_atomic_store(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 src_reg,
+ sljit_s32 mem_reg,
+ sljit_s32 temp_reg)
+{
+ sljit_ins ins;
+
+ /* The temp_reg == mem_reg case is undefined by the API, so TMP_REG1 is used as the status register instead. */
+ SLJIT_UNUSED_ARG(temp_reg);
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_atomic_store(compiler, op, src_reg, mem_reg, temp_reg));
+
+ switch (GET_OPCODE(op)) {
+ case SLJIT_MOV_U8:
+ ins = STREXB | RM4(TMP_REG1);
+ break;
+ case SLJIT_MOV_U16:
+ ins = STREXH | RM4(TMP_REG1);
+ break;
+ default:
+ ins = STREX | RD4(TMP_REG1);
+ break;
+ }
+
+ FAIL_IF(push_inst32(compiler, ins | RN4(mem_reg) | RT4(src_reg)));
+ if (op & SLJIT_SET_ATOMIC_STORED)
+ return push_inst32(compiler, CMPI_W | RN4(TMP_REG1));
+
+ return SLJIT_SUCCESS;
}
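+
+/* STREX writes a status value (0 on success) into TMP_REG1, and the
+   trailing CMP folds it into the Z flag when SLJIT_SET_ATOMIC_STORED is
+   requested. A front end building a compare-and-swap is expected to
+   loop, along these lines (hypothetical caller-side pseudocode, not
+   emitted here):
+
+     retry:
+       atomic_load  r0, [r2]
+       compute the new value in r1
+       atomic_store r1, [r2], set ATOMIC_STORED
+       branch back to retry when the store was not accepted
+*/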
SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw init_value)
@@ -2346,7 +4099,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compi
set_const(const_, compiler);
dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
- PTR_FAIL_IF(emit_imm32_const(compiler, dst_r, init_value));
+ PTR_FAIL_IF(emit_imm32_const(compiler, dst_r, (sljit_uw)init_value));
if (dst & SLJIT_MEM)
PTR_FAIL_IF(emit_op_mem(compiler, WORD_SIZE | STORE, dst_r, dst, dstw, TMP_REG2));
@@ -2388,5 +4141,5 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_ta
SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset)
{
- sljit_set_jump_addr(addr, new_constant, executable_offset);
+ sljit_set_jump_addr(addr, (sljit_uw)new_constant, executable_offset);
}
diff --git a/src/3rdparty/pcre2/src/sljit/sljitNativeMIPS_32.c b/src/3rdparty/pcre2/src/sljit/sljitNativeMIPS_32.c
index a90345f1f8..9620b945f6 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitNativeMIPS_32.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitNativeMIPS_32.c
@@ -26,6 +26,49 @@
/* mips 32-bit arch dependent functions. */
+static sljit_s32 emit_copysign(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_sw src1, sljit_sw src2, sljit_sw dst)
+{
+ int is_32 = (op & SLJIT_32);
+ sljit_ins mfhc = MFC1, mthc = MTC1;
+ sljit_ins src1_r = FS(src1), src2_r = FS(src2), dst_r = FS(dst);
+
+ if (!is_32) {
+ switch (cpu_feature_list & CPU_FEATURE_FR) {
+#if defined(SLJIT_MIPS_REV) && SLJIT_MIPS_REV >= 2
+ case CPU_FEATURE_FR:
+ mfhc = MFHC1;
+ mthc = MTHC1;
+ break;
+#endif /* SLJIT_MIPS_REV >= 2 */
+ default:
+ src1_r |= (1 << 11);
+ src2_r |= (1 << 11);
+ dst_r |= (1 << 11);
+ break;
+ }
+ }
+
+ FAIL_IF(push_inst(compiler, mfhc | T(TMP_REG1) | src1_r, DR(TMP_REG1)));
+ FAIL_IF(push_inst(compiler, mfhc | T(TMP_REG2) | src2_r, DR(TMP_REG2)));
+ if (!is_32 && src1 != dst)
+ FAIL_IF(push_inst(compiler, MOV_fmt(FMT_S) | FS(src1) | FD(dst), MOVABLE_INS));
+#if !defined(SLJIT_MIPS_REV) || SLJIT_MIPS_REV <= 1
+ else
+ FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS));
+#endif /* MIPS III */
+ FAIL_IF(push_inst(compiler, XOR | T(TMP_REG1) | D(TMP_REG2) | S(TMP_REG2), DR(TMP_REG2)));
+ FAIL_IF(push_inst(compiler, SRL | T(TMP_REG2) | D(TMP_REG2) | SH_IMM(31), DR(TMP_REG2)));
+ FAIL_IF(push_inst(compiler, SLL | T(TMP_REG2) | D(TMP_REG2) | SH_IMM(31), DR(TMP_REG2)));
+ FAIL_IF(push_inst(compiler, XOR | T(TMP_REG2) | D(TMP_REG1) | S(TMP_REG1), DR(TMP_REG1)));
+ FAIL_IF(push_inst(compiler, mthc | T(TMP_REG1) | dst_r, MOVABLE_INS));
+#if !defined(SLJIT_MIPS_REV) || SLJIT_MIPS_REV <= 1
+ if (mthc == MTC1)
+ return push_inst(compiler, NOP, UNMOVABLE_INS);
+#endif /* MIPS III */
+ return SLJIT_SUCCESS;
+}
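+
+/* emit_copysign works on raw bit patterns: both high words are pulled
+   into integer registers (MFC1, or MFHC1 when the FPU runs 64-bit
+   registers), XOR-ed so only a differing sign bit survives, isolated by
+   the SRL/SLL pair, then XOR-ed back into the destination's high word.
+   The (1 << 11) adjustment addresses the odd register of a pair when the
+   FR bit is clear. */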
+
static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 dst_ar, sljit_sw imm)
{
if (!(imm & ~0xffff))
@@ -38,388 +81,112 @@ static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 dst_a
return (imm & 0xffff) ? push_inst(compiler, ORI | SA(dst_ar) | TA(dst_ar) | IMM(imm), dst_ar) : SLJIT_SUCCESS;
}
-#define EMIT_LOGICAL(op_imm, op_norm) \
- if (flags & SRC2_IMM) { \
- if (op & SLJIT_SET_Z) \
- FAIL_IF(push_inst(compiler, op_imm | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG)); \
- if (!(flags & UNUSED_DEST)) \
- FAIL_IF(push_inst(compiler, op_imm | S(src1) | T(dst) | IMM(src2), DR(dst))); \
- } \
- else { \
- if (op & SLJIT_SET_Z) \
- FAIL_IF(push_inst(compiler, op_norm | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); \
- if (!(flags & UNUSED_DEST)) \
- FAIL_IF(push_inst(compiler, op_norm | S(src1) | T(src2) | D(dst), DR(dst))); \
- }
-
-#define EMIT_SHIFT(op_imm, op_v) \
- if (flags & SRC2_IMM) { \
- if (op & SLJIT_SET_Z) \
- FAIL_IF(push_inst(compiler, op_imm | T(src1) | DA(EQUAL_FLAG) | SH_IMM(src2), EQUAL_FLAG)); \
- if (!(flags & UNUSED_DEST)) \
- FAIL_IF(push_inst(compiler, op_imm | T(src1) | D(dst) | SH_IMM(src2), DR(dst))); \
- } \
- else { \
- if (op & SLJIT_SET_Z) \
- FAIL_IF(push_inst(compiler, op_v | S(src2) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG)); \
- if (!(flags & UNUSED_DEST)) \
- FAIL_IF(push_inst(compiler, op_v | S(src2) | T(src1) | D(dst), DR(dst))); \
- }
-
-static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags,
- sljit_s32 dst, sljit_s32 src1, sljit_sw src2)
+static SLJIT_INLINE sljit_s32 emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw init_value)
{
- sljit_s32 is_overflow, is_carry, is_handled;
-
- switch (GET_OPCODE(op)) {
- case SLJIT_MOV:
- case SLJIT_MOV_U32:
- case SLJIT_MOV_S32:
- case SLJIT_MOV_P:
- SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
- if (dst != src2)
- return push_inst(compiler, ADDU | S(src2) | TA(0) | D(dst), DR(dst));
- return SLJIT_SUCCESS;
-
- case SLJIT_MOV_U8:
- case SLJIT_MOV_S8:
- SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
- if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) {
- if (op == SLJIT_MOV_S8) {
-#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1)
- return push_inst(compiler, SEB | T(src2) | D(dst), DR(dst));
-#else /* SLJIT_MIPS_REV < 1 */
- FAIL_IF(push_inst(compiler, SLL | T(src2) | D(dst) | SH_IMM(24), DR(dst)));
- return push_inst(compiler, SRA | T(dst) | D(dst) | SH_IMM(24), DR(dst));
-#endif /* SLJIT_MIPS_REV >= 1 */
- }
- return push_inst(compiler, ANDI | S(src2) | T(dst) | IMM(0xff), DR(dst));
- }
- else {
- SLJIT_ASSERT(dst == src2);
- }
- return SLJIT_SUCCESS;
-
- case SLJIT_MOV_U16:
- case SLJIT_MOV_S16:
- SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
- if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) {
- if (op == SLJIT_MOV_S16) {
-#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1)
- return push_inst(compiler, SEH | T(src2) | D(dst), DR(dst));
-#else /* SLJIT_MIPS_REV < 1 */
- FAIL_IF(push_inst(compiler, SLL | T(src2) | D(dst) | SH_IMM(16), DR(dst)));
- return push_inst(compiler, SRA | T(dst) | D(dst) | SH_IMM(16), DR(dst));
-#endif /* SLJIT_MIPS_REV >= 1 */
- }
- return push_inst(compiler, ANDI | S(src2) | T(dst) | IMM(0xffff), DR(dst));
- }
- else {
- SLJIT_ASSERT(dst == src2);
- }
- return SLJIT_SUCCESS;
-
- case SLJIT_NOT:
- SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
- if (op & SLJIT_SET_Z)
- FAIL_IF(push_inst(compiler, NOR | S(src2) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
- if (!(flags & UNUSED_DEST))
- FAIL_IF(push_inst(compiler, NOR | S(src2) | T(src2) | D(dst), DR(dst)));
- return SLJIT_SUCCESS;
-
- case SLJIT_CLZ:
- SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
-#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1)
- if (op & SLJIT_SET_Z)
- FAIL_IF(push_inst(compiler, CLZ | S(src2) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG));
- if (!(flags & UNUSED_DEST))
- FAIL_IF(push_inst(compiler, CLZ | S(src2) | T(dst) | D(dst), DR(dst)));
-#else /* SLJIT_MIPS_REV < 1 */
- if (SLJIT_UNLIKELY(flags & UNUSED_DEST)) {
- FAIL_IF(push_inst(compiler, SRL | T(src2) | DA(EQUAL_FLAG) | SH_IMM(31), EQUAL_FLAG));
- return push_inst(compiler, XORI | SA(EQUAL_FLAG) | TA(EQUAL_FLAG) | IMM(1), EQUAL_FLAG);
- }
- /* Nearly all instructions are unmovable in the following sequence. */
- FAIL_IF(push_inst(compiler, ADDU | S(src2) | TA(0) | D(TMP_REG1), DR(TMP_REG1)));
- /* Check zero. */
- FAIL_IF(push_inst(compiler, BEQ | S(TMP_REG1) | TA(0) | IMM(5), UNMOVABLE_INS));
- FAIL_IF(push_inst(compiler, ORI | SA(0) | T(dst) | IMM(32), UNMOVABLE_INS));
- FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(dst) | IMM(-1), DR(dst)));
- /* Loop for searching the highest bit. */
- FAIL_IF(push_inst(compiler, ADDIU | S(dst) | T(dst) | IMM(1), DR(dst)));
- FAIL_IF(push_inst(compiler, BGEZ | S(TMP_REG1) | IMM(-2), UNMOVABLE_INS));
- FAIL_IF(push_inst(compiler, SLL | T(TMP_REG1) | D(TMP_REG1) | SH_IMM(1), UNMOVABLE_INS));
-#endif /* SLJIT_MIPS_REV >= 1 */
- return SLJIT_SUCCESS;
-
- case SLJIT_ADD:
- is_overflow = GET_FLAG_TYPE(op) == SLJIT_OVERFLOW;
- is_carry = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY);
-
- if (flags & SRC2_IMM) {
- if (is_overflow) {
- if (src2 >= 0)
- FAIL_IF(push_inst(compiler, OR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG));
- else
- FAIL_IF(push_inst(compiler, NOR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG));
- }
- else if (op & SLJIT_SET_Z)
- FAIL_IF(push_inst(compiler, ADDIU | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG));
-
- if (is_overflow || is_carry) {
- if (src2 >= 0)
- FAIL_IF(push_inst(compiler, ORI | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG));
- else {
- FAIL_IF(push_inst(compiler, ADDIU | SA(0) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG));
- FAIL_IF(push_inst(compiler, OR | S(src1) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG));
- }
- }
- /* dst may be the same as src1 or src2. */
- if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK))
- FAIL_IF(push_inst(compiler, ADDIU | S(src1) | T(dst) | IMM(src2), DR(dst)));
- }
- else {
- if (is_overflow)
- FAIL_IF(push_inst(compiler, XOR | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
- else if (op & SLJIT_SET_Z)
- FAIL_IF(push_inst(compiler, ADDU | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
-
- if (is_overflow || is_carry)
- FAIL_IF(push_inst(compiler, OR | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG));
- /* dst may be the same as src1 or src2. */
- if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK))
- FAIL_IF(push_inst(compiler, ADDU | S(src1) | T(src2) | D(dst), DR(dst)));
- }
-
- /* a + b >= a | b (otherwise, the carry should be set to 1). */
- if (is_overflow || is_carry)
- FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG));
- if (!is_overflow)
- return SLJIT_SUCCESS;
- FAIL_IF(push_inst(compiler, SLL | TA(OTHER_FLAG) | D(TMP_REG1) | SH_IMM(31), DR(TMP_REG1)));
- FAIL_IF(push_inst(compiler, XOR | S(TMP_REG1) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG));
- FAIL_IF(push_inst(compiler, XOR | S(dst) | TA(EQUAL_FLAG) | DA(OTHER_FLAG), OTHER_FLAG));
- if (op & SLJIT_SET_Z)
- FAIL_IF(push_inst(compiler, ADDU | S(dst) | TA(0) | DA(EQUAL_FLAG), EQUAL_FLAG));
- return push_inst(compiler, SRL | TA(OTHER_FLAG) | DA(OTHER_FLAG) | SH_IMM(31), OTHER_FLAG);
-
- case SLJIT_ADDC:
- is_carry = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY);
-
- if (flags & SRC2_IMM) {
- if (is_carry) {
- if (src2 >= 0)
- FAIL_IF(push_inst(compiler, ORI | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG));
- else {
- FAIL_IF(push_inst(compiler, ADDIU | SA(0) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG));
- FAIL_IF(push_inst(compiler, OR | S(src1) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG));
- }
- }
- FAIL_IF(push_inst(compiler, ADDIU | S(src1) | T(dst) | IMM(src2), DR(dst)));
- } else {
- if (is_carry)
- FAIL_IF(push_inst(compiler, OR | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
- /* dst may be the same as src1 or src2. */
- FAIL_IF(push_inst(compiler, ADDU | S(src1) | T(src2) | D(dst), DR(dst)));
- }
- if (is_carry)
- FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG));
-
- FAIL_IF(push_inst(compiler, ADDU | S(dst) | TA(OTHER_FLAG) | D(dst), DR(dst)));
- if (!is_carry)
- return SLJIT_SUCCESS;
-
- /* Set ULESS_FLAG (dst == 0) && (OTHER_FLAG == 1). */
- FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG));
- /* Set carry flag. */
- return push_inst(compiler, OR | SA(OTHER_FLAG) | TA(EQUAL_FLAG) | DA(OTHER_FLAG), OTHER_FLAG);
-
- case SLJIT_SUB:
- if ((flags & SRC2_IMM) && src2 == SIMM_MIN) {
- FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(TMP_REG2) | IMM(src2), DR(TMP_REG2)));
- src2 = TMP_REG2;
- flags &= ~SRC2_IMM;
- }
-
- is_handled = 0;
-
- if (flags & SRC2_IMM) {
- if (GET_FLAG_TYPE(op) == SLJIT_LESS || GET_FLAG_TYPE(op) == SLJIT_GREATER_EQUAL) {
- FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG));
- is_handled = 1;
- }
- else if (GET_FLAG_TYPE(op) == SLJIT_SIG_LESS || GET_FLAG_TYPE(op) == SLJIT_SIG_GREATER_EQUAL) {
- FAIL_IF(push_inst(compiler, SLTI | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG));
- is_handled = 1;
- }
- }
-
- if (!is_handled && GET_FLAG_TYPE(op) >= SLJIT_LESS && GET_FLAG_TYPE(op) <= SLJIT_SIG_LESS_EQUAL) {
- is_handled = 1;
-
- if (flags & SRC2_IMM) {
- FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(TMP_REG2) | IMM(src2), DR(TMP_REG2)));
- src2 = TMP_REG2;
- flags &= ~SRC2_IMM;
- }
-
- if (GET_FLAG_TYPE(op) == SLJIT_LESS || GET_FLAG_TYPE(op) == SLJIT_GREATER_EQUAL) {
- FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG));
- }
- else if (GET_FLAG_TYPE(op) == SLJIT_GREATER || GET_FLAG_TYPE(op) == SLJIT_LESS_EQUAL)
- {
- FAIL_IF(push_inst(compiler, SLTU | S(src2) | T(src1) | DA(OTHER_FLAG), OTHER_FLAG));
- }
- else if (GET_FLAG_TYPE(op) == SLJIT_SIG_LESS || GET_FLAG_TYPE(op) == SLJIT_SIG_GREATER_EQUAL) {
- FAIL_IF(push_inst(compiler, SLT | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG));
- }
- else if (GET_FLAG_TYPE(op) == SLJIT_SIG_GREATER || GET_FLAG_TYPE(op) == SLJIT_SIG_LESS_EQUAL)
- {
- FAIL_IF(push_inst(compiler, SLT | S(src2) | T(src1) | DA(OTHER_FLAG), OTHER_FLAG));
- }
- }
-
- if (is_handled) {
- if (flags & SRC2_IMM) {
- if (op & SLJIT_SET_Z)
- FAIL_IF(push_inst(compiler, ADDIU | S(src1) | TA(EQUAL_FLAG) | IMM(-src2), EQUAL_FLAG));
- if (!(flags & UNUSED_DEST))
- return push_inst(compiler, ADDIU | S(src1) | T(dst) | IMM(-src2), DR(dst));
- }
- else {
- if (op & SLJIT_SET_Z)
- FAIL_IF(push_inst(compiler, SUBU | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
- if (!(flags & UNUSED_DEST))
- return push_inst(compiler, SUBU | S(src1) | T(src2) | D(dst), DR(dst));
- }
- return SLJIT_SUCCESS;
- }
-
- is_overflow = GET_FLAG_TYPE(op) == SLJIT_OVERFLOW;
- is_carry = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY);
+ FAIL_IF(push_inst(compiler, LUI | T(dst) | IMM(init_value >> 16), DR(dst)));
+ return push_inst(compiler, ORI | S(dst) | T(dst) | IMM(init_value), DR(dst));
+}
- if (flags & SRC2_IMM) {
- if (is_overflow) {
- if (src2 >= 0)
- FAIL_IF(push_inst(compiler, OR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG));
- else
- FAIL_IF(push_inst(compiler, NOR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG));
- }
- else if (op & SLJIT_SET_Z)
- FAIL_IF(push_inst(compiler, ADDIU | S(src1) | TA(EQUAL_FLAG) | IMM(-src2), EQUAL_FLAG));
-
- if (is_overflow || is_carry)
- FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG));
- /* dst may be the same as src1 or src2. */
- if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK))
- FAIL_IF(push_inst(compiler, ADDIU | S(src1) | T(dst) | IMM(-src2), DR(dst)));
- }
- else {
- if (is_overflow)
- FAIL_IF(push_inst(compiler, XOR | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
- else if (op & SLJIT_SET_Z)
- FAIL_IF(push_inst(compiler, SUBU | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
-
- if (is_overflow || is_carry)
- FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG));
- /* dst may be the same as src1 or src2. */
- if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK))
- FAIL_IF(push_inst(compiler, SUBU | S(src1) | T(src2) | D(dst), DR(dst)));
- }
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fset64(struct sljit_compiler *compiler,
+ sljit_s32 freg, sljit_f64 value)
+{
+ union {
+ struct {
+#if defined(SLJIT_LITTLE_ENDIAN) && SLJIT_LITTLE_ENDIAN
+ sljit_s32 lo;
+ sljit_s32 hi;
+#else /* !SLJIT_LITTLE_ENDIAN */
+ sljit_s32 hi;
+ sljit_s32 lo;
+#endif /* SLJIT_LITTLE_ENDIAN */
+ } bin;
+ sljit_f64 value;
+ } u;
- if (!is_overflow)
- return SLJIT_SUCCESS;
- FAIL_IF(push_inst(compiler, SLL | TA(OTHER_FLAG) | D(TMP_REG1) | SH_IMM(31), DR(TMP_REG1)));
- FAIL_IF(push_inst(compiler, XOR | S(TMP_REG1) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG));
- FAIL_IF(push_inst(compiler, XOR | S(dst) | TA(EQUAL_FLAG) | DA(OTHER_FLAG), OTHER_FLAG));
- if (op & SLJIT_SET_Z)
- FAIL_IF(push_inst(compiler, ADDU | S(dst) | TA(0) | DA(EQUAL_FLAG), EQUAL_FLAG));
- return push_inst(compiler, SRL | TA(OTHER_FLAG) | DA(OTHER_FLAG) | SH_IMM(31), OTHER_FLAG);
-
- case SLJIT_SUBC:
- if ((flags & SRC2_IMM) && src2 == SIMM_MIN) {
- FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(TMP_REG2) | IMM(src2), DR(TMP_REG2)));
- src2 = TMP_REG2;
- flags &= ~SRC2_IMM;
- }
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fset64(compiler, freg, value));
+
+ u.value = value;
+
+ if (u.bin.lo != 0)
+ FAIL_IF(load_immediate(compiler, DR(TMP_REG1), u.bin.lo));
+ if (u.bin.hi != 0)
+ FAIL_IF(load_immediate(compiler, DR(TMP_REG2), u.bin.hi));
+
+ FAIL_IF(push_inst(compiler, MTC1 | (u.bin.lo != 0 ? T(TMP_REG1) : TA(0)) | FS(freg), MOVABLE_INS));
+ switch (cpu_feature_list & CPU_FEATURE_FR) {
+#if defined(SLJIT_MIPS_REV) && SLJIT_MIPS_REV >= 2
+ case CPU_FEATURE_FR:
+ return push_inst(compiler, MTHC1 | (u.bin.hi != 0 ? T(TMP_REG2) : TA(0)) | FS(freg), MOVABLE_INS);
+#endif /* SLJIT_MIPS_REV >= 2 */
+ default:
+ FAIL_IF(push_inst(compiler, MTC1 | (u.bin.hi != 0 ? T(TMP_REG2) : TA(0)) | FS(freg) | (1 << 11), MOVABLE_INS));
+ break;
+ }
+#if !defined(SLJIT_MIPS_REV) || SLJIT_MIPS_REV <= 1
+ FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS));
+#endif /* MIPS III */
+ return SLJIT_SUCCESS;
+}
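+
+/* The union above splits the double into endian-aware 32-bit halves;
+   each half is loaded only when non-zero, otherwise $zero (TA(0)) is
+   transferred directly. The high half goes through MTHC1 on FR=1 cores
+   and through MTC1 to the odd register otherwise, and pre-R2 cores get
+   the trailing NOP because coprocessor transfers there have a hazard
+   slot. */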
- is_carry = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY);
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fcopy(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 freg, sljit_s32 reg)
+{
+ sljit_s32 reg2 = 0;
+ sljit_ins inst = FS(freg);
+ sljit_ins mthc = MTC1, mfhc = MFC1;
+ int is_32 = (op & SLJIT_32);
- if (flags & SRC2_IMM) {
- if (is_carry)
- FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG));
- /* dst may be the same as src1 or src2. */
- FAIL_IF(push_inst(compiler, ADDIU | S(src1) | T(dst) | IMM(-src2), DR(dst)));
- }
- else {
- if (is_carry)
- FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
- /* dst may be the same as src1 or src2. */
- FAIL_IF(push_inst(compiler, SUBU | S(src1) | T(src2) | D(dst), DR(dst)));
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fcopy(compiler, op, freg, reg));
+
+ op = GET_OPCODE(op);
+ if (reg & REG_PAIR_MASK) {
+ reg2 = REG_PAIR_SECOND(reg);
+ reg = REG_PAIR_FIRST(reg);
+
+ inst |= T(reg2);
+
+ if (op == SLJIT_COPY_TO_F64)
+ FAIL_IF(push_inst(compiler, MTC1 | inst, MOVABLE_INS));
+ else
+ FAIL_IF(push_inst(compiler, MFC1 | inst, DR(reg2)));
+
+ inst = FS(freg) | (1 << 11);
+#if defined(SLJIT_MIPS_REV) && SLJIT_MIPS_REV >= 2
+ if (cpu_feature_list & CPU_FEATURE_FR) {
+ mthc = MTHC1;
+ mfhc = MFHC1;
+ inst = FS(freg);
}
+#endif /* SLJIT_MIPS_REV >= 2 */
+ }
- if (is_carry)
- FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(OTHER_FLAG) | D(TMP_REG1), DR(TMP_REG1)));
-
- FAIL_IF(push_inst(compiler, SUBU | S(dst) | TA(OTHER_FLAG) | D(dst), DR(dst)));
- return (is_carry) ? push_inst(compiler, OR | SA(EQUAL_FLAG) | T(TMP_REG1) | DA(OTHER_FLAG), OTHER_FLAG) : SLJIT_SUCCESS;
-
- case SLJIT_MUL:
- SLJIT_ASSERT(!(flags & SRC2_IMM));
-
- if (GET_FLAG_TYPE(op) != SLJIT_OVERFLOW) {
-#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1)
- return push_inst(compiler, MUL | S(src1) | T(src2) | D(dst), DR(dst));
-#else /* SLJIT_MIPS_REV < 1 */
- FAIL_IF(push_inst(compiler, MULT | S(src1) | T(src2), MOVABLE_INS));
- return push_inst(compiler, MFLO | D(dst), DR(dst));
-#endif /* SLJIT_MIPS_REV >= 1 */
+ inst |= T(reg);
+ if (!is_32 && !reg2) {
+ switch (cpu_feature_list & CPU_FEATURE_FR) {
+#if defined(SLJIT_MIPS_REV) && SLJIT_MIPS_REV >= 2
+ case CPU_FEATURE_FR:
+ mthc = MTHC1;
+ mfhc = MFHC1;
+ break;
+#endif /* SLJIT_MIPS_REV >= 2 */
+ default:
+ inst |= (1 << 11);
+ break;
}
-
-#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6)
- FAIL_IF(push_inst(compiler, MUL | S(src1) | T(src2) | D(dst), DR(dst)));
- FAIL_IF(push_inst(compiler, MUH | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
-#else /* SLJIT_MIPS_REV < 6 */
- FAIL_IF(push_inst(compiler, MULT | S(src1) | T(src2), MOVABLE_INS));
- FAIL_IF(push_inst(compiler, MFHI | DA(EQUAL_FLAG), EQUAL_FLAG));
- FAIL_IF(push_inst(compiler, MFLO | D(dst), DR(dst)));
-#endif /* SLJIT_MIPS_REV >= 6 */
- FAIL_IF(push_inst(compiler, SRA | T(dst) | DA(OTHER_FLAG) | SH_IMM(31), OTHER_FLAG));
- return push_inst(compiler, SUBU | SA(EQUAL_FLAG) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG);
-
- case SLJIT_AND:
- EMIT_LOGICAL(ANDI, AND);
- return SLJIT_SUCCESS;
-
- case SLJIT_OR:
- EMIT_LOGICAL(ORI, OR);
- return SLJIT_SUCCESS;
-
- case SLJIT_XOR:
- EMIT_LOGICAL(XORI, XOR);
- return SLJIT_SUCCESS;
-
- case SLJIT_SHL:
- EMIT_SHIFT(SLL, SLLV);
- return SLJIT_SUCCESS;
-
- case SLJIT_LSHR:
- EMIT_SHIFT(SRL, SRLV);
- return SLJIT_SUCCESS;
-
- case SLJIT_ASHR:
- EMIT_SHIFT(SRA, SRAV);
- return SLJIT_SUCCESS;
}
- SLJIT_UNREACHABLE();
- return SLJIT_SUCCESS;
-}
+ if (op == SLJIT_COPY_TO_F64)
+ FAIL_IF(push_inst(compiler, mthc | inst, MOVABLE_INS));
+ else
+ FAIL_IF(push_inst(compiler, mfhc | inst, DR(reg)));
-static SLJIT_INLINE sljit_s32 emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw init_value)
-{
- FAIL_IF(push_inst(compiler, LUI | T(dst) | IMM(init_value >> 16), DR(dst)));
- return push_inst(compiler, ORI | S(dst) | T(dst) | IMM(init_value), DR(dst));
+#if !defined(SLJIT_MIPS_REV) || SLJIT_MIPS_REV <= 1
+ if (mthc == MTC1 || mfhc == MFC1)
+ return push_inst(compiler, NOP, UNMOVABLE_INS);
+#endif /* MIPS III */
+ return SLJIT_SUCCESS;
}
SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_target, sljit_sw executable_offset)
@@ -438,92 +205,132 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_ta
SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset)
{
- sljit_set_jump_addr(addr, new_constant, executable_offset);
+ sljit_set_jump_addr(addr, (sljit_uw)new_constant, executable_offset);
}
-static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_ins *ins_ptr)
+static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_ins *ins_ptr, sljit_u32 *extra_space)
{
- sljit_s32 stack_offset = 0;
- sljit_s32 arg_count = 0;
+ sljit_u32 is_tail_call = *extra_space & SLJIT_CALL_RETURN;
+ sljit_u32 offset = 0;
sljit_s32 float_arg_count = 0;
sljit_s32 word_arg_count = 0;
sljit_s32 types = 0;
- sljit_s32 arg_count_save, types_save;
sljit_ins prev_ins = NOP;
sljit_ins ins = NOP;
sljit_u8 offsets[4];
+ sljit_u8 *offsets_ptr = offsets;
+#if defined(SLJIT_LITTLE_ENDIAN) && SLJIT_LITTLE_ENDIAN
+ sljit_ins f64_hi = TA(7), f64_lo = TA(6);
+#else /* !SLJIT_LITTLE_ENDIAN */
+ sljit_ins f64_hi = TA(6), f64_lo = TA(7);
+#endif /* SLJIT_LITTLE_ENDIAN */
SLJIT_ASSERT(reg_map[TMP_REG1] == 4 && freg_map[TMP_FREG1] == 12);
- arg_types >>= SLJIT_DEF_SHIFT;
+ arg_types >>= SLJIT_ARG_SHIFT;
+
+ /* See ABI description in sljit_emit_enter. */
while (arg_types) {
- types = (types << SLJIT_DEF_SHIFT) | (arg_types & SLJIT_DEF_MASK);
+ types = (types << SLJIT_ARG_SHIFT) | (arg_types & SLJIT_ARG_MASK);
+ *offsets_ptr = (sljit_u8)offset;
- switch (arg_types & SLJIT_DEF_MASK) {
- case SLJIT_ARG_TYPE_F32:
- offsets[arg_count] = (sljit_u8)stack_offset;
+ switch (arg_types & SLJIT_ARG_MASK) {
+ case SLJIT_ARG_TYPE_F64:
+ if (offset & 0x7) {
+ offset += sizeof(sljit_sw);
+ *offsets_ptr = (sljit_u8)offset;
+ }
- if (word_arg_count == 0 && arg_count <= 1)
- offsets[arg_count] = 254 + arg_count;
+ if (word_arg_count == 0 && float_arg_count <= 1)
+ *offsets_ptr = (sljit_u8)(254 + float_arg_count);
- stack_offset += sizeof(sljit_f32);
- arg_count++;
+ offset += sizeof(sljit_f64);
float_arg_count++;
break;
- case SLJIT_ARG_TYPE_F64:
- if (stack_offset & 0x7)
- stack_offset += sizeof(sljit_sw);
- offsets[arg_count] = (sljit_u8)stack_offset;
-
- if (word_arg_count == 0 && arg_count <= 1)
- offsets[arg_count] = 254 + arg_count;
+ case SLJIT_ARG_TYPE_F32:
+ if (word_arg_count == 0 && float_arg_count <= 1)
+ *offsets_ptr = (sljit_u8)(254 + float_arg_count);
- stack_offset += sizeof(sljit_f64);
- arg_count++;
+ offset += sizeof(sljit_f32);
float_arg_count++;
break;
default:
- offsets[arg_count] = (sljit_u8)stack_offset;
- stack_offset += sizeof(sljit_sw);
- arg_count++;
+ offset += sizeof(sljit_sw);
word_arg_count++;
break;
}
- arg_types >>= SLJIT_DEF_SHIFT;
+ arg_types >>= SLJIT_ARG_SHIFT;
+ offsets_ptr++;
}
- /* Stack is aligned to 16 bytes, max two doubles can be placed on the stack. */
- if (stack_offset > 16)
- FAIL_IF(push_inst(compiler, ADDIU | S(SLJIT_SP) | T(SLJIT_SP) | IMM(-16), DR(SLJIT_SP)));
+ /* Stack is aligned to 16 bytes. */
+ SLJIT_ASSERT(offset <= 8 * sizeof(sljit_sw));
- types_save = types;
- arg_count_save = arg_count;
+ if (offset > 4 * sizeof(sljit_sw) && (!is_tail_call || offset > compiler->args_size)) {
+ if (is_tail_call) {
+ offset = (offset + sizeof(sljit_sw) + 15) & ~(sljit_uw)0xf;
+ FAIL_IF(emit_stack_frame_release(compiler, (sljit_s32)offset, &prev_ins));
+ *extra_space = offset;
+ } else {
+ FAIL_IF(push_inst(compiler, ADDIU | S(SLJIT_SP) | T(SLJIT_SP) | IMM(-16), DR(SLJIT_SP)));
+ *extra_space = 16;
+ }
+ } else {
+ if (is_tail_call)
+ FAIL_IF(emit_stack_frame_release(compiler, 0, &prev_ins));
+ *extra_space = 0;
+ }
while (types) {
- switch (types & SLJIT_DEF_MASK) {
- case SLJIT_ARG_TYPE_F32:
- arg_count--;
- if (offsets[arg_count] < 254)
- ins = SWC1 | S(SLJIT_SP) | FT(float_arg_count) | IMM(offsets[arg_count]);
+ --offsets_ptr;
+
+ switch (types & SLJIT_ARG_MASK) {
+ case SLJIT_ARG_TYPE_F64:
+ if (*offsets_ptr < 4 * sizeof(sljit_sw)) {
+ if (prev_ins != NOP)
+ FAIL_IF(push_inst(compiler, prev_ins, MOVABLE_INS));
+
+ /* Must be preceded by at least one other argument,
+ * and its starting offset must be 8 because of alignment. */
+ SLJIT_ASSERT((*offsets_ptr >> 2) == 2);
+ switch (cpu_feature_list & CPU_FEATURE_FR) {
+#if defined(SLJIT_MIPS_REV) && SLJIT_MIPS_REV >= 2
+ case CPU_FEATURE_FR:
+ prev_ins = MFHC1 | f64_hi | FS(float_arg_count);
+ break;
+#endif /* SLJIT_MIPS_REV >= 2 */
+ default:
+ prev_ins = MFC1 | f64_hi | FS(float_arg_count) | (1 << 11);
+ break;
+ }
+ ins = MFC1 | f64_lo | FS(float_arg_count);
+ } else if (*offsets_ptr < 254)
+ ins = SDC1 | S(SLJIT_SP) | FT(float_arg_count) | IMM(*offsets_ptr);
+ else if (*offsets_ptr == 254)
+ ins = MOV_fmt(FMT_D) | FS(SLJIT_FR0) | FD(TMP_FREG1);
+
float_arg_count--;
break;
- case SLJIT_ARG_TYPE_F64:
- arg_count--;
- if (offsets[arg_count] < 254)
- ins = SDC1 | S(SLJIT_SP) | FT(float_arg_count) | IMM(offsets[arg_count]);
+ case SLJIT_ARG_TYPE_F32:
+ if (*offsets_ptr < 4 * sizeof(sljit_sw))
+ ins = MFC1 | TA(4 + (*offsets_ptr >> 2)) | FS(float_arg_count);
+ else if (*offsets_ptr < 254)
+ ins = SWC1 | S(SLJIT_SP) | FT(float_arg_count) | IMM(*offsets_ptr);
+ else if (*offsets_ptr == 254)
+ ins = MOV_fmt(FMT_S) | FS(SLJIT_FR0) | FD(TMP_FREG1);
+
float_arg_count--;
break;
default:
- if (offsets[arg_count - 1] >= 16)
- ins = SW | S(SLJIT_SP) | T(word_arg_count) | IMM(offsets[arg_count - 1]);
- else if (arg_count != word_arg_count)
- ins = ADDU | S(word_arg_count) | TA(0) | DA(4 + (offsets[arg_count - 1] >> 2));
- else if (arg_count == 1)
+ if (*offsets_ptr >= 4 * sizeof(sljit_sw))
+ ins = SW | S(SLJIT_SP) | T(word_arg_count) | IMM(*offsets_ptr);
+ else if ((*offsets_ptr >> 2) != word_arg_count - 1)
+ ins = ADDU | S(word_arg_count) | TA(0) | DA(4 + (*offsets_ptr >> 2));
+ else if (*offsets_ptr == 0)
ins = ADDU | S(SLJIT_R0) | TA(0) | DA(4);
- arg_count--;
word_arg_count--;
break;
}
@@ -535,45 +342,7 @@ static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_t
ins = NOP;
}
- types >>= SLJIT_DEF_SHIFT;
- }
-
- types = types_save;
- arg_count = arg_count_save;
-
- while (types) {
- switch (types & SLJIT_DEF_MASK) {
- case SLJIT_ARG_TYPE_F32:
- arg_count--;
- if (offsets[arg_count] == 254)
- ins = MOV_S | FMT_S | FS(SLJIT_FR0) | FD(TMP_FREG1);
- else if (offsets[arg_count] < 16)
- ins = LW | S(SLJIT_SP) | TA(4 + (offsets[arg_count] >> 2)) | IMM(offsets[arg_count]);
- break;
- case SLJIT_ARG_TYPE_F64:
- arg_count--;
- if (offsets[arg_count] == 254)
- ins = MOV_S | FMT_D | FS(SLJIT_FR0) | FD(TMP_FREG1);
- else if (offsets[arg_count] < 16) {
- if (prev_ins != NOP)
- FAIL_IF(push_inst(compiler, prev_ins, MOVABLE_INS));
- prev_ins = LW | S(SLJIT_SP) | TA(4 + (offsets[arg_count] >> 2)) | IMM(offsets[arg_count]);
- ins = LW | S(SLJIT_SP) | TA(5 + (offsets[arg_count] >> 2)) | IMM(offsets[arg_count] + sizeof(sljit_sw));
- }
- break;
- default:
- arg_count--;
- break;
- }
-
- if (ins != NOP) {
- if (prev_ins != NOP)
- FAIL_IF(push_inst(compiler, prev_ins, MOVABLE_INS));
- prev_ins = ins;
- ins = NOP;
- }
-
- types >>= SLJIT_DEF_SHIFT;
+ types >>= SLJIT_ARG_SHIFT;
}
*ins_ptr = prev_ins;
@@ -581,42 +350,12 @@ static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_t
return SLJIT_SUCCESS;
}
-static sljit_s32 post_call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types)
-{
- sljit_s32 stack_offset = 0;
-
- arg_types >>= SLJIT_DEF_SHIFT;
-
- while (arg_types) {
- switch (arg_types & SLJIT_DEF_MASK) {
- case SLJIT_ARG_TYPE_F32:
- stack_offset += sizeof(sljit_f32);
- break;
- case SLJIT_ARG_TYPE_F64:
- if (stack_offset & 0x7)
- stack_offset += sizeof(sljit_sw);
- stack_offset += sizeof(sljit_f64);
- break;
- default:
- stack_offset += sizeof(sljit_sw);
- break;
- }
-
- arg_types >>= SLJIT_DEF_SHIFT;
- }
-
- /* Stack is aligned to 16 bytes, max two doubles can be placed on the stack. */
- if (stack_offset > 16)
- return push_inst(compiler, ADDIU | S(SLJIT_SP) | T(SLJIT_SP) | IMM(16), DR(SLJIT_SP));
-
- return SLJIT_SUCCESS;
-}
-
SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type,
sljit_s32 arg_types)
{
struct sljit_jump *jump;
- sljit_ins ins;
+ sljit_u32 extra_space = 0;
+ sljit_ins ins = NOP;
CHECK_ERROR_PTR();
CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types));
@@ -624,21 +363,46 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compile
jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
PTR_FAIL_IF(!jump);
set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP);
- type &= 0xff;
- PTR_FAIL_IF(call_with_args(compiler, arg_types, &ins));
+ if ((type & 0xff) != SLJIT_CALL_REG_ARG) {
+ extra_space = (sljit_u32)type;
+ PTR_FAIL_IF(call_with_args(compiler, arg_types, &ins, &extra_space));
+ } else if (type & SLJIT_CALL_RETURN)
+ PTR_FAIL_IF(emit_stack_frame_release(compiler, 0, &ins));
SLJIT_ASSERT(DR(PIC_ADDR_REG) == 25 && PIC_ADDR_REG == TMP_REG2);
- PTR_FAIL_IF(emit_const(compiler, PIC_ADDR_REG, 0));
+ if (ins == NOP && compiler->delay_slot != UNMOVABLE_INS)
+ jump->flags |= IS_MOVABLE;
+
+ if (!(type & SLJIT_CALL_RETURN) || extra_space > 0) {
+ jump->flags |= IS_JAL;
+
+ if ((type & 0xff) != SLJIT_CALL_REG_ARG)
+ jump->flags |= IS_CALL;
+
+ PTR_FAIL_IF(push_inst(compiler, JALR | S(PIC_ADDR_REG) | DA(RETURN_ADDR_REG), UNMOVABLE_INS));
+ } else
+ PTR_FAIL_IF(push_inst(compiler, JR | S(PIC_ADDR_REG), UNMOVABLE_INS));
- jump->flags |= IS_JAL | IS_CALL;
- PTR_FAIL_IF(push_inst(compiler, JALR | S(PIC_ADDR_REG) | DA(RETURN_ADDR_REG), UNMOVABLE_INS));
jump->addr = compiler->size;
PTR_FAIL_IF(push_inst(compiler, ins, UNMOVABLE_INS));
- PTR_FAIL_IF(post_call_with_args(compiler, arg_types));
+ /* Maximum number of instructions required for generating a constant. */
+ compiler->size += 2;
+
+ if (extra_space == 0)
+ return jump;
+ if (type & SLJIT_CALL_RETURN)
+ PTR_FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, RETURN_ADDR_REG,
+ SLJIT_MEM1(SLJIT_SP), (sljit_sw)(extra_space - sizeof(sljit_sw))));
+
+ if (type & SLJIT_CALL_RETURN)
+ PTR_FAIL_IF(push_inst(compiler, JR | SA(RETURN_ADDR_REG), UNMOVABLE_INS));
+
+ PTR_FAIL_IF(push_inst(compiler, ADDIU | S(SLJIT_SP) | T(SLJIT_SP) | IMM(extra_space),
+ (type & SLJIT_CALL_RETURN) ? UNMOVABLE_INS : DR(SLJIT_SP)));
return jump;
}
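[Editor's note: for orientation, when extra_space != 0 and SLJIT_CALL_RETURN is set, the pushes above produce roughly the following shape; this is a sketch reconstructed from the push_inst calls in this hunk, not output quoted from the patch:]

/* jalr  $t9                       ; call through PIC_ADDR_REG ($25)
 * <staged argument move>          ; fills the jalr delay slot
 * lw    $ra, extra_space-4($sp)   ; reload the saved return address
 * jr    $ra                       ; return on behalf of the caller
 * addiu $sp, $sp, extra_space     ; stack released in the jr delay slot
 */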
@@ -646,26 +410,63 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compi
sljit_s32 arg_types,
sljit_s32 src, sljit_sw srcw)
{
+ sljit_u32 extra_space = (sljit_u32)type;
sljit_ins ins;
CHECK_ERROR();
CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw));
+ if (src & SLJIT_MEM) {
+ ADJUST_LOCAL_OFFSET(src, srcw);
+ FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, DR(PIC_ADDR_REG), src, srcw));
+ src = PIC_ADDR_REG;
+ srcw = 0;
+ }
+
+ if ((type & 0xff) == SLJIT_CALL_REG_ARG) {
+ if (type & SLJIT_CALL_RETURN) {
+ if (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) {
+ FAIL_IF(push_inst(compiler, ADDU | S(src) | TA(0) | D(PIC_ADDR_REG), DR(PIC_ADDR_REG)));
+ src = PIC_ADDR_REG;
+ srcw = 0;
+ }
+
+ FAIL_IF(emit_stack_frame_release(compiler, 0, &ins));
+
+ if (ins != NOP)
+ FAIL_IF(push_inst(compiler, ins, MOVABLE_INS));
+ }
+
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_ijump(compiler, type, src, srcw);
+ }
+
SLJIT_ASSERT(DR(PIC_ADDR_REG) == 25 && PIC_ADDR_REG == TMP_REG2);
- if (src & SLJIT_IMM)
+ if (src == SLJIT_IMM)
FAIL_IF(load_immediate(compiler, DR(PIC_ADDR_REG), srcw));
- else if (FAST_IS_REG(src))
+ else if (src != PIC_ADDR_REG)
FAIL_IF(push_inst(compiler, ADDU | S(src) | TA(0) | D(PIC_ADDR_REG), DR(PIC_ADDR_REG)));
- else if (src & SLJIT_MEM) {
- ADJUST_LOCAL_OFFSET(src, srcw);
- FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, DR(PIC_ADDR_REG), src, srcw));
- }
- FAIL_IF(call_with_args(compiler, arg_types, &ins));
+ FAIL_IF(call_with_args(compiler, arg_types, &ins, &extra_space));
/* Register input. */
- FAIL_IF(push_inst(compiler, JALR | S(PIC_ADDR_REG) | DA(RETURN_ADDR_REG), UNMOVABLE_INS));
+ if (!(type & SLJIT_CALL_RETURN) || extra_space > 0)
+ FAIL_IF(push_inst(compiler, JALR | S(PIC_ADDR_REG) | DA(RETURN_ADDR_REG), UNMOVABLE_INS));
+ else
+ FAIL_IF(push_inst(compiler, JR | S(PIC_ADDR_REG), UNMOVABLE_INS));
FAIL_IF(push_inst(compiler, ins, UNMOVABLE_INS));
- return post_call_with_args(compiler, arg_types);
+
+ if (extra_space == 0)
+ return SLJIT_SUCCESS;
+
+ if (type & SLJIT_CALL_RETURN)
+ FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, RETURN_ADDR_REG,
+ SLJIT_MEM1(SLJIT_SP), (sljit_sw)(extra_space - sizeof(sljit_sw))));
+
+ if (type & SLJIT_CALL_RETURN)
+ FAIL_IF(push_inst(compiler, JR | SA(RETURN_ADDR_REG), UNMOVABLE_INS));
+
+ return push_inst(compiler, ADDIU | S(SLJIT_SP) | T(SLJIT_SP) | IMM(extra_space),
+ (type & SLJIT_CALL_RETURN) ? UNMOVABLE_INS : DR(SLJIT_SP));
}
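[Editor's note: the first loop of call_with_args above assigns each argument an o32 slot byte. A minimal standalone sketch of that rule (an illustrative restatement, not patch code; F32 handling omitted): offsets below 16 name the argument registers $a0..$a3 (offset / 4 picks the register), while the markers 254 and 255 mark the first two leading float arguments, which travel in $f12/$f14.]

#include <stdint.h>

static void assign_o32_slots(const int *is_f64, int n, uint8_t *slots)
{
	unsigned int offset = 0;
	int word_args = 0, float_args = 0, i;

	for (i = 0; i < n; i++) {
		if (is_f64[i]) {
			if (offset & 0x7)
				offset += 4; /* doubles need 8-byte alignment */
			slots[i] = (uint8_t)offset;
			/* Leading floats stay in FPU argument registers. */
			if (word_args == 0 && float_args <= 1)
				slots[i] = (uint8_t)(254 + float_args);
			offset += 8;
			float_args++;
		} else {
			slots[i] = (uint8_t)offset;
			offset += 4;
			word_args++;
		}
	}
}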
diff --git a/src/3rdparty/pcre2/src/sljit/sljitNativeMIPS_64.c b/src/3rdparty/pcre2/src/sljit/sljitNativeMIPS_64.c
index 1f22e49ed9..52a0d3fb7a 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitNativeMIPS_64.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitNativeMIPS_64.c
@@ -26,6 +26,23 @@
/* mips 64-bit arch dependent functions. */
+static sljit_s32 emit_copysign(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 src1, sljit_s32 src2, sljit_s32 dst)
+{
+ FAIL_IF(push_inst(compiler, SELECT_OP(DMFC1, MFC1) | T(TMP_REG1) | FS(src1), DR(TMP_REG1)));
+ FAIL_IF(push_inst(compiler, SELECT_OP(DMFC1, MFC1) | T(TMP_REG2) | FS(src2), DR(TMP_REG2)));
+ FAIL_IF(push_inst(compiler, XOR | S(TMP_REG2) | T(TMP_REG1) | D(TMP_REG2), DR(TMP_REG2)));
+ FAIL_IF(push_inst(compiler, SELECT_OP(DSRL32, SRL) | T(TMP_REG2) | D(TMP_REG2) | SH_IMM(31), DR(TMP_REG2)));
+ FAIL_IF(push_inst(compiler, SELECT_OP(DSLL32, SLL) | T(TMP_REG2) | D(TMP_REG2) | SH_IMM(31), DR(TMP_REG2)));
+ FAIL_IF(push_inst(compiler, XOR | S(TMP_REG1) | T(TMP_REG2) | D(TMP_REG1), DR(TMP_REG1)));
+ FAIL_IF(push_inst(compiler, SELECT_OP(DMTC1, MTC1) | T(TMP_REG1) | FS(dst), MOVABLE_INS));
+#if !defined(SLJIT_MIPS_REV) || SLJIT_MIPS_REV <= 1
+ if (!(op & SLJIT_32))
+ return push_inst(compiler, NOP, UNMOVABLE_INS);
+#endif /* MIPS III */
+ return SLJIT_SUCCESS;
+}
+
static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 dst_ar, sljit_sw imm)
{
sljit_s32 shift = 32;
@@ -46,9 +63,9 @@ static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 dst_a
}
/* Zero extended number. */
- uimm = imm;
+ uimm = (sljit_uw)imm;
if (imm < 0) {
- uimm = ~imm;
+ uimm = ~(sljit_uw)imm;
inv = 1;
}
@@ -118,420 +135,77 @@ static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 dst_a
return !(imm & 0xffff) ? SLJIT_SUCCESS : push_inst(compiler, ORI | SA(dst_ar) | TA(dst_ar) | IMM(imm), dst_ar);
}
-#define SELECT_OP(a, b) \
- (!(op & SLJIT_I32_OP) ? a : b)
-
-#define EMIT_LOGICAL(op_imm, op_norm) \
- if (flags & SRC2_IMM) { \
- if (op & SLJIT_SET_Z) \
- FAIL_IF(push_inst(compiler, op_imm | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG)); \
- if (!(flags & UNUSED_DEST)) \
- FAIL_IF(push_inst(compiler, op_imm | S(src1) | T(dst) | IMM(src2), DR(dst))); \
- } \
- else { \
- if (op & SLJIT_SET_Z) \
- FAIL_IF(push_inst(compiler, op_norm | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); \
- if (!(flags & UNUSED_DEST)) \
- FAIL_IF(push_inst(compiler, op_norm | S(src1) | T(src2) | D(dst), DR(dst))); \
- }
-
-#define EMIT_SHIFT(op_dimm, op_dimm32, op_imm, op_dv, op_v) \
- if (flags & SRC2_IMM) { \
- if (src2 >= 32) { \
- SLJIT_ASSERT(!(op & SLJIT_I32_OP)); \
- ins = op_dimm32; \
- src2 -= 32; \
- } \
- else \
- ins = (op & SLJIT_I32_OP) ? op_imm : op_dimm; \
- if (op & SLJIT_SET_Z) \
- FAIL_IF(push_inst(compiler, ins | T(src1) | DA(EQUAL_FLAG) | SH_IMM(src2), EQUAL_FLAG)); \
- if (!(flags & UNUSED_DEST)) \
- FAIL_IF(push_inst(compiler, ins | T(src1) | D(dst) | SH_IMM(src2), DR(dst))); \
- } \
- else { \
- ins = (op & SLJIT_I32_OP) ? op_v : op_dv; \
- if (op & SLJIT_SET_Z) \
- FAIL_IF(push_inst(compiler, ins | S(src2) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG)); \
- if (!(flags & UNUSED_DEST)) \
- FAIL_IF(push_inst(compiler, ins | S(src2) | T(src1) | D(dst), DR(dst))); \
- }
-
-static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags,
- sljit_s32 dst, sljit_s32 src1, sljit_sw src2)
+static SLJIT_INLINE sljit_s32 emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw init_value)
{
- sljit_ins ins;
- sljit_s32 is_overflow, is_carry, is_handled;
-
- switch (GET_OPCODE(op)) {
- case SLJIT_MOV:
- case SLJIT_MOV_P:
- SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
- if (dst != src2)
- return push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src2) | TA(0) | D(dst), DR(dst));
- return SLJIT_SUCCESS;
-
- case SLJIT_MOV_U8:
- case SLJIT_MOV_S8:
- SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
- if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) {
- if (op == SLJIT_MOV_S8) {
- FAIL_IF(push_inst(compiler, DSLL32 | T(src2) | D(dst) | SH_IMM(24), DR(dst)));
- return push_inst(compiler, DSRA32 | T(dst) | D(dst) | SH_IMM(24), DR(dst));
- }
- return push_inst(compiler, ANDI | S(src2) | T(dst) | IMM(0xff), DR(dst));
- }
- else {
- SLJIT_ASSERT(dst == src2);
- }
- return SLJIT_SUCCESS;
-
- case SLJIT_MOV_U16:
- case SLJIT_MOV_S16:
- SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
- if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) {
- if (op == SLJIT_MOV_S16) {
- FAIL_IF(push_inst(compiler, DSLL32 | T(src2) | D(dst) | SH_IMM(16), DR(dst)));
- return push_inst(compiler, DSRA32 | T(dst) | D(dst) | SH_IMM(16), DR(dst));
- }
- return push_inst(compiler, ANDI | S(src2) | T(dst) | IMM(0xffff), DR(dst));
- }
- else {
- SLJIT_ASSERT(dst == src2);
- }
- return SLJIT_SUCCESS;
-
- case SLJIT_MOV_U32:
- SLJIT_ASSERT(!(op & SLJIT_I32_OP));
- FAIL_IF(push_inst(compiler, DSLL32 | T(src2) | D(dst) | SH_IMM(0), DR(dst)));
- return push_inst(compiler, DSRL32 | T(dst) | D(dst) | SH_IMM(0), DR(dst));
-
- case SLJIT_MOV_S32:
- SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
- return push_inst(compiler, SLL | T(src2) | D(dst) | SH_IMM(0), DR(dst));
-
- case SLJIT_NOT:
- SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
- if (op & SLJIT_SET_Z)
- FAIL_IF(push_inst(compiler, NOR | S(src2) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
- if (!(flags & UNUSED_DEST))
- FAIL_IF(push_inst(compiler, NOR | S(src2) | T(src2) | D(dst), DR(dst)));
- return SLJIT_SUCCESS;
-
- case SLJIT_CLZ:
- SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
-#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1)
- if (op & SLJIT_SET_Z)
- FAIL_IF(push_inst(compiler, SELECT_OP(DCLZ, CLZ) | S(src2) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG));
- if (!(flags & UNUSED_DEST))
- FAIL_IF(push_inst(compiler, SELECT_OP(DCLZ, CLZ) | S(src2) | T(dst) | D(dst), DR(dst)));
-#else /* SLJIT_MIPS_REV < 1 */
- if (SLJIT_UNLIKELY(flags & UNUSED_DEST)) {
- FAIL_IF(push_inst(compiler, SELECT_OP(DSRL32, SRL) | T(src2) | DA(EQUAL_FLAG) | SH_IMM(31), EQUAL_FLAG));
- return push_inst(compiler, XORI | SA(EQUAL_FLAG) | TA(EQUAL_FLAG) | IMM(1), EQUAL_FLAG);
- }
- /* Nearly all instructions are unmovable in the following sequence. */
- FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src2) | TA(0) | D(TMP_REG1), DR(TMP_REG1)));
- /* Check zero. */
- FAIL_IF(push_inst(compiler, BEQ | S(TMP_REG1) | TA(0) | IMM(5), UNMOVABLE_INS));
- FAIL_IF(push_inst(compiler, ORI | SA(0) | T(dst) | IMM((op & SLJIT_I32_OP) ? 32 : 64), UNMOVABLE_INS));
- FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | SA(0) | T(dst) | IMM(-1), DR(dst)));
- /* Loop for searching the highest bit. */
- FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(dst) | T(dst) | IMM(1), DR(dst)));
- FAIL_IF(push_inst(compiler, BGEZ | S(TMP_REG1) | IMM(-2), UNMOVABLE_INS));
- FAIL_IF(push_inst(compiler, SELECT_OP(DSLL, SLL) | T(TMP_REG1) | D(TMP_REG1) | SH_IMM(1), UNMOVABLE_INS));
-#endif /* SLJIT_MIPS_REV >= 1 */
- return SLJIT_SUCCESS;
-
- case SLJIT_ADD:
- is_overflow = GET_FLAG_TYPE(op) == SLJIT_OVERFLOW;
- is_carry = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY);
-
- if (flags & SRC2_IMM) {
- if (is_overflow) {
- if (src2 >= 0)
- FAIL_IF(push_inst(compiler, OR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG));
- else
- FAIL_IF(push_inst(compiler, NOR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG));
- }
- else if (op & SLJIT_SET_Z)
- FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG));
-
- if (is_overflow || is_carry) {
- if (src2 >= 0)
- FAIL_IF(push_inst(compiler, ORI | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG));
- else {
- FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | SA(0) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG));
- FAIL_IF(push_inst(compiler, OR | S(src1) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG));
- }
- }
- /* dst may be the same as src1 or src2. */
- if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK))
- FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | T(dst) | IMM(src2), DR(dst)));
- }
- else {
- if (is_overflow)
- FAIL_IF(push_inst(compiler, XOR | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
- else if (op & SLJIT_SET_Z)
- FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
-
- if (is_overflow || is_carry)
- FAIL_IF(push_inst(compiler, OR | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG));
- /* dst may be the same as src1 or src2. */
- if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK))
- FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src1) | T(src2) | D(dst), DR(dst)));
- }
-
- /* a + b >= a | b (otherwise, the carry should be set to 1). */
- if (is_overflow || is_carry)
- FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG));
- if (!is_overflow)
- return SLJIT_SUCCESS;
- FAIL_IF(push_inst(compiler, SELECT_OP(DSLL32, SLL) | TA(OTHER_FLAG) | D(TMP_REG1) | SH_IMM(31), DR(TMP_REG1)));
- FAIL_IF(push_inst(compiler, XOR | S(TMP_REG1) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG));
- FAIL_IF(push_inst(compiler, XOR | S(dst) | TA(EQUAL_FLAG) | DA(OTHER_FLAG), OTHER_FLAG));
- if (op & SLJIT_SET_Z)
- FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(dst) | TA(0) | DA(EQUAL_FLAG), EQUAL_FLAG));
- return push_inst(compiler, SELECT_OP(DSRL32, SRL) | TA(OTHER_FLAG) | DA(OTHER_FLAG) | SH_IMM(31), OTHER_FLAG);
-
- case SLJIT_ADDC:
- is_carry = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY);
-
- if (flags & SRC2_IMM) {
- if (is_carry) {
- if (src2 >= 0)
- FAIL_IF(push_inst(compiler, ORI | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG));
- else {
- FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | SA(0) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG));
- FAIL_IF(push_inst(compiler, OR | S(src1) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG));
- }
- }
- FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | T(dst) | IMM(src2), DR(dst)));
- } else {
- if (is_carry)
- FAIL_IF(push_inst(compiler, OR | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
- /* dst may be the same as src1 or src2. */
- FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src1) | T(src2) | D(dst), DR(dst)));
- }
- if (is_carry)
- FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG));
-
- FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(dst) | TA(OTHER_FLAG) | D(dst), DR(dst)));
- if (!is_carry)
- return SLJIT_SUCCESS;
-
- /* Set ULESS_FLAG (dst == 0) && (OTHER_FLAG == 1). */
- FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG));
- /* Set carry flag. */
- return push_inst(compiler, OR | SA(OTHER_FLAG) | TA(EQUAL_FLAG) | DA(OTHER_FLAG), OTHER_FLAG);
-
- case SLJIT_SUB:
- if ((flags & SRC2_IMM) && src2 == SIMM_MIN) {
- FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(TMP_REG2) | IMM(src2), DR(TMP_REG2)));
- src2 = TMP_REG2;
- flags &= ~SRC2_IMM;
- }
-
- is_handled = 0;
-
- if (flags & SRC2_IMM) {
- if (GET_FLAG_TYPE(op) == SLJIT_LESS || GET_FLAG_TYPE(op) == SLJIT_GREATER_EQUAL) {
- FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG));
- is_handled = 1;
- }
- else if (GET_FLAG_TYPE(op) == SLJIT_SIG_LESS || GET_FLAG_TYPE(op) == SLJIT_SIG_GREATER_EQUAL) {
- FAIL_IF(push_inst(compiler, SLTI | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG));
- is_handled = 1;
- }
- }
-
- if (!is_handled && GET_FLAG_TYPE(op) >= SLJIT_LESS && GET_FLAG_TYPE(op) <= SLJIT_SIG_LESS_EQUAL) {
- is_handled = 1;
-
- if (flags & SRC2_IMM) {
- FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(TMP_REG2) | IMM(src2), DR(TMP_REG2)));
- src2 = TMP_REG2;
- flags &= ~SRC2_IMM;
- }
-
- if (GET_FLAG_TYPE(op) == SLJIT_LESS || GET_FLAG_TYPE(op) == SLJIT_GREATER_EQUAL) {
- FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG));
- }
- else if (GET_FLAG_TYPE(op) == SLJIT_GREATER || GET_FLAG_TYPE(op) == SLJIT_LESS_EQUAL)
- {
- FAIL_IF(push_inst(compiler, SLTU | S(src2) | T(src1) | DA(OTHER_FLAG), OTHER_FLAG));
- }
- else if (GET_FLAG_TYPE(op) == SLJIT_SIG_LESS || GET_FLAG_TYPE(op) == SLJIT_SIG_GREATER_EQUAL) {
- FAIL_IF(push_inst(compiler, SLT | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG));
- }
- else if (GET_FLAG_TYPE(op) == SLJIT_SIG_GREATER || GET_FLAG_TYPE(op) == SLJIT_SIG_LESS_EQUAL)
- {
- FAIL_IF(push_inst(compiler, SLT | S(src2) | T(src1) | DA(OTHER_FLAG), OTHER_FLAG));
- }
- }
-
- if (is_handled) {
- if (flags & SRC2_IMM) {
- if (op & SLJIT_SET_Z)
- FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | TA(EQUAL_FLAG) | IMM(-src2), EQUAL_FLAG));
- if (!(flags & UNUSED_DEST))
- return push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | T(dst) | IMM(-src2), DR(dst));
- }
- else {
- if (op & SLJIT_SET_Z)
- FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
- if (!(flags & UNUSED_DEST))
- return push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(src1) | T(src2) | D(dst), DR(dst));
- }
- return SLJIT_SUCCESS;
- }
-
- is_overflow = GET_FLAG_TYPE(op) == SLJIT_OVERFLOW;
- is_carry = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY);
-
- if (flags & SRC2_IMM) {
- if (is_overflow) {
- if (src2 >= 0)
- FAIL_IF(push_inst(compiler, OR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG));
- else
- FAIL_IF(push_inst(compiler, NOR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG));
- }
- else if (op & SLJIT_SET_Z)
- FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | TA(EQUAL_FLAG) | IMM(-src2), EQUAL_FLAG));
-
- if (is_overflow || is_carry)
- FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG));
- /* dst may be the same as src1 or src2. */
- if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK))
- FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | T(dst) | IMM(-src2), DR(dst)));
- }
- else {
- if (is_overflow)
- FAIL_IF(push_inst(compiler, XOR | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
- else if (op & SLJIT_SET_Z)
- FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
-
- if (is_overflow || is_carry)
- FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG));
- /* dst may be the same as src1 or src2. */
- if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK))
- FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(src1) | T(src2) | D(dst), DR(dst)));
- }
-
- if (!is_overflow)
- return SLJIT_SUCCESS;
- FAIL_IF(push_inst(compiler, SELECT_OP(DSLL32, SLL) | TA(OTHER_FLAG) | D(TMP_REG1) | SH_IMM(31), DR(TMP_REG1)));
- FAIL_IF(push_inst(compiler, XOR | S(TMP_REG1) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG));
- FAIL_IF(push_inst(compiler, XOR | S(dst) | TA(EQUAL_FLAG) | DA(OTHER_FLAG), OTHER_FLAG));
- if (op & SLJIT_SET_Z)
- FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(dst) | TA(0) | DA(EQUAL_FLAG), EQUAL_FLAG));
- return push_inst(compiler, SELECT_OP(DSRL32, SRL) | TA(OTHER_FLAG) | DA(OTHER_FLAG) | SH_IMM(31), OTHER_FLAG);
-
- case SLJIT_SUBC:
- if ((flags & SRC2_IMM) && src2 == SIMM_MIN) {
- FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(TMP_REG2) | IMM(src2), DR(TMP_REG2)));
- src2 = TMP_REG2;
- flags &= ~SRC2_IMM;
- }
+ FAIL_IF(push_inst(compiler, LUI | T(dst) | IMM(init_value >> 48), DR(dst)));
+ FAIL_IF(push_inst(compiler, ORI | S(dst) | T(dst) | IMM(init_value >> 32), DR(dst)));
+ FAIL_IF(push_inst(compiler, DSLL | T(dst) | D(dst) | SH_IMM(16), DR(dst)));
+ FAIL_IF(push_inst(compiler, ORI | S(dst) | T(dst) | IMM(init_value >> 16), DR(dst)));
+ FAIL_IF(push_inst(compiler, DSLL | T(dst) | D(dst) | SH_IMM(16), DR(dst)));
+ return push_inst(compiler, ORI | S(dst) | T(dst) | IMM(init_value), DR(dst));
+}
- is_carry = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY);
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fset64(struct sljit_compiler *compiler,
+ sljit_s32 freg, sljit_f64 value)
+{
+ union {
+ sljit_sw imm;
+ sljit_f64 value;
+ } u;
- if (flags & SRC2_IMM) {
- if (is_carry)
- FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG));
- /* dst may be the same as src1 or src2. */
- FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | T(dst) | IMM(-src2), DR(dst)));
- }
- else {
- if (is_carry)
- FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
- /* dst may be the same as src1 or src2. */
- FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(src1) | T(src2) | D(dst), DR(dst)));
- }
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fset64(compiler, freg, value));
- if (is_carry)
- FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(OTHER_FLAG) | D(TMP_REG1), DR(TMP_REG1)));
-
- FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(dst) | TA(OTHER_FLAG) | D(dst), DR(dst)));
- return (is_carry) ? push_inst(compiler, OR | SA(EQUAL_FLAG) | T(TMP_REG1) | DA(OTHER_FLAG), OTHER_FLAG) : SLJIT_SUCCESS;
-
- case SLJIT_MUL:
- SLJIT_ASSERT(!(flags & SRC2_IMM));
-
- if (GET_FLAG_TYPE(op) != SLJIT_OVERFLOW) {
-#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6)
- return push_inst(compiler, SELECT_OP(DMUL, MUL) | S(src1) | T(src2) | D(dst), DR(dst));
-#elif (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1)
- if (op & SLJIT_I32_OP)
- return push_inst(compiler, MUL | S(src1) | T(src2) | D(dst), DR(dst));
- FAIL_IF(push_inst(compiler, DMULT | S(src1) | T(src2), MOVABLE_INS));
- return push_inst(compiler, MFLO | D(dst), DR(dst));
-#else /* SLJIT_MIPS_REV < 1 */
- FAIL_IF(push_inst(compiler, SELECT_OP(DMULT, MULT) | S(src1) | T(src2), MOVABLE_INS));
- return push_inst(compiler, MFLO | D(dst), DR(dst));
-#endif /* SLJIT_MIPS_REV >= 6 */
- }
+ u.value = value;
-#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6)
- FAIL_IF(push_inst(compiler, SELECT_OP(DMUL, MUL) | S(src1) | T(src2) | D(dst), DR(dst)));
- FAIL_IF(push_inst(compiler, SELECT_OP(DMUH, MUH) | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
-#else /* SLJIT_MIPS_REV < 6 */
- FAIL_IF(push_inst(compiler, SELECT_OP(DMULT, MULT) | S(src1) | T(src2), MOVABLE_INS));
- FAIL_IF(push_inst(compiler, MFHI | DA(EQUAL_FLAG), EQUAL_FLAG));
- FAIL_IF(push_inst(compiler, MFLO | D(dst), DR(dst)));
-#endif /* SLJIT_MIPS_REV >= 6 */
- FAIL_IF(push_inst(compiler, SELECT_OP(DSRA32, SRA) | T(dst) | DA(OTHER_FLAG) | SH_IMM(31), OTHER_FLAG));
- return push_inst(compiler, SELECT_OP(DSUBU, SUBU) | SA(EQUAL_FLAG) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG);
-
- case SLJIT_AND:
- EMIT_LOGICAL(ANDI, AND);
+ if (u.imm == 0) {
+ FAIL_IF(push_inst(compiler, DMTC1 | TA(0) | FS(freg), MOVABLE_INS));
+#if !defined(SLJIT_MIPS_REV) || SLJIT_MIPS_REV <= 1
+ FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS));
+#endif /* MIPS III */
return SLJIT_SUCCESS;
+ }
- case SLJIT_OR:
- EMIT_LOGICAL(ORI, OR);
- return SLJIT_SUCCESS;
+ FAIL_IF(load_immediate(compiler, DR(TMP_REG1), u.imm));
+ FAIL_IF(push_inst(compiler, DMTC1 | T(TMP_REG1) | FS(freg), MOVABLE_INS));
+#if !defined(SLJIT_MIPS_REV) || SLJIT_MIPS_REV <= 1
+ FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS));
+#endif /* MIPS III */
+ return SLJIT_SUCCESS;
+}
- case SLJIT_XOR:
- EMIT_LOGICAL(XORI, XOR);
- return SLJIT_SUCCESS;
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fcopy(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 freg, sljit_s32 reg)
+{
+ sljit_ins inst;
- case SLJIT_SHL:
- EMIT_SHIFT(DSLL, DSLL32, SLL, DSLLV, SLLV);
- return SLJIT_SUCCESS;
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fcopy(compiler, op, freg, reg));
- case SLJIT_LSHR:
- EMIT_SHIFT(DSRL, DSRL32, SRL, DSRLV, SRLV);
- return SLJIT_SUCCESS;
+ inst = T(reg) | FS(freg);
- case SLJIT_ASHR:
- EMIT_SHIFT(DSRA, DSRA32, SRA, DSRAV, SRAV);
- return SLJIT_SUCCESS;
- }
+ if (GET_OPCODE(op) == SLJIT_COPY_TO_F64)
+ FAIL_IF(push_inst(compiler, SELECT_OP(DMTC1, MTC1) | inst, MOVABLE_INS));
+ else
+ FAIL_IF(push_inst(compiler, SELECT_OP(DMFC1, MFC1) | inst, DR(reg)));
- SLJIT_UNREACHABLE();
+#if !defined(SLJIT_MIPS_REV) || SLJIT_MIPS_REV <= 1
+ if (!(op & SLJIT_32))
+ return push_inst(compiler, NOP, UNMOVABLE_INS);
+#endif /* MIPS III */
return SLJIT_SUCCESS;
}
-static SLJIT_INLINE sljit_s32 emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw init_value)
-{
- FAIL_IF(push_inst(compiler, LUI | T(dst) | IMM(init_value >> 48), DR(dst)));
- FAIL_IF(push_inst(compiler, ORI | S(dst) | T(dst) | IMM(init_value >> 32), DR(dst)));
- FAIL_IF(push_inst(compiler, DSLL | T(dst) | D(dst) | SH_IMM(16), DR(dst)));
- FAIL_IF(push_inst(compiler, ORI | S(dst) | T(dst) | IMM(init_value >> 16), DR(dst)));
- FAIL_IF(push_inst(compiler, DSLL | T(dst) | D(dst) | SH_IMM(16), DR(dst)));
- return push_inst(compiler, ORI | S(dst) | T(dst) | IMM(init_value), DR(dst));
-}
-
SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_target, sljit_sw executable_offset)
{
sljit_ins *inst = (sljit_ins *)addr;
SLJIT_UNUSED_ARG(executable_offset);
SLJIT_UPDATE_WX_FLAGS(inst, inst + 6, 0);
- inst[0] = (inst[0] & 0xffff0000) | ((new_target >> 48) & 0xffff);
- inst[1] = (inst[1] & 0xffff0000) | ((new_target >> 32) & 0xffff);
- inst[3] = (inst[3] & 0xffff0000) | ((new_target >> 16) & 0xffff);
- inst[5] = (inst[5] & 0xffff0000) | (new_target & 0xffff);
+ inst[0] = (inst[0] & 0xffff0000) | ((sljit_ins)(new_target >> 48) & 0xffff);
+ inst[1] = (inst[1] & 0xffff0000) | ((sljit_ins)(new_target >> 32) & 0xffff);
+ inst[3] = (inst[3] & 0xffff0000) | ((sljit_ins)(new_target >> 16) & 0xffff);
+ inst[5] = (inst[5] & 0xffff0000) | ((sljit_ins)new_target & 0xffff);
SLJIT_UPDATE_WX_FLAGS(inst, inst + 6, 1);
inst = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
SLJIT_CACHE_FLUSH(inst, inst + 6);
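[Editor's note: emit_const above builds a 64-bit constant with a LUI/ORI/DSLL/ORI/DSLL/ORI chain, and sljit_set_jump_addr rewrites the four 16-bit immediates at words 0, 1, 3 and 5 of that chain. A runnable check of the decomposition (a sketch; LUI's 32-bit sign extension is shifted out by the two DSLLs, so plain unsigned arithmetic models the register exactly):]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t imm = 0x0123456789abcdefULL;
	uint64_t r;

	r = (imm >> 48) << 16;     /* lui  rd, imm >> 48 */
	r |= (imm >> 32) & 0xffff; /* ori  rd, rd, imm >> 32 */
	r <<= 16;                  /* dsll rd, rd, 16 */
	r |= (imm >> 16) & 0xffff; /* ori  rd, rd, imm >> 16 */
	r <<= 16;                  /* dsll rd, rd, 16 */
	r |= imm & 0xffff;         /* ori  rd, rd, imm */

	printf("%d\n", r == imm);  /* prints 1 */
	return 0;
}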
@@ -539,7 +213,7 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_ta
SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset)
{
- sljit_set_jump_addr(addr, new_constant, executable_offset);
+ sljit_set_jump_addr(addr, (sljit_uw)new_constant, executable_offset);
}
static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_ins *ins_ptr)
@@ -548,19 +222,19 @@ static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_t
sljit_s32 word_arg_count = 0;
sljit_s32 float_arg_count = 0;
sljit_s32 types = 0;
- sljit_ins prev_ins = NOP;
+ sljit_ins prev_ins = *ins_ptr;
sljit_ins ins = NOP;
SLJIT_ASSERT(reg_map[TMP_REG1] == 4 && freg_map[TMP_FREG1] == 12);
- arg_types >>= SLJIT_DEF_SHIFT;
+ arg_types >>= SLJIT_ARG_SHIFT;
while (arg_types) {
- types = (types << SLJIT_DEF_SHIFT) | (arg_types & SLJIT_DEF_MASK);
+ types = (types << SLJIT_ARG_SHIFT) | (arg_types & SLJIT_ARG_MASK);
- switch (arg_types & SLJIT_DEF_MASK) {
- case SLJIT_ARG_TYPE_F32:
+ switch (arg_types & SLJIT_ARG_MASK) {
case SLJIT_ARG_TYPE_F64:
+ case SLJIT_ARG_TYPE_F32:
arg_count++;
float_arg_count++;
break;
@@ -570,24 +244,24 @@ static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_t
break;
}
- arg_types >>= SLJIT_DEF_SHIFT;
+ arg_types >>= SLJIT_ARG_SHIFT;
}
while (types) {
- switch (types & SLJIT_DEF_MASK) {
- case SLJIT_ARG_TYPE_F32:
+ switch (types & SLJIT_ARG_MASK) {
+ case SLJIT_ARG_TYPE_F64:
if (arg_count != float_arg_count)
- ins = MOV_S | FMT_S | FS(float_arg_count) | FD(arg_count);
+ ins = MOV_fmt(FMT_D) | FS(float_arg_count) | FD(arg_count);
else if (arg_count == 1)
- ins = MOV_S | FMT_S | FS(SLJIT_FR0) | FD(TMP_FREG1);
+ ins = MOV_fmt(FMT_D) | FS(SLJIT_FR0) | FD(TMP_FREG1);
arg_count--;
float_arg_count--;
break;
- case SLJIT_ARG_TYPE_F64:
+ case SLJIT_ARG_TYPE_F32:
if (arg_count != float_arg_count)
- ins = MOV_S | FMT_D | FS(float_arg_count) | FD(arg_count);
+ ins = MOV_fmt(FMT_S) | FS(float_arg_count) | FD(arg_count);
else if (arg_count == 1)
- ins = MOV_S | FMT_D | FS(SLJIT_FR0) | FD(TMP_FREG1);
+ ins = MOV_fmt(FMT_S) | FS(SLJIT_FR0) | FD(TMP_FREG1);
arg_count--;
float_arg_count--;
break;
@@ -608,7 +282,7 @@ static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_t
ins = NOP;
}
- types >>= SLJIT_DEF_SHIFT;
+ types >>= SLJIT_ARG_SHIFT;
}
*ins_ptr = prev_ins;
@@ -620,7 +294,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compile
sljit_s32 arg_types)
{
struct sljit_jump *jump;
- sljit_ins ins;
+ sljit_ins ins = NOP;
CHECK_ERROR_PTR();
CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types));
@@ -628,19 +302,33 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compile
jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
PTR_FAIL_IF(!jump);
set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP);
- type &= 0xff;
- PTR_FAIL_IF(call_with_args(compiler, arg_types, &ins));
+ if (type & SLJIT_CALL_RETURN)
+ PTR_FAIL_IF(emit_stack_frame_release(compiler, 0, &ins));
+
+ if ((type & 0xff) != SLJIT_CALL_REG_ARG)
+ PTR_FAIL_IF(call_with_args(compiler, arg_types, &ins));
SLJIT_ASSERT(DR(PIC_ADDR_REG) == 25 && PIC_ADDR_REG == TMP_REG2);
- PTR_FAIL_IF(emit_const(compiler, PIC_ADDR_REG, 0));
+ if (ins == NOP && compiler->delay_slot != UNMOVABLE_INS)
+ jump->flags |= IS_MOVABLE;
+
+ if (!(type & SLJIT_CALL_RETURN)) {
+ jump->flags |= IS_JAL;
+
+ if ((type & 0xff) != SLJIT_CALL_REG_ARG)
+ jump->flags |= IS_CALL;
+
+ PTR_FAIL_IF(push_inst(compiler, JALR | S(PIC_ADDR_REG) | DA(RETURN_ADDR_REG), UNMOVABLE_INS));
+ } else
+ PTR_FAIL_IF(push_inst(compiler, JR | S(PIC_ADDR_REG), UNMOVABLE_INS));
- jump->flags |= IS_JAL | IS_CALL;
- PTR_FAIL_IF(push_inst(compiler, JALR | S(PIC_ADDR_REG) | DA(RETURN_ADDR_REG), UNMOVABLE_INS));
jump->addr = compiler->size;
PTR_FAIL_IF(push_inst(compiler, ins, UNMOVABLE_INS));
+ /* Maximum number of instructions required for generating a constant. */
+ compiler->size += 6;
return jump;
}
@@ -648,25 +336,52 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compi
sljit_s32 arg_types,
sljit_s32 src, sljit_sw srcw)
{
- sljit_ins ins;
+ sljit_ins ins = NOP;
CHECK_ERROR();
CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw));
+ if (src & SLJIT_MEM) {
+ ADJUST_LOCAL_OFFSET(src, srcw);
+ FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, DR(PIC_ADDR_REG), src, srcw));
+ src = PIC_ADDR_REG;
+ srcw = 0;
+ }
+
+ if ((type & 0xff) == SLJIT_CALL_REG_ARG) {
+ if (type & SLJIT_CALL_RETURN) {
+ if (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) {
+ FAIL_IF(push_inst(compiler, DADDU | S(src) | TA(0) | D(PIC_ADDR_REG), DR(PIC_ADDR_REG)));
+ src = PIC_ADDR_REG;
+ srcw = 0;
+ }
+
+ FAIL_IF(emit_stack_frame_release(compiler, 0, &ins));
+
+ if (ins != NOP)
+ FAIL_IF(push_inst(compiler, ins, MOVABLE_INS));
+ }
+
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_ijump(compiler, type, src, srcw);
+ }
+
SLJIT_ASSERT(DR(PIC_ADDR_REG) == 25 && PIC_ADDR_REG == TMP_REG2);
- if (src & SLJIT_IMM)
+ if (src == SLJIT_IMM)
FAIL_IF(load_immediate(compiler, DR(PIC_ADDR_REG), srcw));
- else if (FAST_IS_REG(src))
+ else if (src != PIC_ADDR_REG)
FAIL_IF(push_inst(compiler, DADDU | S(src) | TA(0) | D(PIC_ADDR_REG), DR(PIC_ADDR_REG)));
- else if (src & SLJIT_MEM) {
- ADJUST_LOCAL_OFFSET(src, srcw);
- FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, DR(PIC_ADDR_REG), src, srcw));
- }
+
+ if (type & SLJIT_CALL_RETURN)
+ FAIL_IF(emit_stack_frame_release(compiler, 0, &ins));
FAIL_IF(call_with_args(compiler, arg_types, &ins));
/* Register input. */
- FAIL_IF(push_inst(compiler, JALR | S(PIC_ADDR_REG) | DA(RETURN_ADDR_REG), UNMOVABLE_INS));
+ if (!(type & SLJIT_CALL_RETURN))
+ FAIL_IF(push_inst(compiler, JALR | S(PIC_ADDR_REG) | DA(RETURN_ADDR_REG), UNMOVABLE_INS));
+ else
+ FAIL_IF(push_inst(compiler, JR | S(PIC_ADDR_REG), UNMOVABLE_INS));
return push_inst(compiler, ins, UNMOVABLE_INS);
}
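[Editor's note: the tail-call path added above is driven by the SLJIT_CALL_RETURN flag that front ends pass in. A hypothetical front-end snippet, assuming the sljit public API of this version (SLJIT_ARGS1 and SLJIT_CALL_RETURN are declared in sljitLir.h); it emits a call through a register that replaces the function's own return, so the frame is released before the jump:]

sljit_s32 status = sljit_emit_icall(compiler,
	SLJIT_CALL | SLJIT_CALL_RETURN, /* call that replaces the return */
	SLJIT_ARGS1(W, W),              /* word result, one word argument */
	SLJIT_R0, 0);                   /* callee address held in R0 */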
diff --git a/src/3rdparty/pcre2/src/sljit/sljitNativeMIPS_common.c b/src/3rdparty/pcre2/src/sljit/sljitNativeMIPS_common.c
index fd747695a7..807b3474ea 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitNativeMIPS_common.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitNativeMIPS_common.c
@@ -26,9 +26,12 @@
/* Latest MIPS architecture. */
-#ifndef __mips_hard_float
+#ifdef HAVE_PRCTL
+#include <sys/prctl.h>
+#endif
+
+#if !defined(__mips_hard_float) || defined(__mips_single_float)
/* Disable automatic detection, covers both -msoft-float and -mno-float */
-#undef SLJIT_IS_FPU_AVAILABLE
#define SLJIT_IS_FPU_AVAILABLE 0
#endif
@@ -42,6 +45,22 @@ SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void)
return "MIPS64-R6" SLJIT_CPUINFO;
#endif /* SLJIT_CONFIG_MIPS_32 */
+#elif (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 5)
+
+#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
+ return "MIPS32-R5" SLJIT_CPUINFO;
+#else /* !SLJIT_CONFIG_MIPS_32 */
+ return "MIPS64-R5" SLJIT_CPUINFO;
+#endif /* SLJIT_CONFIG_MIPS_32 */
+
+#elif (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 2)
+
+#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
+ return "MIPS32-R2" SLJIT_CPUINFO;
+#else /* !SLJIT_CONFIG_MIPS_32 */
+ return "MIPS64-R2" SLJIT_CPUINFO;
+#endif /* SLJIT_CONFIG_MIPS_32 */
+
#elif (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1)
#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
@@ -75,49 +94,53 @@ typedef sljit_u32 sljit_ins;
#define EQUAL_FLAG 3
#define OTHER_FLAG 1
+static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 7] = {
+ 0, 2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 24, 23, 22, 21, 20, 19, 18, 17, 16, 29, 4, 25, 31, 3, 1
+};
+
#define TMP_FREG1 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1)
#define TMP_FREG2 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 2)
#define TMP_FREG3 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3)
-static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 5] = {
- 0, 2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 24, 23, 22, 21, 20, 19, 18, 17, 16, 29, 4, 25, 31
-};
-
#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
-static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 4] = {
- 0, 0, 14, 2, 4, 6, 8, 12, 10, 16
+static const sljit_u8 freg_map[((SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3) << 1) + 1] = {
+ 0,
+ 0, 14, 2, 4, 6, 8, 18, 30, 28, 26, 24, 22, 20,
+ 12, 10, 16,
+ 1, 15, 3, 5, 7, 9, 19, 31, 29, 27, 25, 23, 21,
+ 13, 11, 17
};
-#else
+#else /* !SLJIT_CONFIG_MIPS_32 */
static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 4] = {
- 0, 0, 13, 14, 15, 16, 17, 12, 18, 10
+ 0, 0, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 1, 2, 3, 4, 5, 6, 7, 8, 9, 31, 30, 29, 28, 27, 26, 25, 24, 12, 11, 10
};
-#endif
+#endif /* SLJIT_CONFIG_MIPS_32 */
/* --------------------------------------------------------------------- */
/* Instruction forms */
/* --------------------------------------------------------------------- */
-#define S(s) (reg_map[s] << 21)
-#define T(t) (reg_map[t] << 16)
-#define D(d) (reg_map[d] << 11)
-#define FT(t) (freg_map[t] << 16)
-#define FS(s) (freg_map[s] << 11)
-#define FD(d) (freg_map[d] << 6)
+#define S(s) ((sljit_ins)reg_map[s] << 21)
+#define T(t) ((sljit_ins)reg_map[t] << 16)
+#define D(d) ((sljit_ins)reg_map[d] << 11)
+#define FT(t) ((sljit_ins)freg_map[t] << 16)
+#define FS(s) ((sljit_ins)freg_map[s] << 11)
+#define FD(d) ((sljit_ins)freg_map[d] << 6)
/* Absolute registers. */
-#define SA(s) ((s) << 21)
-#define TA(t) ((t) << 16)
-#define DA(d) ((d) << 11)
-#define IMM(imm) ((imm) & 0xffff)
-#define SH_IMM(imm) ((imm) << 6)
+#define SA(s) ((sljit_ins)(s) << 21)
+#define TA(t) ((sljit_ins)(t) << 16)
+#define DA(d) ((sljit_ins)(d) << 11)
+#define IMM(imm) ((sljit_ins)(imm) & 0xffff)
+#define SH_IMM(imm) ((sljit_ins)(imm) << 6)
#define DR(dr) (reg_map[dr])
#define FR(dr) (freg_map[dr])
-#define HI(opcode) ((opcode) << 26)
-#define LO(opcode) (opcode)
+#define HI(opcode) ((sljit_ins)(opcode) << 26)
+#define LO(opcode) ((sljit_ins)(opcode))
#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6)
/* CMP.cond.fmt */
/* S = (20 << 21) D = (21 << 21) */
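[Editor's note: the HI/LO and register-field macros above assemble raw MIPS instruction words by shifting each field into place. A minimal runnable illustration using local copies of the macros (hypothetical standalone program, not patch code):]

#include <stdint.h>
#include <stdio.h>

typedef uint32_t sljit_ins;

/* Local copies of the field macros defined above. */
#define HI(opcode) ((sljit_ins)(opcode) << 26)
#define TA(t) ((sljit_ins)(t) << 16)
#define IMM(imm) ((sljit_ins)(imm) & 0xffff)
#define LUI (HI(15))

int main(void)
{
	/* "lui $a0, 0x1234": opcode 15, rt = 4 ($a0), imm = 0x1234. */
	printf("0x%08x\n", (unsigned)(LUI | TA(4) | IMM(0x1234))); /* 0x3c041234 */
	return 0;
}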
@@ -151,12 +174,18 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 4] = {
#define BREAK (HI(0) | LO(13))
#define CFC1 (HI(17) | (2 << 21))
#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6)
+#define C_EQ_S (HI(17) | CMP_FMT_S | LO(2))
+#define C_OLE_S (HI(17) | CMP_FMT_S | LO(6))
+#define C_OLT_S (HI(17) | CMP_FMT_S | LO(4))
#define C_UEQ_S (HI(17) | CMP_FMT_S | LO(3))
#define C_ULE_S (HI(17) | CMP_FMT_S | LO(7))
#define C_ULT_S (HI(17) | CMP_FMT_S | LO(5))
#define C_UN_S (HI(17) | CMP_FMT_S | LO(1))
#define C_FD (FD(TMP_FREG3))
#else /* SLJIT_MIPS_REV < 6 */
+#define C_EQ_S (HI(17) | FMT_S | LO(50))
+#define C_OLE_S (HI(17) | FMT_S | LO(54))
+#define C_OLT_S (HI(17) | FMT_S | LO(52))
#define C_UEQ_S (HI(17) | FMT_S | LO(51))
#define C_ULE_S (HI(17) | FMT_S | LO(55))
#define C_ULT_S (HI(17) | FMT_S | LO(53))
@@ -186,6 +215,18 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 4] = {
#define DMULTU (HI(0) | LO(29))
#endif /* SLJIT_MIPS_REV >= 6 */
#define DIV_S (HI(17) | FMT_S | LO(3))
+#if defined(SLJIT_MIPS_REV) && SLJIT_MIPS_REV >= 2
+#define DINSU (HI(31) | LO(6))
+#endif /* SLJIT_MIPS_REV >= 2 */
+#define DMFC1 (HI(17) | (1 << 21))
+#define DMTC1 (HI(17) | (5 << 21))
+#if defined(SLJIT_MIPS_REV) && SLJIT_MIPS_REV >= 2
+#define DROTR (HI(0) | (1 << 21) | LO(58))
+#define DROTR32 (HI(0) | (1 << 21) | LO(62))
+#define DROTRV (HI(0) | (1 << 6) | LO(22))
+#define DSBH (HI(31) | (2 << 6) | LO(36))
+#define DSHD (HI(31) | (5 << 6) | LO(36))
+#endif /* SLJIT_MIPS_REV >= 2 */
#define DSLL (HI(0) | LO(56))
#define DSLL32 (HI(0) | LO(60))
#define DSLLV (HI(0) | LO(20))
@@ -205,9 +246,18 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 4] = {
#define JR (HI(0) | LO(8))
#endif /* SLJIT_MIPS_REV >= 6 */
#define LD (HI(55))
+#define LDL (HI(26))
+#define LDR (HI(27))
+#define LDC1 (HI(53))
#define LUI (HI(15))
#define LW (HI(35))
+#define LWL (HI(34))
+#define LWR (HI(38))
+#define LWC1 (HI(49))
#define MFC1 (HI(17))
+#if defined(SLJIT_MIPS_REV) && SLJIT_MIPS_REV >= 2
+#define MFHC1 (HI(17) | (3 << 21))
+#endif /* SLJIT_MIPS_REV >= 2 */
#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6)
#define MOD (HI(0) | (3 << 6) | LO(26))
#define MODU (HI(0) | (3 << 6) | LO(27))
@@ -215,8 +265,10 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 4] = {
#define MFHI (HI(0) | LO(16))
#define MFLO (HI(0) | LO(18))
#endif /* SLJIT_MIPS_REV >= 6 */
-#define MOV_S (HI(17) | FMT_S | LO(6))
#define MTC1 (HI(17) | (4 << 21))
+#if defined(SLJIT_MIPS_REV) && SLJIT_MIPS_REV >= 2
+#define MTHC1 (HI(17) | (7 << 21))
+#endif /* SLJIT_MIPS_REV >= 2 */
#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6)
#define MUH (HI(0) | (3 << 6) | LO(24))
#define MUHU (HI(0) | (3 << 6) | LO(25))
@@ -232,7 +284,13 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 4] = {
#define NOR (HI(0) | LO(39))
#define OR (HI(0) | LO(37))
#define ORI (HI(13))
+#if defined(SLJIT_MIPS_REV) && SLJIT_MIPS_REV >= 2
+#define ROTR (HI(0) | (1 << 21) | LO(2))
+#define ROTRV (HI(0) | (1 << 6) | LO(6))
+#endif /* SLJIT_MIPS_REV >= 2 */
#define SD (HI(63))
+#define SDL (HI(44))
+#define SDR (HI(45))
#define SDC1 (HI(61))
#define SLT (HI(0) | LO(42))
#define SLTI (HI(10))
@@ -247,8 +305,13 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 4] = {
#define SUB_S (HI(17) | FMT_S | LO(1))
#define SUBU (HI(0) | LO(35))
#define SW (HI(43))
+#define SWL (HI(42))
+#define SWR (HI(46))
#define SWC1 (HI(57))
#define TRUNC_W_S (HI(17) | FMT_S | LO(13))
+#if defined(SLJIT_MIPS_REV) && SLJIT_MIPS_REV >= 2
+#define WSBH (HI(31) | (2 << 6) | LO(32))
+#endif /* SLJIT_MIPS_REV >= 2 */
#define XOR (HI(0) | LO(38))
#define XORI (HI(14))
@@ -259,40 +322,150 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 4] = {
#else /* SLJIT_MIPS_REV < 6 */
#define DCLZ (HI(28) | LO(36))
#define MOVF (HI(0) | (0 << 16) | LO(1))
+#define MOVF_S (HI(17) | FMT_S | (0 << 16) | LO(17))
#define MOVN (HI(0) | LO(11))
+#define MOVN_S (HI(17) | FMT_S | LO(19))
#define MOVT (HI(0) | (1 << 16) | LO(1))
+#define MOVT_S (HI(17) | FMT_S | (1 << 16) | LO(17))
#define MOVZ (HI(0) | LO(10))
+#define MOVZ_S (HI(17) | FMT_S | LO(18))
#define MUL (HI(28) | LO(2))
#endif /* SLJIT_MIPS_REV >= 6 */
#define PREF (HI(51))
#define PREFX (HI(19) | LO(15))
+#if defined(SLJIT_MIPS_REV) && SLJIT_MIPS_REV >= 2
#define SEB (HI(31) | (16 << 6) | LO(32))
#define SEH (HI(31) | (24 << 6) | LO(32))
+#endif /* SLJIT_MIPS_REV >= 2 */
#endif /* SLJIT_MIPS_REV >= 1 */
#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
#define ADDU_W ADDU
#define ADDIU_W ADDIU
#define SLL_W SLL
+#define SRA_W SRA
#define SUBU_W SUBU
+#define STORE_W SW
+#define LOAD_W LW
#else
#define ADDU_W DADDU
#define ADDIU_W DADDIU
#define SLL_W DSLL
+#define SRA_W DSRA
#define SUBU_W DSUBU
+#define STORE_W SD
+#define LOAD_W LD
#endif
+#define MOV_fmt(f) (HI(17) | f | LO(6))
+
#define SIMM_MAX (0x7fff)
#define SIMM_MIN (-0x8000)
#define UIMM_MAX (0xffff)
+#define CPU_FEATURE_DETECTED (1 << 0)
+#define CPU_FEATURE_FPU (1 << 1)
+#define CPU_FEATURE_FP64 (1 << 2)
+#define CPU_FEATURE_FR (1 << 3)
+
+static sljit_u32 cpu_feature_list = 0;
+
+#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) \
+ && (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+
+static sljit_s32 function_check_is_freg(struct sljit_compiler *compiler, sljit_s32 fr, sljit_s32 is_32)
+{
+ if (compiler->scratches == -1)
+ return 0;
+
+ if (is_32 && fr >= SLJIT_F64_SECOND(SLJIT_FR0))
+ fr -= SLJIT_F64_SECOND(0);
+
+ return (fr >= SLJIT_FR0 && fr < (SLJIT_FR0 + compiler->fscratches))
+ || (fr > (SLJIT_FS0 - compiler->fsaveds) && fr <= SLJIT_FS0)
+ || (fr >= SLJIT_TMP_FREGISTER_BASE && fr < (SLJIT_TMP_FREGISTER_BASE + SLJIT_NUMBER_OF_TEMPORARY_FLOAT_REGISTERS));
+}
+
+#endif /* SLJIT_CONFIG_MIPS_32 && SLJIT_ARGUMENT_CHECKS */
+
+static void get_cpu_features(void)
+{
+#if !defined(SLJIT_IS_FPU_AVAILABLE) && defined(__GNUC__)
+ sljit_u32 fir = 0;
+#endif /* !SLJIT_IS_FPU_AVAILABLE && __GNUC__ */
+ sljit_u32 feature_list = CPU_FEATURE_DETECTED;
+
+#if defined(SLJIT_IS_FPU_AVAILABLE)
+#if SLJIT_IS_FPU_AVAILABLE
+ feature_list |= CPU_FEATURE_FPU;
+#if SLJIT_IS_FPU_AVAILABLE == 64
+ feature_list |= CPU_FEATURE_FP64;
+#endif /* SLJIT_IS_FPU_AVAILABLE == 64 */
+#endif /* SLJIT_IS_FPU_AVAILABLE */
+#elif defined(__GNUC__)
+ __asm__ ("cfc1 %0, $0" : "=r"(fir));
+ if ((fir & (0x3 << 16)) == (0x3 << 16))
+ feature_list |= CPU_FEATURE_FPU;
+
+#if (defined(SLJIT_CONFIG_MIPS_64) && SLJIT_CONFIG_MIPS_64) \
+ && (!defined(SLJIT_MIPS_REV) || SLJIT_MIPS_REV < 2)
+ if ((feature_list & CPU_FEATURE_FPU))
+ feature_list |= CPU_FEATURE_FP64;
+#else /* SLJIT_CONFIG_MIPS_32 || SLJIT_MIPS_REV >= 2 */
+ if ((fir & (1 << 22)))
+ feature_list |= CPU_FEATURE_FP64;
+#endif /* SLJIT_CONFIG_MIPS_64 && SLJIT_MIPS_REV < 2 */
+#endif /* SLJIT_IS_FPU_AVAILABLE */
+
+ if ((feature_list & CPU_FEATURE_FPU) && (feature_list & CPU_FEATURE_FP64)) {
+#if defined(SLJIT_CONFIG_MIPS_32) && SLJIT_CONFIG_MIPS_32
+#if defined(SLJIT_MIPS_REV) && SLJIT_MIPS_REV >= 6
+ feature_list |= CPU_FEATURE_FR;
+#elif defined(SLJIT_DETECT_FR) && SLJIT_DETECT_FR == 0
+#if defined(SLJIT_MIPS_REV) && SLJIT_MIPS_REV >= 5
+ feature_list |= CPU_FEATURE_FR;
+#endif /* SLJIT_MIPS_REV >= 5 */
+#else
+ sljit_s32 flag = -1;
+#ifndef PR_GET_FP_MODE
+ sljit_f64 zero = 0.0;
+#else /* PR_GET_FP_MODE */
+ flag = prctl(PR_GET_FP_MODE);
+
+ if (flag > 0)
+ feature_list |= CPU_FEATURE_FR;
+#endif /* PR_GET_FP_MODE */
+#if ((defined(SLJIT_DETECT_FR) && SLJIT_DETECT_FR == 2) \
+ || (!defined(PR_GET_FP_MODE) && (!defined(SLJIT_DETECT_FR) || SLJIT_DETECT_FR >= 1))) \
+ && (defined(__GNUC__) && (defined(__mips) && __mips >= 2))
+ if (flag < 0) {
+ __asm__ (".set oddspreg\n"
+ "lwc1 $f17, %0\n"
+ "ldc1 $f16, %1\n"
+ "swc1 $f17, %0\n"
+ : "+m" (flag) : "m" (zero) : "$f16", "$f17");
+ if (flag)
+ feature_list |= CPU_FEATURE_FR;
+ }
+#endif /* (!PR_GET_FP_MODE || (PR_GET_FP_MODE && SLJIT_DETECT_FR == 2)) && __GNUC__ */
+#endif /* SLJIT_MIPS_REV >= 6 */
+#else /* !SLJIT_CONFIG_MIPS_32 */
+ /* StatusFR=1 is the only mode the MIPS64 code supports. */
+ feature_list |= CPU_FEATURE_FR;
+#endif /* SLJIT_CONFIG_MIPS_32 */
+ }
+
+ cpu_feature_list = feature_list;
+}
+
/* dest_reg is the absolute name of the register.
Useful for reordering instructions in the delay slot. */
static sljit_s32 push_inst(struct sljit_compiler *compiler, sljit_ins ins, sljit_s32 delay_slot)
{
sljit_ins *ptr = (sljit_ins*)ensure_buf(compiler, sizeof(sljit_ins));
SLJIT_ASSERT(delay_slot == MOVABLE_INS || delay_slot >= UNMOVABLE_INS
- || delay_slot == ((ins >> 11) & 0x1f) || delay_slot == ((ins >> 16) & 0x1f));
+ || (sljit_ins)delay_slot == ((ins >> 11) & 0x1f)
+ || (sljit_ins)delay_slot == ((ins >> 16) & 0x1f));
FAIL_IF(!ptr);
*ptr = ins;
compiler->size++;
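[Editor's note: the delay_slot argument of push_inst records which register, if any, the instruction writes (or MOVABLE_INS/UNMOVABLE_INS), so branch emission knows whether the previous instruction may be hoisted into a delay slot. The staged prev_ins/ins pattern in call_with_args earlier in this diff relies on it; roughly (a restatement, not patch code):]

/* Each move is pushed one iteration late, so the final pending move
 * is pushed after the jump and therefore lands in its delay slot:
 *
 *   if (prev_ins != NOP)
 *       FAIL_IF(push_inst(compiler, prev_ins, MOVABLE_INS));
 *   ...
 *   FAIL_IF(push_inst(compiler, JALR | S(PIC_ADDR_REG) | DA(RETURN_ADDR_REG), UNMOVABLE_INS));
 *   FAIL_IF(push_inst(compiler, ins, UNMOVABLE_INS));  <- delay slot
 */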
@@ -300,7 +473,7 @@ static sljit_s32 push_inst(struct sljit_compiler *compiler, sljit_ins ins, sljit
return SLJIT_SUCCESS;
}
-static SLJIT_INLINE sljit_ins invert_branch(sljit_s32 flags)
+static SLJIT_INLINE sljit_ins invert_branch(sljit_uw flags)
{
if (flags & IS_BIT26_COND)
return (1 << 26);
@@ -311,19 +484,21 @@ static SLJIT_INLINE sljit_ins invert_branch(sljit_s32 flags)
return (1 << 16);
}
-static SLJIT_INLINE sljit_ins* detect_jump_type(struct sljit_jump *jump, sljit_ins *code_ptr, sljit_ins *code, sljit_sw executable_offset)
+static SLJIT_INLINE sljit_ins* detect_jump_type(struct sljit_jump *jump, sljit_ins *code, sljit_sw executable_offset)
{
sljit_sw diff;
sljit_uw target_addr;
sljit_ins *inst;
sljit_ins saved_inst;
+ inst = (sljit_ins *)jump->addr;
+
#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
if (jump->flags & (SLJIT_REWRITABLE_JUMP | IS_CALL))
- return code_ptr;
+ goto exit;
#else
if (jump->flags & SLJIT_REWRITABLE_JUMP)
- return code_ptr;
+ goto exit;
#endif
if (jump->flags & JUMP_ADDR)
@@ -333,13 +508,12 @@ static SLJIT_INLINE sljit_ins* detect_jump_type(struct sljit_jump *jump, sljit_i
target_addr = (sljit_uw)(code + jump->u.label->size) + (sljit_uw)executable_offset;
}
- inst = (sljit_ins *)jump->addr;
if (jump->flags & IS_COND)
inst--;
#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
if (jump->flags & IS_CALL)
- goto keep_address;
+ goto preserve_addr;
#endif
/* B instructions. */
@@ -360,18 +534,17 @@ static SLJIT_INLINE sljit_ins* detect_jump_type(struct sljit_jump *jump, sljit_i
jump->addr -= 2 * sizeof(sljit_ins);
return inst;
}
- }
- else {
+ } else {
diff = ((sljit_sw)target_addr - (sljit_sw)(inst + 1) - executable_offset) >> 2;
if (diff <= SIMM_MAX && diff >= SIMM_MIN) {
jump->flags |= PATCH_B;
if (!(jump->flags & IS_COND)) {
inst[0] = (jump->flags & IS_JAL) ? BAL : B;
- inst[1] = NOP;
+ /* Keep inst[1] */
return inst + 1;
}
- inst[0] = inst[0] ^ invert_branch(jump->flags);
+ inst[0] ^= invert_branch(jump->flags);
inst[1] = NOP;
jump->addr -= sizeof(sljit_ins);
return inst + 1;
@@ -379,7 +552,7 @@ static SLJIT_INLINE sljit_ins* detect_jump_type(struct sljit_jump *jump, sljit_i
}
if (jump->flags & IS_COND) {
- if ((jump->flags & IS_MOVABLE) && (target_addr & ~0xfffffff) == ((jump->addr + 2 * sizeof(sljit_ins)) & ~0xfffffff)) {
+ if ((jump->flags & IS_MOVABLE) && (target_addr & ~(sljit_uw)0xfffffff) == ((jump->addr + 2 * sizeof(sljit_ins)) & ~(sljit_uw)0xfffffff)) {
jump->flags |= PATCH_J;
saved_inst = inst[0];
inst[0] = inst[-1];
@@ -388,7 +561,7 @@ static SLJIT_INLINE sljit_ins* detect_jump_type(struct sljit_jump *jump, sljit_i
inst[2] = NOP;
return inst + 2;
}
- else if ((target_addr & ~0xfffffff) == ((jump->addr + 3 * sizeof(sljit_ins)) & ~0xfffffff)) {
+ else if ((target_addr & ~(sljit_uw)0xfffffff) == ((jump->addr + 3 * sizeof(sljit_ins)) & ~(sljit_uw)0xfffffff)) {
jump->flags |= PATCH_J;
inst[0] = (inst[0] & 0xffff0000) | 3;
inst[1] = NOP;
@@ -400,7 +573,7 @@ static SLJIT_INLINE sljit_ins* detect_jump_type(struct sljit_jump *jump, sljit_i
}
else {
/* J instructions. */
- if ((jump->flags & IS_MOVABLE) && (target_addr & ~0xfffffff) == (jump->addr & ~0xfffffff)) {
+ if ((jump->flags & IS_MOVABLE) && (target_addr & ~(sljit_uw)0xfffffff) == (jump->addr & ~(sljit_uw)0xfffffff)) {
jump->flags |= PATCH_J;
inst[0] = inst[-1];
inst[-1] = (jump->flags & IS_JAL) ? JAL : J;
@@ -408,39 +581,49 @@ static SLJIT_INLINE sljit_ins* detect_jump_type(struct sljit_jump *jump, sljit_i
return inst;
}
- if ((target_addr & ~0xfffffff) == ((jump->addr + sizeof(sljit_ins)) & ~0xfffffff)) {
+ if ((target_addr & ~(sljit_uw)0xfffffff) == ((jump->addr + sizeof(sljit_ins)) & ~(sljit_uw)0xfffffff)) {
jump->flags |= PATCH_J;
inst[0] = (jump->flags & IS_JAL) ? JAL : J;
- inst[1] = NOP;
+ /* Keep inst[1] */
return inst + 1;
}
}
+ if (jump->flags & IS_COND)
+ inst++;
+
#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
-keep_address:
+preserve_addr:
if (target_addr <= 0x7fffffff) {
jump->flags |= PATCH_ABS32;
- if (jump->flags & IS_COND) {
- inst[0] -= 4;
- inst++;
- }
- inst[2] = inst[6];
- inst[3] = inst[7];
+ if (jump->flags & IS_COND)
+ inst[-1] -= 4;
+
+ inst[2] = inst[0];
+ inst[3] = inst[1];
return inst + 3;
}
if (target_addr <= 0x7fffffffffffl) {
jump->flags |= PATCH_ABS48;
- if (jump->flags & IS_COND) {
- inst[0] -= 2;
- inst++;
- }
- inst[4] = inst[6];
- inst[5] = inst[7];
+ if (jump->flags & IS_COND)
+ inst[-1] -= 2;
+
+ inst[4] = inst[0];
+ inst[5] = inst[1];
return inst + 5;
}
#endif
- return code_ptr;
+exit:
+#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
+ inst[2] = inst[0];
+ inst[3] = inst[1];
+ return inst + 3;
+#else
+ inst[6] = inst[0];
+ inst[7] = inst[1];
+ return inst + 7;
+#endif
}
#ifdef __GNUC__
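[Editor's note: the PATCH_B case in detect_jump_type above keeps a short branch only when the displacement fits the 16-bit branch field. A standalone restatement of that reachability rule (a sketch; SIMM_MAX/SIMM_MIN are the 0x7fff/-0x8000 limits defined earlier in this file):]

#include <stdint.h>

static int fits_patch_b(intptr_t target, intptr_t branch)
{
	/* MIPS branches encode (target - (branch + 4)) >> 2 in a signed
	 * 16-bit field, giving roughly +/-128 KiB of reach. */
	intptr_t diff = (target - (branch + 4)) >> 2;
	return diff <= 32767 && diff >= -32768;
}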
@@ -455,30 +638,52 @@ static __attribute__ ((noinline)) void sljit_cache_flush(void* code, void* code_
static SLJIT_INLINE sljit_sw put_label_get_length(struct sljit_put_label *put_label, sljit_uw max_label)
{
if (max_label < 0x80000000l) {
- put_label->flags = 0;
+ put_label->flags = PATCH_ABS32;
return 1;
}
if (max_label < 0x800000000000l) {
- put_label->flags = 1;
+ put_label->flags = PATCH_ABS48;
return 3;
}
- put_label->flags = 2;
+ put_label->flags = 0;
return 5;
}
-static SLJIT_INLINE void put_label_set(struct sljit_put_label *put_label)
+#endif /* SLJIT_CONFIG_MIPS_64 */
+
+static SLJIT_INLINE void load_addr_to_reg(void *dst, sljit_u32 reg)
{
- sljit_uw addr = put_label->label->addr;
- sljit_ins *inst = (sljit_ins *)put_label->addr;
- sljit_s32 reg = *inst;
+ struct sljit_jump *jump;
+ struct sljit_put_label *put_label;
+ sljit_uw flags;
+ sljit_ins *inst;
+ sljit_uw addr;
- if (put_label->flags == 0) {
+ if (reg != 0) {
+ jump = (struct sljit_jump*)dst;
+ flags = jump->flags;
+ inst = (sljit_ins*)jump->addr;
+ addr = (flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target;
+ } else {
+ put_label = (struct sljit_put_label*)dst;
+#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
+ flags = put_label->flags;
+#endif
+ inst = (sljit_ins*)put_label->addr;
+ addr = put_label->label->addr;
+ reg = *inst;
+ }
+
+#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
+ inst[0] = LUI | T(reg) | IMM(addr >> 16);
+#else /* !SLJIT_CONFIG_MIPS_32 */
+ if (flags & PATCH_ABS32) {
SLJIT_ASSERT(addr < 0x80000000l);
inst[0] = LUI | T(reg) | IMM(addr >> 16);
}
- else if (put_label->flags == 1) {
+ else if (flags & PATCH_ABS48) {
SLJIT_ASSERT(addr < 0x800000000000l);
inst[0] = LUI | T(reg) | IMM(addr >> 32);
inst[1] = ORI | S(reg) | T(reg) | IMM((addr >> 16) & 0xffff);
@@ -493,12 +698,11 @@ static SLJIT_INLINE void put_label_set(struct sljit_put_label *put_label)
inst[4] = DSLL | T(reg) | D(reg) | SH_IMM(16);
inst += 4;
}
+#endif /* SLJIT_CONFIG_MIPS_32 */
inst[1] = ORI | S(reg) | T(reg) | IMM(addr & 0xffff);
}
-#endif
-
SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler)
{
struct sljit_memory_fragment *buf;
@@ -548,16 +752,17 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
/* These structures are ordered by their address. */
if (label && label->size == word_count) {
label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
- label->size = code_ptr - code;
+ label->size = (sljit_uw)(code_ptr - code);
label = label->next;
}
if (jump && jump->addr == word_count) {
#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
- jump->addr = (sljit_uw)(code_ptr - 3);
+ word_count += 2;
#else
- jump->addr = (sljit_uw)(code_ptr - 7);
+ word_count += 6;
#endif
- code_ptr = detect_jump_type(jump, code_ptr, code, executable_offset);
+ jump->addr = (sljit_uw)(code_ptr - 1);
+ code_ptr = detect_jump_type(jump, code, executable_offset);
jump = jump->next;
}
if (const_ && const_->addr == word_count) {
@@ -567,7 +772,10 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
if (put_label && put_label->addr == word_count) {
SLJIT_ASSERT(put_label->label);
put_label->addr = (sljit_uw)code_ptr;
-#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
+#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
+ code_ptr += 1;
+ word_count += 1;
+#else
code_ptr += put_label_get_length(put_label, (sljit_uw)(SLJIT_ADD_EXEC_OFFSET(code, executable_offset) + put_label->label->size));
word_count += 5;
#endif
@@ -575,8 +783,8 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
}
next_addr = compute_next_addr(label, jump, const_, put_label);
}
- code_ptr ++;
- word_count ++;
+ code_ptr++;
+ word_count++;
} while (buf_ptr < buf_end);
buf = buf->next;
@@ -584,7 +792,7 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
if (label && label->size == word_count) {
label->addr = (sljit_uw)code_ptr;
- label->size = code_ptr - code;
+ label->size = (sljit_uw)(code_ptr - code);
label = label->next;
}
@@ -601,62 +809,32 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
buf_ptr = (sljit_ins *)jump->addr;
if (jump->flags & PATCH_B) {
- addr = (sljit_sw)(addr - ((sljit_uw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset) + sizeof(sljit_ins))) >> 2;
+ addr = (sljit_uw)((sljit_sw)(addr - (sljit_uw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset) - sizeof(sljit_ins)) >> 2);
SLJIT_ASSERT((sljit_sw)addr <= SIMM_MAX && (sljit_sw)addr >= SIMM_MIN);
- buf_ptr[0] = (buf_ptr[0] & 0xffff0000) | (addr & 0xffff);
+ buf_ptr[0] = (buf_ptr[0] & 0xffff0000) | ((sljit_ins)addr & 0xffff);
break;
}
if (jump->flags & PATCH_J) {
- SLJIT_ASSERT((addr & ~0xfffffff) == (((sljit_uw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset) + sizeof(sljit_ins)) & ~0xfffffff));
- buf_ptr[0] |= (addr >> 2) & 0x03ffffff;
+ SLJIT_ASSERT((addr & ~(sljit_uw)0xfffffff)
+ == (((sljit_uw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset) + sizeof(sljit_ins)) & ~(sljit_uw)0xfffffff));
+ buf_ptr[0] |= (sljit_ins)(addr >> 2) & 0x03ffffff;
break;
}
- /* Set the fields of immediate loads. */
-#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
- buf_ptr[0] = (buf_ptr[0] & 0xffff0000) | ((addr >> 16) & 0xffff);
- buf_ptr[1] = (buf_ptr[1] & 0xffff0000) | (addr & 0xffff);
-#else
- if (jump->flags & PATCH_ABS32) {
- SLJIT_ASSERT(addr <= 0x7fffffff);
- buf_ptr[0] = (buf_ptr[0] & 0xffff0000) | ((addr >> 16) & 0xffff);
- buf_ptr[1] = (buf_ptr[1] & 0xffff0000) | (addr & 0xffff);
- }
- else if (jump->flags & PATCH_ABS48) {
- SLJIT_ASSERT(addr <= 0x7fffffffffffl);
- buf_ptr[0] = (buf_ptr[0] & 0xffff0000) | ((addr >> 32) & 0xffff);
- buf_ptr[1] = (buf_ptr[1] & 0xffff0000) | ((addr >> 16) & 0xffff);
- buf_ptr[3] = (buf_ptr[3] & 0xffff0000) | (addr & 0xffff);
- }
- else {
- buf_ptr[0] = (buf_ptr[0] & 0xffff0000) | ((addr >> 48) & 0xffff);
- buf_ptr[1] = (buf_ptr[1] & 0xffff0000) | ((addr >> 32) & 0xffff);
- buf_ptr[3] = (buf_ptr[3] & 0xffff0000) | ((addr >> 16) & 0xffff);
- buf_ptr[5] = (buf_ptr[5] & 0xffff0000) | (addr & 0xffff);
- }
-#endif
+ load_addr_to_reg(jump, PIC_ADDR_REG);
} while (0);
jump = jump->next;
}
put_label = compiler->put_labels;
while (put_label) {
-#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
- addr = put_label->label->addr;
- buf_ptr = (sljit_ins *)put_label->addr;
-
- SLJIT_ASSERT((buf_ptr[0] & 0xffe00000) == LUI && (buf_ptr[1] & 0xfc000000) == ORI);
- buf_ptr[0] |= (addr >> 16) & 0xffff;
- buf_ptr[1] |= addr & 0xffff;
-#else
- put_label_set(put_label);
-#endif
+ load_addr_to_reg(put_label, 0);
put_label = put_label->next;
}
compiler->error = SLJIT_ERR_COMPILED;
compiler->executable_offset = executable_offset;
- compiler->executable_size = (code_ptr - code) * sizeof(sljit_ins);
+ compiler->executable_size = (sljit_uw)(code_ptr - code) * sizeof(sljit_ins);
code = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(code, executable_offset);
code_ptr = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
@@ -673,33 +851,49 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
{
- sljit_sw fir = 0;
-
switch (feature_type) {
+#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) \
+ && (!defined(SLJIT_IS_FPU_AVAILABLE) || SLJIT_IS_FPU_AVAILABLE)
+ case SLJIT_HAS_F64_AS_F32_PAIR:
+ if (!cpu_feature_list)
+ get_cpu_features();
+
+ return (cpu_feature_list & CPU_FEATURE_FR) != 0;
+#endif /* SLJIT_CONFIG_MIPS_32 && SLJIT_IS_FPU_AVAILABLE */
case SLJIT_HAS_FPU:
-#ifdef SLJIT_IS_FPU_AVAILABLE
- return SLJIT_IS_FPU_AVAILABLE;
-#elif defined(__GNUC__)
- __asm__ ("cfc1 %0, $0" : "=r"(fir));
- return (fir >> 22) & 0x1;
-#else
-#error "FIR check is not implemented for this architecture"
-#endif
+ if (!cpu_feature_list)
+ get_cpu_features();
+
+ return (cpu_feature_list & CPU_FEATURE_FPU) != 0;
case SLJIT_HAS_ZERO_REGISTER:
+ case SLJIT_HAS_COPY_F32:
+ case SLJIT_HAS_COPY_F64:
return 1;
-
#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1)
case SLJIT_HAS_CLZ:
case SLJIT_HAS_CMOV:
case SLJIT_HAS_PREFETCH:
return 1;
-#endif /* SLJIT_MIPS_REV >= 1 */
+ case SLJIT_HAS_CTZ:
+ return 2;
+#endif /* SLJIT_MIPS_REV >= 1 */
+#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 2)
+ case SLJIT_HAS_REV:
+ case SLJIT_HAS_ROT:
+ return 1;
+#endif /* SLJIT_MIPS_REV >= 2 */
default:
- return fir;
+ return 0;
}
}
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_cmp_info(sljit_s32 type)
+{
+ SLJIT_UNUSED_ARG(type);
+ return 0;
+}
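Note that SLJIT_HAS_CTZ returns 2 rather than 1 here: on MIPS32r1 and later CTZ is synthesized from CLZ (see emit_single_op below), and the value above 1 appears to be sljit's way of marking emulated support. A minimal caller-side sketch:

	sljit_s32 ctz = sljit_has_cpu_feature(SLJIT_HAS_CTZ);
	if (ctz == 1) {
		/* native count-trailing-zeros */
	} else if (ctz != 0) {
		/* available, but emulated (on MIPS: via CLZ) */
	}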
+
/* --------------------------------------------------------------------- */
/* Entry, exit */
/* --------------------------------------------------------------------- */
@@ -723,26 +917,26 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
#define CUMULATIVE_OP 0x00080
#define LOGICAL_OP 0x00100
#define IMM_OP 0x00200
-#define SRC2_IMM 0x00400
+#define MOVE_OP 0x00400
+#define SRC2_IMM 0x00800
+
+#define UNUSED_DEST 0x01000
+#define REG_DEST 0x02000
+#define REG1_SOURCE 0x04000
+#define REG2_SOURCE 0x08000
+#define SLOW_SRC1 0x10000
+#define SLOW_SRC2 0x20000
+#define SLOW_DEST 0x40000
-#define UNUSED_DEST 0x00800
-#define REG_DEST 0x01000
-#define REG1_SOURCE 0x02000
-#define REG2_SOURCE 0x04000
-#define SLOW_SRC1 0x08000
-#define SLOW_SRC2 0x10000
-#define SLOW_DEST 0x20000
+static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg_ar, sljit_s32 arg, sljit_sw argw);
+static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler, sljit_s32 frame_size, sljit_ins *ins_ptr);
#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
-#define STACK_STORE SW
-#define STACK_LOAD LW
+#define SELECT_OP(a, b) (b)
#else
-#define STACK_STORE SD
-#define STACK_LOAD LD
+#define SELECT_OP(a, b) (!(op & SLJIT_32) ? a : b)
#endif
-static SLJIT_INLINE sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg_ar, sljit_s32 arg, sljit_sw argw);
-
#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
#include "sljitNativeMIPS_32.c"
#else
@@ -754,56 +948,207 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
sljit_ins base;
- sljit_s32 args, i, tmp, offs;
+ sljit_s32 i, tmp, offset;
+ sljit_s32 arg_count, word_arg_count, float_arg_count;
+ sljit_s32 saved_arg_count = SLJIT_KEPT_SAVEDS_COUNT(options);
CHECK_ERROR();
CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
- local_size += GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1) + SLJIT_LOCALS_OFFSET;
+ local_size += GET_SAVED_REGISTERS_SIZE(scratches, saveds - saved_arg_count, 1);
#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
- local_size = (local_size + 15) & ~0xf;
+ if (fsaveds > 0 || fscratches >= SLJIT_FIRST_SAVED_FLOAT_REG) {
+ if ((local_size & SSIZE_OF(sw)) != 0)
+ local_size += SSIZE_OF(sw);
+ local_size += GET_SAVED_FLOAT_REGISTERS_SIZE(fscratches, fsaveds, f64);
+ }
+
+ local_size = (local_size + SLJIT_LOCALS_OFFSET + 15) & ~0xf;
#else
- local_size = (local_size + 31) & ~0x1f;
+ local_size += GET_SAVED_FLOAT_REGISTERS_SIZE(fscratches, fsaveds, f64);
+ local_size = (local_size + SLJIT_LOCALS_OFFSET + 31) & ~0x1f;
#endif
compiler->local_size = local_size;
- if (local_size <= SIMM_MAX) {
+ offset = 0;
+#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
+ if (!(options & SLJIT_ENTER_REG_ARG)) {
+ tmp = arg_types >> SLJIT_ARG_SHIFT;
+ arg_count = 0;
+
+ while (tmp) {
+ offset = arg_count;
+ if ((tmp & SLJIT_ARG_MASK) == SLJIT_ARG_TYPE_F64) {
+ if ((arg_count & 0x1) != 0)
+ arg_count++;
+ arg_count++;
+ }
+
+ arg_count++;
+ tmp >>= SLJIT_ARG_SHIFT;
+ }
+
+ compiler->args_size = (sljit_uw)arg_count << 2;
+ offset = (offset >= 4) ? (offset << 2) : 0;
+ }
+#endif /* SLJIT_CONFIG_MIPS_32 */
+
+ if (local_size + offset <= -SIMM_MIN) {
/* Frequent case. */
FAIL_IF(push_inst(compiler, ADDIU_W | S(SLJIT_SP) | T(SLJIT_SP) | IMM(-local_size), DR(SLJIT_SP)));
base = S(SLJIT_SP);
- offs = local_size - (sljit_sw)sizeof(sljit_sw);
- }
- else {
- FAIL_IF(load_immediate(compiler, DR(OTHER_FLAG), local_size));
+ offset = local_size - SSIZE_OF(sw);
+ } else {
+ FAIL_IF(load_immediate(compiler, OTHER_FLAG, local_size));
FAIL_IF(push_inst(compiler, ADDU_W | S(SLJIT_SP) | TA(0) | D(TMP_REG2), DR(TMP_REG2)));
- FAIL_IF(push_inst(compiler, SUBU_W | S(SLJIT_SP) | T(OTHER_FLAG) | D(SLJIT_SP), DR(SLJIT_SP)));
+ FAIL_IF(push_inst(compiler, SUBU_W | S(SLJIT_SP) | TA(OTHER_FLAG) | D(SLJIT_SP), DR(SLJIT_SP)));
base = S(TMP_REG2);
+ offset = -SSIZE_OF(sw);
+#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
local_size = 0;
- offs = -(sljit_sw)sizeof(sljit_sw);
+#endif
}
- FAIL_IF(push_inst(compiler, STACK_STORE | base | TA(RETURN_ADDR_REG) | IMM(offs), MOVABLE_INS));
+ FAIL_IF(push_inst(compiler, STORE_W | base | TA(RETURN_ADDR_REG) | IMM(offset), UNMOVABLE_INS));
- tmp = saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - saveds) : SLJIT_FIRST_SAVED_REG;
- for (i = SLJIT_S0; i >= tmp; i--) {
- offs -= (sljit_s32)(sizeof(sljit_sw));
- FAIL_IF(push_inst(compiler, STACK_STORE | base | T(i) | IMM(offs), MOVABLE_INS));
+ tmp = SLJIT_S0 - saveds;
+ for (i = SLJIT_S0 - saved_arg_count; i > tmp; i--) {
+ offset -= SSIZE_OF(sw);
+ FAIL_IF(push_inst(compiler, STORE_W | base | T(i) | IMM(offset), MOVABLE_INS));
}
for (i = scratches; i >= SLJIT_FIRST_SAVED_REG; i--) {
- offs -= (sljit_s32)(sizeof(sljit_sw));
- FAIL_IF(push_inst(compiler, STACK_STORE | base | T(i) | IMM(offs), MOVABLE_INS));
+ offset -= SSIZE_OF(sw);
+ FAIL_IF(push_inst(compiler, STORE_W | base | T(i) | IMM(offset), MOVABLE_INS));
+ }
+
+#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
+ /* This alignment is valid because offset is not used after storing FPU regs. */
+ if ((offset & SSIZE_OF(sw)) != 0)
+ offset -= SSIZE_OF(sw);
+#endif
+
+ tmp = SLJIT_FS0 - fsaveds;
+ for (i = SLJIT_FS0; i > tmp; i--) {
+ offset -= SSIZE_OF(f64);
+ FAIL_IF(push_inst(compiler, SDC1 | base | FT(i) | IMM(offset), MOVABLE_INS));
}
- args = get_arg_count(arg_types);
+ for (i = fscratches; i >= SLJIT_FIRST_SAVED_FLOAT_REG; i--) {
+ offset -= SSIZE_OF(f64);
+ FAIL_IF(push_inst(compiler, SDC1 | base | FT(i) | IMM(offset), MOVABLE_INS));
+ }
+
+ if (options & SLJIT_ENTER_REG_ARG)
+ return SLJIT_SUCCESS;
+
+ arg_types >>= SLJIT_ARG_SHIFT;
+ arg_count = 0;
+ word_arg_count = 0;
+ float_arg_count = 0;
+
+#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
+ /* At most the first two floating point arguments are passed in floating
+ point registers if no integer argument precedes them. The first 16 bytes
+ of arguments are passed in four integer registers; the rest is placed on
+ the stack. The floating point arguments also occupy part of these first
+ 16 bytes, so their corresponding integer registers are not used when they
+ are present. */
+
+ while (arg_types) {
+ switch (arg_types & SLJIT_ARG_MASK) {
+ case SLJIT_ARG_TYPE_F64:
+ float_arg_count++;
+ if ((arg_count & 0x1) != 0)
+ arg_count++;
+
+ if (word_arg_count == 0 && float_arg_count <= 2) {
+ if (float_arg_count == 1)
+ FAIL_IF(push_inst(compiler, MOV_fmt(FMT_D) | FS(TMP_FREG1) | FD(SLJIT_FR0), MOVABLE_INS));
+ } else if (arg_count < 4) {
+ FAIL_IF(push_inst(compiler, MTC1 | TA(4 + arg_count) | FS(float_arg_count), MOVABLE_INS));
+ switch (cpu_feature_list & CPU_FEATURE_FR) {
+#if defined(SLJIT_MIPS_REV) && SLJIT_MIPS_REV >= 2
+ case CPU_FEATURE_FR:
+ FAIL_IF(push_inst(compiler, MTHC1 | TA(5 + arg_count) | FS(float_arg_count), MOVABLE_INS));
+ break;
+#endif /* SLJIT_MIPS_REV >= 2 */
+ default:
+ FAIL_IF(push_inst(compiler, MTC1 | TA(5 + arg_count) | FS(float_arg_count) | (1 << 11), MOVABLE_INS));
+ break;
+ }
+ } else
+ FAIL_IF(push_inst(compiler, LDC1 | base | FT(float_arg_count) | IMM(local_size + (arg_count << 2)), MOVABLE_INS));
+ arg_count++;
+ break;
+ case SLJIT_ARG_TYPE_F32:
+ float_arg_count++;
+
+ if (word_arg_count == 0 && float_arg_count <= 2) {
+ if (float_arg_count == 1)
+ FAIL_IF(push_inst(compiler, MOV_fmt(FMT_S) | FS(TMP_FREG1) | FD(SLJIT_FR0), MOVABLE_INS));
+ } else if (arg_count < 4)
+ FAIL_IF(push_inst(compiler, MTC1 | TA(4 + arg_count) | FS(float_arg_count), MOVABLE_INS));
+ else
+ FAIL_IF(push_inst(compiler, LWC1 | base | FT(float_arg_count) | IMM(local_size + (arg_count << 2)), MOVABLE_INS));
+ break;
+ default:
+ word_arg_count++;
+
+ if (!(arg_types & SLJIT_ARG_TYPE_SCRATCH_REG)) {
+ tmp = SLJIT_S0 - saved_arg_count;
+ saved_arg_count++;
+ } else if (word_arg_count != arg_count + 1 || arg_count == 0)
+ tmp = word_arg_count;
+ else
+ break;
+
+ if (arg_count < 4)
+ FAIL_IF(push_inst(compiler, ADDU_W | SA(4 + arg_count) | TA(0) | D(tmp), DR(tmp)));
+ else
+ FAIL_IF(push_inst(compiler, LW | base | T(tmp) | IMM(local_size + (arg_count << 2)), DR(tmp)));
+ break;
+ }
+ arg_count++;
+ arg_types >>= SLJIT_ARG_SHIFT;
+ }
- if (args >= 1)
- FAIL_IF(push_inst(compiler, ADDU_W | SA(4) | TA(0) | D(SLJIT_S0), DR(SLJIT_S0)));
- if (args >= 2)
- FAIL_IF(push_inst(compiler, ADDU_W | SA(5) | TA(0) | D(SLJIT_S1), DR(SLJIT_S1)));
- if (args >= 3)
- FAIL_IF(push_inst(compiler, ADDU_W | SA(6) | TA(0) | D(SLJIT_S2), DR(SLJIT_S2)));
+ SLJIT_ASSERT(compiler->args_size == (sljit_uw)arg_count << 2);
+#else /* !SLJIT_CONFIG_MIPS_32 */
+ while (arg_types) {
+ arg_count++;
+ switch (arg_types & SLJIT_ARG_MASK) {
+ case SLJIT_ARG_TYPE_F64:
+ float_arg_count++;
+ if (arg_count != float_arg_count)
+ FAIL_IF(push_inst(compiler, MOV_fmt(FMT_D) | FS(arg_count) | FD(float_arg_count), MOVABLE_INS));
+ else if (arg_count == 1)
+ FAIL_IF(push_inst(compiler, MOV_fmt(FMT_D) | FS(TMP_FREG1) | FD(SLJIT_FR0), MOVABLE_INS));
+ break;
+ case SLJIT_ARG_TYPE_F32:
+ float_arg_count++;
+ if (arg_count != float_arg_count)
+ FAIL_IF(push_inst(compiler, MOV_fmt(FMT_S) | FS(arg_count) | FD(float_arg_count), MOVABLE_INS));
+ else if (arg_count == 1)
+ FAIL_IF(push_inst(compiler, MOV_fmt(FMT_S) | FS(TMP_FREG1) | FD(SLJIT_FR0), MOVABLE_INS));
+ break;
+ default:
+ word_arg_count++;
+
+ if (!(arg_types & SLJIT_ARG_TYPE_SCRATCH_REG)) {
+ tmp = SLJIT_S0 - saved_arg_count;
+ saved_arg_count++;
+ } else if (word_arg_count != arg_count || word_arg_count <= 1)
+ tmp = word_arg_count;
+ else
+ break;
+
+ FAIL_IF(push_inst(compiler, ADDU_W | SA(3 + arg_count) | TA(0) | D(tmp), DR(tmp)));
+ break;
+ }
+ arg_types >>= SLJIT_ARG_SHIFT;
+ }
+#endif /* SLJIT_CONFIG_MIPS_32 */
return SLJIT_SUCCESS;
}
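A concrete o32 example of the argument moves above (signature hypothetical):

	/* f(sljit_s32 a, sljit_f64 b, sljit_s32 c) under o32:
	   a -> $a0                     (slot 0)
	   b -> $a2/$a3                 (slot 1 skipped to 8-byte align the f64)
	   c -> stack, 16(sp) at entry  (the four register slots are exhausted) */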
@@ -816,61 +1161,149 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *comp
CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
- local_size += GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1) + SLJIT_LOCALS_OFFSET;
+ local_size += GET_SAVED_REGISTERS_SIZE(scratches, saveds - SLJIT_KEPT_SAVEDS_COUNT(options), 1);
#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
- compiler->local_size = (local_size + 15) & ~0xf;
+ if (fsaveds > 0 || fscratches >= SLJIT_FIRST_SAVED_FLOAT_REG) {
+ if ((local_size & SSIZE_OF(sw)) != 0)
+ local_size += SSIZE_OF(sw);
+ local_size += GET_SAVED_FLOAT_REGISTERS_SIZE(fscratches, fsaveds, f64);
+ }
+
+ compiler->local_size = (local_size + SLJIT_LOCALS_OFFSET + 15) & ~0xf;
#else
- compiler->local_size = (local_size + 31) & ~0x1f;
+ local_size += GET_SAVED_FLOAT_REGISTERS_SIZE(fscratches, fsaveds, f64);
+ compiler->local_size = (local_size + SLJIT_LOCALS_OFFSET + 31) & ~0x1f;
#endif
return SLJIT_SUCCESS;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw)
+static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler, sljit_s32 frame_size, sljit_ins *ins_ptr)
{
- sljit_s32 local_size, i, tmp, offs;
- sljit_ins base;
+ sljit_s32 local_size, i, tmp, offset;
+ sljit_s32 load_return_addr = (frame_size == 0);
+ sljit_s32 scratches = compiler->scratches;
+ sljit_s32 saveds = compiler->saveds;
+ sljit_s32 fsaveds = compiler->fsaveds;
+ sljit_s32 fscratches = compiler->fscratches;
+ sljit_s32 kept_saveds_count = SLJIT_KEPT_SAVEDS_COUNT(compiler->options);
- CHECK_ERROR();
- CHECK(check_sljit_emit_return(compiler, op, src, srcw));
-
- FAIL_IF(emit_mov_before_return(compiler, op, src, srcw));
+ SLJIT_ASSERT(frame_size == 1 || (frame_size & 0xf) == 0);
+ frame_size &= ~0xf;
local_size = compiler->local_size;
- if (local_size <= SIMM_MAX)
- base = S(SLJIT_SP);
- else {
- FAIL_IF(load_immediate(compiler, DR(TMP_REG1), local_size));
- FAIL_IF(push_inst(compiler, ADDU_W | S(SLJIT_SP) | T(TMP_REG1) | D(TMP_REG1), DR(TMP_REG1)));
- base = S(TMP_REG1);
- local_size = 0;
+
+ tmp = GET_SAVED_REGISTERS_SIZE(scratches, saveds - kept_saveds_count, 1);
+#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
+ if (fsaveds > 0 || fscratches >= SLJIT_FIRST_SAVED_FLOAT_REG) {
+ if ((tmp & SSIZE_OF(sw)) != 0)
+ tmp += SSIZE_OF(sw);
+ tmp += GET_SAVED_FLOAT_REGISTERS_SIZE(fscratches, fsaveds, f64);
}
+#else
+ tmp += GET_SAVED_FLOAT_REGISTERS_SIZE(fscratches, fsaveds, f64);
+#endif
+
+ if (local_size <= SIMM_MAX) {
+ if (local_size < frame_size) {
+ FAIL_IF(push_inst(compiler, ADDIU_W | S(SLJIT_SP) | T(SLJIT_SP) | IMM(local_size - frame_size), DR(SLJIT_SP)));
+ local_size = frame_size;
+ }
+ } else {
+ if (tmp < frame_size)
+ tmp = frame_size;
+
+ FAIL_IF(load_immediate(compiler, DR(TMP_REG1), local_size - tmp));
+ FAIL_IF(push_inst(compiler, ADDU_W | S(SLJIT_SP) | T(TMP_REG1) | D(SLJIT_SP), DR(SLJIT_SP)));
+ local_size = tmp;
+ }
+
+ SLJIT_ASSERT(local_size >= frame_size);
- FAIL_IF(push_inst(compiler, STACK_LOAD | base | TA(RETURN_ADDR_REG) | IMM(local_size - (sljit_s32)sizeof(sljit_sw)), RETURN_ADDR_REG));
- offs = local_size - (sljit_s32)GET_SAVED_REGISTERS_SIZE(compiler->scratches, compiler->saveds, 1);
+ offset = local_size - SSIZE_OF(sw);
+ if (load_return_addr)
+ FAIL_IF(push_inst(compiler, LOAD_W | S(SLJIT_SP) | TA(RETURN_ADDR_REG) | IMM(offset), RETURN_ADDR_REG));
- tmp = compiler->scratches;
- for (i = SLJIT_FIRST_SAVED_REG; i <= tmp; i++) {
- FAIL_IF(push_inst(compiler, STACK_LOAD | base | T(i) | IMM(offs), DR(i)));
- offs += (sljit_s32)(sizeof(sljit_sw));
+ tmp = SLJIT_S0 - saveds;
+ for (i = SLJIT_S0 - kept_saveds_count; i > tmp; i--) {
+ offset -= SSIZE_OF(sw);
+ FAIL_IF(push_inst(compiler, LOAD_W | S(SLJIT_SP) | T(i) | IMM(offset), MOVABLE_INS));
}
- tmp = compiler->saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - compiler->saveds) : SLJIT_FIRST_SAVED_REG;
- for (i = tmp; i <= SLJIT_S0; i++) {
- FAIL_IF(push_inst(compiler, STACK_LOAD | base | T(i) | IMM(offs), DR(i)));
- offs += (sljit_s32)(sizeof(sljit_sw));
+ for (i = scratches; i >= SLJIT_FIRST_SAVED_REG; i--) {
+ offset -= SSIZE_OF(sw);
+ FAIL_IF(push_inst(compiler, LOAD_W | S(SLJIT_SP) | T(i) | IMM(offset), MOVABLE_INS));
}
- SLJIT_ASSERT(offs == local_size - (sljit_sw)(sizeof(sljit_sw)));
+#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
+ /* This alignment is valid because offset is not used after storing FPU regs. */
+ if ((offset & SSIZE_OF(sw)) != 0)
+ offset -= SSIZE_OF(sw);
+#endif
- FAIL_IF(push_inst(compiler, JR | SA(RETURN_ADDR_REG), UNMOVABLE_INS));
- if (compiler->local_size <= SIMM_MAX)
- return push_inst(compiler, ADDIU_W | S(SLJIT_SP) | T(SLJIT_SP) | IMM(compiler->local_size), UNMOVABLE_INS);
+ tmp = SLJIT_FS0 - fsaveds;
+ for (i = SLJIT_FS0; i > tmp; i--) {
+ offset -= SSIZE_OF(f64);
+ FAIL_IF(push_inst(compiler, LDC1 | S(SLJIT_SP) | FT(i) | IMM(offset), MOVABLE_INS));
+ }
+
+ for (i = fscratches; i >= SLJIT_FIRST_SAVED_FLOAT_REG; i--) {
+ offset -= SSIZE_OF(f64);
+ FAIL_IF(push_inst(compiler, LDC1 | S(SLJIT_SP) | FT(i) | IMM(offset), MOVABLE_INS));
+ }
+
+ if (local_size > frame_size)
+ *ins_ptr = ADDIU_W | S(SLJIT_SP) | T(SLJIT_SP) | IMM(local_size - frame_size);
else
- return push_inst(compiler, ADDU_W | S(TMP_REG1) | TA(0) | D(SLJIT_SP), UNMOVABLE_INS);
+ *ins_ptr = NOP;
+
+ return SLJIT_SUCCESS;
}
-#undef STACK_STORE
-#undef STACK_LOAD
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler *compiler)
+{
+ sljit_ins ins;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_return_void(compiler));
+
+ emit_stack_frame_release(compiler, 0, &ins);
+
+ FAIL_IF(push_inst(compiler, JR | SA(RETURN_ADDR_REG), UNMOVABLE_INS));
+ return push_inst(compiler, ins, UNMOVABLE_INS);
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_to(struct sljit_compiler *compiler,
+ sljit_s32 src, sljit_sw srcw)
+{
+ sljit_ins ins;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_return_to(compiler, src, srcw));
+
+ if (src & SLJIT_MEM) {
+ ADJUST_LOCAL_OFFSET(src, srcw);
+ FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, DR(PIC_ADDR_REG), src, srcw));
+ src = PIC_ADDR_REG;
+ srcw = 0;
+ } else if (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) {
+ FAIL_IF(push_inst(compiler, ADDU_W | S(src) | TA(0) | D(PIC_ADDR_REG), DR(PIC_ADDR_REG)));
+ src = PIC_ADDR_REG;
+ srcw = 0;
+ }
+
+ FAIL_IF(emit_stack_frame_release(compiler, 1, &ins));
+
+ if (src != SLJIT_IMM) {
+ FAIL_IF(push_inst(compiler, JR | S(src), UNMOVABLE_INS));
+ return push_inst(compiler, ins, UNMOVABLE_INS);
+ }
+
+ if (ins != NOP)
+ FAIL_IF(push_inst(compiler, ins, MOVABLE_INS));
+
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_ijump(compiler, SLJIT_JUMP, src, srcw);
+}
/* --------------------------------------------------------------------- */
/* Operators */
@@ -927,9 +1360,10 @@ static sljit_s32 getput_arg_fast(struct sljit_compiler *compiler, sljit_s32 flag
return 0;
}
+#define TO_ARGW_HI(argw) (((argw) & ~0xffff) + (((argw) & 0x8000) ? 0x10000 : 0))
+
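TO_ARGW_HI rounds argw to the nearest 64KB boundary so that the remaining low 16 bits can be applied as a sign-extended immediate. Worked example: for argw = 0x12348765, bit 15 is set, so the high part is 0x12340000 + 0x10000 = 0x12350000, and adding the sign-extended low half 0x8765 (that is, -0x789b) yields 0x12348765 again.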
/* See getput_arg below.
- Note: can_cache is called only for binary operators. Those
- operators always uses word arguments without write back. */
+ Note: can_cache is called only for binary operators. */
static sljit_s32 can_cache(sljit_s32 arg, sljit_sw argw, sljit_s32 next_arg, sljit_sw next_argw)
{
SLJIT_ASSERT((arg & SLJIT_MEM) && (next_arg & SLJIT_MEM));
@@ -944,7 +1378,8 @@ static sljit_s32 can_cache(sljit_s32 arg, sljit_sw argw, sljit_s32 next_arg, slj
}
if (arg == next_arg) {
- if (((next_argw - argw) <= SIMM_MAX && (next_argw - argw) >= SIMM_MIN))
+ if (((next_argw - argw) <= SIMM_MAX && (next_argw - argw) >= SIMM_MIN)
+ || TO_ARGW_HI(argw) == TO_ARGW_HI(next_argw))
return 1;
return 0;
}
@@ -956,6 +1391,7 @@ static sljit_s32 can_cache(sljit_s32 arg, sljit_sw argw, sljit_s32 next_arg, slj
static sljit_s32 getput_arg(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg_ar, sljit_s32 arg, sljit_sw argw, sljit_s32 next_arg, sljit_sw next_argw)
{
sljit_s32 tmp_ar, base, delay_slot;
+ sljit_sw offset, argw_hi;
SLJIT_ASSERT(arg & SLJIT_MEM);
if (!(next_arg & SLJIT_MEM)) {
@@ -963,6 +1399,8 @@ static sljit_s32 getput_arg(struct sljit_compiler *compiler, sljit_s32 flags, sl
next_argw = 0;
}
+ /* Since tmp can be the same as the base or offset register,
+ * those values might become unavailable after tmp is modified. */
if ((flags & MEM_MASK) <= GPR_REG && (flags & LOAD_DATA)) {
tmp_ar = reg_ar;
delay_slot = reg_ar;
@@ -1010,38 +1448,42 @@ static sljit_s32 getput_arg(struct sljit_compiler *compiler, sljit_s32 flags, sl
return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | SA(tmp_ar) | TA(reg_ar), delay_slot);
}
- if (compiler->cache_arg == arg && argw - compiler->cache_argw <= SIMM_MAX && argw - compiler->cache_argw >= SIMM_MIN) {
- if (argw != compiler->cache_argw) {
- FAIL_IF(push_inst(compiler, ADDIU_W | S(TMP_REG3) | T(TMP_REG3) | IMM(argw - compiler->cache_argw), DR(TMP_REG3)));
- compiler->cache_argw = argw;
- }
- return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | S(TMP_REG3) | TA(reg_ar), delay_slot);
- }
+ if (compiler->cache_arg == arg && argw - compiler->cache_argw <= SIMM_MAX && argw - compiler->cache_argw >= SIMM_MIN)
+ return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | S(TMP_REG3) | TA(reg_ar) | IMM(argw - compiler->cache_argw), delay_slot);
- if (compiler->cache_arg == SLJIT_MEM && argw - compiler->cache_argw <= SIMM_MAX && argw - compiler->cache_argw >= SIMM_MIN) {
- if (argw != compiler->cache_argw)
- FAIL_IF(push_inst(compiler, ADDIU_W | S(TMP_REG3) | T(TMP_REG3) | IMM(argw - compiler->cache_argw), DR(TMP_REG3)));
- }
- else {
+ if (compiler->cache_arg == SLJIT_MEM && (argw - compiler->cache_argw) <= SIMM_MAX && (argw - compiler->cache_argw) >= SIMM_MIN) {
+ offset = argw - compiler->cache_argw;
+ } else {
compiler->cache_arg = SLJIT_MEM;
- FAIL_IF(load_immediate(compiler, DR(TMP_REG3), argw));
+
+ argw_hi = TO_ARGW_HI(argw);
+
+ if (next_arg && next_argw - argw <= SIMM_MAX && next_argw - argw >= SIMM_MIN && argw_hi != TO_ARGW_HI(next_argw)) {
+ FAIL_IF(load_immediate(compiler, DR(TMP_REG3), argw));
+ compiler->cache_argw = argw;
+ offset = 0;
+ } else {
+ FAIL_IF(load_immediate(compiler, DR(TMP_REG3), argw_hi));
+ compiler->cache_argw = argw_hi;
+ offset = argw & 0xffff;
+ argw = argw_hi;
+ }
}
- compiler->cache_argw = argw;
if (!base)
- return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | S(TMP_REG3) | TA(reg_ar), delay_slot);
+ return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | S(TMP_REG3) | TA(reg_ar) | IMM(offset), delay_slot);
if (arg == next_arg && next_argw - argw <= SIMM_MAX && next_argw - argw >= SIMM_MIN) {
compiler->cache_arg = arg;
FAIL_IF(push_inst(compiler, ADDU_W | S(TMP_REG3) | T(base) | D(TMP_REG3), DR(TMP_REG3)));
- return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | S(TMP_REG3) | TA(reg_ar), delay_slot);
+ return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | S(TMP_REG3) | TA(reg_ar) | IMM(offset), delay_slot);
}
FAIL_IF(push_inst(compiler, ADDU_W | S(TMP_REG3) | T(base) | DA(tmp_ar), tmp_ar));
- return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | SA(tmp_ar) | TA(reg_ar), delay_slot);
+ return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | SA(tmp_ar) | TA(reg_ar) | IMM(offset), delay_slot);
}
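The TMP_REG3 cache above lets nearby accesses share one materialized constant. Roughly: a first load at argw 0x12340 misses, loads TO_ARGW_HI(0x12340) = 0x10000 into TMP_REG3 and encodes the remainder as the immediate 0x2340; a second load at 0x12348 then matches the cache_arg/cache_argw pair and only the 16-bit immediate changes (0x2348).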
-static SLJIT_INLINE sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg_ar, sljit_s32 arg, sljit_sw argw)
+static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg_ar, sljit_s32 arg, sljit_sw argw)
{
sljit_s32 tmp_ar, base, delay_slot;
@@ -1063,19 +1505,19 @@ static SLJIT_INLINE sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit
if (SLJIT_UNLIKELY(argw)) {
FAIL_IF(push_inst(compiler, SLL_W | T(OFFS_REG(arg)) | DA(tmp_ar) | SH_IMM(argw), tmp_ar));
- FAIL_IF(push_inst(compiler, ADDU_W | S(base) | TA(tmp_ar) | DA(tmp_ar), tmp_ar));
+ FAIL_IF(push_inst(compiler, ADDU_W | SA(tmp_ar) | T(base) | DA(tmp_ar), tmp_ar));
}
else
FAIL_IF(push_inst(compiler, ADDU_W | S(base) | T(OFFS_REG(arg)) | DA(tmp_ar), tmp_ar));
return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | SA(tmp_ar) | TA(reg_ar), delay_slot);
}
- FAIL_IF(load_immediate(compiler, tmp_ar, argw));
+ FAIL_IF(load_immediate(compiler, tmp_ar, TO_ARGW_HI(argw)));
if (base != 0)
- FAIL_IF(push_inst(compiler, ADDU_W | S(base) | TA(tmp_ar) | DA(tmp_ar), tmp_ar));
+ FAIL_IF(push_inst(compiler, ADDU_W | SA(tmp_ar) | T(base) | DA(tmp_ar), tmp_ar));
- return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | SA(tmp_ar) | TA(reg_ar), delay_slot);
+ return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | SA(tmp_ar) | TA(reg_ar) | IMM(argw), delay_slot);
}
static SLJIT_INLINE sljit_s32 emit_op_mem2(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg1, sljit_sw arg1w, sljit_s32 arg2, sljit_sw arg2w)
@@ -1085,6 +1527,750 @@ static SLJIT_INLINE sljit_s32 emit_op_mem2(struct sljit_compiler *compiler, slji
return getput_arg(compiler, flags, reg, arg1, arg1w, arg2, arg2w);
}
+#define EMIT_LOGICAL(op_imm, op_reg) \
+ if (flags & SRC2_IMM) { \
+ if (op & SLJIT_SET_Z) \
+ FAIL_IF(push_inst(compiler, op_imm | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG)); \
+ if (!(flags & UNUSED_DEST)) \
+ FAIL_IF(push_inst(compiler, op_imm | S(src1) | T(dst) | IMM(src2), DR(dst))); \
+ } \
+ else { \
+ if (op & SLJIT_SET_Z) \
+ FAIL_IF(push_inst(compiler, op_reg | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); \
+ if (!(flags & UNUSED_DEST)) \
+ FAIL_IF(push_inst(compiler, op_reg | S(src1) | T(src2) | D(dst), DR(dst))); \
+ }
+
+#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
+
+#define EMIT_SHIFT(dimm, dimm32, imm, dv, v) \
+ op_imm = (imm); \
+ op_v = (v);
+
+#else /* !SLJIT_CONFIG_MIPS_32 */
+
+
+#define EMIT_SHIFT(dimm, dimm32, imm, dv, v) \
+ op_dimm = (dimm); \
+ op_dimm32 = (dimm32); \
+ op_imm = (imm); \
+ op_dv = (dv); \
+ op_v = (v);
+
+#endif /* SLJIT_CONFIG_MIPS_32 */
+
+#if (!defined SLJIT_MIPS_REV || SLJIT_MIPS_REV < 1)
+
+static sljit_s32 emit_clz_ctz(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 dst, sljit_sw src)
+{
+ sljit_s32 is_clz = (GET_OPCODE(op) == SLJIT_CLZ);
+#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
+ sljit_ins word_size = (op & SLJIT_32) ? 32 : 64;
+#else /* !SLJIT_CONFIG_MIPS_64 */
+ sljit_ins word_size = 32;
+#endif /* SLJIT_CONFIG_MIPS_64 */
+
+ /* TMP_REG2 holds the value for the next iteration. */
+ if (src != TMP_REG2)
+ FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src) | TA(0) | D(TMP_REG2), DR(TMP_REG2)));
+
+ FAIL_IF(push_inst(compiler, BEQ | S(TMP_REG2) | TA(0) | IMM(is_clz ? 13 : 14), UNMOVABLE_INS));
+ /* The OTHER_FLAG is the counter. Delay slot. */
+ FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | SA(0) | TA(OTHER_FLAG) | IMM(word_size), OTHER_FLAG));
+
+ if (!is_clz) {
+ FAIL_IF(push_inst(compiler, ANDI | S(TMP_REG2) | T(TMP_REG1) | IMM(1), DR(TMP_REG1)));
+ FAIL_IF(push_inst(compiler, BNE | S(TMP_REG1) | TA(0) | IMM(11), UNMOVABLE_INS));
+ } else
+ FAIL_IF(push_inst(compiler, BLTZ | S(TMP_REG2) | TA(0) | IMM(11), UNMOVABLE_INS));
+
+ /* Delay slot. */
+ FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | SA(0) | TA(OTHER_FLAG) | IMM(0), OTHER_FLAG));
+
+ /* TMP_REG1 holds the shift amount for the next iteration. */
+ FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | SA(0) | T(TMP_REG1) | IMM(word_size), DR(TMP_REG1)));
+
+ FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(TMP_REG2) | TA(0) | DA(EQUAL_FLAG), EQUAL_FLAG));
+ FAIL_IF(push_inst(compiler, SELECT_OP(DSRL, SRL) | T(TMP_REG1) | D(TMP_REG1) | SH_IMM(1), DR(TMP_REG1)));
+
+ FAIL_IF(push_inst(compiler, (is_clz ? SELECT_OP(DSRLV, SRLV) : SELECT_OP(DSLLV, SLLV)) | S(TMP_REG1) | TA(EQUAL_FLAG) | D(TMP_REG2), DR(TMP_REG2)));
+ FAIL_IF(push_inst(compiler, BNE | S(TMP_REG2) | TA(0) | IMM(-4), UNMOVABLE_INS));
+ /* Delay slot. */
+ FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS));
+
+ FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(TMP_REG1) | T(TMP_REG2) | IMM(-1), DR(TMP_REG2)));
+ FAIL_IF(push_inst(compiler, (is_clz ? SELECT_OP(DSRLV, SRLV) : SELECT_OP(DSLLV, SLLV)) | S(TMP_REG2) | TA(EQUAL_FLAG) | D(TMP_REG2), DR(TMP_REG2)));
+
+ FAIL_IF(push_inst(compiler, BEQ | S(TMP_REG2) | TA(0) | IMM(-7), UNMOVABLE_INS));
+ /* Delay slot. */
+ FAIL_IF(push_inst(compiler, OR | SA(OTHER_FLAG) | T(TMP_REG1) | DA(OTHER_FLAG), OTHER_FLAG));
+
+ return push_inst(compiler, SELECT_OP(DADDU, ADDU) | SA(OTHER_FLAG) | TA(0) | D(dst), DR(dst));
+}
+
+#endif /* SLJIT_MIPS_REV < 1 */
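For reference, the value the loop above produces, written as a linear plain-C scan (the emitted code reaches the same result with a shift-halving loop; 32-bit CLZ case, helper name is ours):

	static sljit_s32 ref_clz32(sljit_u32 v)
	{
		sljit_s32 n = 0;
		if (v == 0)
			return 32; /* the early BEQ exit with counter = word_size */
		while (!(v & 0x80000000u)) {
			v <<= 1;
			n++;
		}
		return n;
	}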
+
+static sljit_s32 emit_rev(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 dst, sljit_sw src)
+{
+#if defined(SLJIT_CONFIG_MIPS_64) && SLJIT_CONFIG_MIPS_64
+ int is_32 = (op & SLJIT_32);
+#endif /* SLJIT_CONFIG_MIPS_64 */
+
+ op = GET_OPCODE(op);
+#if defined(SLJIT_MIPS_REV) && SLJIT_MIPS_REV >= 2
+#if defined(SLJIT_CONFIG_MIPS_64) && SLJIT_CONFIG_MIPS_64
+ if (!is_32 && (op == SLJIT_REV)) {
+ FAIL_IF(push_inst(compiler, DSBH | T(src) | D(dst), DR(dst)));
+ return push_inst(compiler, DSHD | T(dst) | D(dst), DR(dst));
+ }
+ if (op != SLJIT_REV && src != TMP_REG2) {
+ FAIL_IF(push_inst(compiler, SLL | T(src) | D(TMP_REG1), DR(TMP_REG1)));
+ src = TMP_REG1;
+ }
+#endif /* SLJIT_CONFIG_MIPS_64 */
+ FAIL_IF(push_inst(compiler, WSBH | T(src) | D(dst), DR(dst)));
+ FAIL_IF(push_inst(compiler, ROTR | T(dst) | D(dst) | SH_IMM(16), DR(dst)));
+#if defined(SLJIT_CONFIG_MIPS_64) && SLJIT_CONFIG_MIPS_64
+ if (op == SLJIT_REV_U32 && dst != TMP_REG2 && dst != TMP_REG3)
+ FAIL_IF(push_inst(compiler, DINSU | T(dst) | SA(0) | (31 << 11), DR(dst)));
+#endif /* SLJIT_CONFIG_MIPS_64 */
+#else /* SLJIT_MIPS_REV < 2 */
+#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
+ if (!is_32) {
+ FAIL_IF(push_inst(compiler, DSRL32 | T(src) | D(TMP_REG1) | SH_IMM(0), DR(TMP_REG1)));
+ FAIL_IF(push_inst(compiler, ORI | SA(0) | TA(OTHER_FLAG) | 0xffff, OTHER_FLAG));
+ FAIL_IF(push_inst(compiler, DSLL32 | T(src) | D(dst) | SH_IMM(0), DR(dst)));
+ FAIL_IF(push_inst(compiler, DSLL32 | TA(OTHER_FLAG) | DA(OTHER_FLAG) | SH_IMM(0), OTHER_FLAG));
+ FAIL_IF(push_inst(compiler, OR | S(dst) | T(TMP_REG1) | D(dst), DR(dst)));
+
+ FAIL_IF(push_inst(compiler, DSRL | T(dst) | D(TMP_REG1) | SH_IMM(16), DR(TMP_REG1)));
+ FAIL_IF(push_inst(compiler, ORI | SA(OTHER_FLAG) | TA(OTHER_FLAG) | 0xffff, OTHER_FLAG));
+ FAIL_IF(push_inst(compiler, AND | S(dst) | TA(OTHER_FLAG) | D(dst), DR(dst)));
+ FAIL_IF(push_inst(compiler, AND | S(TMP_REG1) | TA(OTHER_FLAG) | D(TMP_REG1), DR(TMP_REG1)));
+ FAIL_IF(push_inst(compiler, DSLL | TA(OTHER_FLAG) | DA(EQUAL_FLAG) | SH_IMM(8), EQUAL_FLAG));
+ FAIL_IF(push_inst(compiler, DSLL | T(dst) | D(dst) | SH_IMM(16), DR(dst)));
+ FAIL_IF(push_inst(compiler, XOR | SA(OTHER_FLAG) | TA(EQUAL_FLAG) | DA(OTHER_FLAG), OTHER_FLAG));
+ FAIL_IF(push_inst(compiler, OR | S(dst) | T(TMP_REG1) | D(dst), DR(dst)));
+
+ FAIL_IF(push_inst(compiler, DSRL | T(dst) | D(TMP_REG1) | SH_IMM(8), DR(TMP_REG1)));
+ FAIL_IF(push_inst(compiler, AND | S(dst) | TA(OTHER_FLAG) | D(dst), DR(dst)));
+ FAIL_IF(push_inst(compiler, AND | S(TMP_REG1) | TA(OTHER_FLAG) | D(TMP_REG1), DR(TMP_REG1)));
+ FAIL_IF(push_inst(compiler, DSLL | T(dst) | D(dst) | SH_IMM(8), DR(dst)));
+ return push_inst(compiler, OR | S(dst) | T(TMP_REG1) | D(dst), DR(dst));
+ }
+
+ if (op != SLJIT_REV && src != TMP_REG2) {
+ FAIL_IF(push_inst(compiler, SLL | T(src) | D(TMP_REG2) | SH_IMM(0), DR(TMP_REG2)));
+ src = TMP_REG2;
+ }
+#endif /* SLJIT_CONFIG_MIPS_64 */
+
+ FAIL_IF(push_inst(compiler, SRL | T(src) | D(TMP_REG1) | SH_IMM(16), DR(TMP_REG1)));
+ FAIL_IF(push_inst(compiler, LUI | TA(OTHER_FLAG) | 0xff, OTHER_FLAG));
+ FAIL_IF(push_inst(compiler, SLL | T(src) | D(dst) | SH_IMM(16), DR(dst)));
+ FAIL_IF(push_inst(compiler, ORI | SA(OTHER_FLAG) | TA(OTHER_FLAG) | 0xff, OTHER_FLAG));
+ FAIL_IF(push_inst(compiler, OR | S(dst) | T(TMP_REG1) | D(dst), DR(dst)));
+
+ FAIL_IF(push_inst(compiler, SRL | T(dst) | D(TMP_REG1) | SH_IMM(8), DR(TMP_REG1)));
+ FAIL_IF(push_inst(compiler, AND | S(dst) | TA(OTHER_FLAG) | D(dst), DR(dst)));
+ FAIL_IF(push_inst(compiler, AND | S(TMP_REG1) | TA(OTHER_FLAG) | D(TMP_REG1), DR(TMP_REG1)));
+ FAIL_IF(push_inst(compiler, SLL | T(dst) | D(dst) | SH_IMM(8), DR(dst)));
+ FAIL_IF(push_inst(compiler, OR | S(dst) | T(TMP_REG1) | D(dst), DR(dst)));
+
+#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
+ if (op == SLJIT_REV_U32 && dst != TMP_REG2 && dst != TMP_REG3) {
+ FAIL_IF(push_inst(compiler, DSLL32 | T(dst) | D(dst) | SH_IMM(0), DR(dst)));
+ FAIL_IF(push_inst(compiler, DSRL32 | T(dst) | D(dst) | SH_IMM(0), DR(dst)));
+ }
+#endif /* SLJIT_CONFIG_MIPS_64 */
+#endif /* SLJIT_MIPS_REV >= 2 */
+ return SLJIT_SUCCESS;
+}
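The REV < 2 fallback above is the classic mask-and-shift byte swap. The same computation in plain C (helper name is ours):

	static sljit_u32 ref_bswap32(sljit_u32 v)
	{
		v = (v << 16) | (v >> 16); /* swap the 16-bit halves (SLL/SRL/OR) */
		return ((v & 0x00ff00ffu) << 8) | ((v >> 8) & 0x00ff00ffu); /* swap bytes within each half */
	}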
+
+static sljit_s32 emit_rev16(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 dst, sljit_sw src)
+{
+#if defined(SLJIT_MIPS_REV) && SLJIT_MIPS_REV >= 2
+#if defined(SLJIT_CONFIG_MIPS_32) && SLJIT_CONFIG_MIPS_32
+ FAIL_IF(push_inst(compiler, WSBH | T(src) | D(dst), DR(dst)));
+#else /* !SLJIT_CONFIG_MIPS_32 */
+ FAIL_IF(push_inst(compiler, DSBH | T(src) | D(dst), DR(dst)));
+#endif /* SLJIT_CONFIG_MIPS_32 */
+ if (GET_OPCODE(op) == SLJIT_REV_U16)
+ return push_inst(compiler, ANDI | S(dst) | T(dst) | 0xffff, DR(dst));
+ else
+ return push_inst(compiler, SEH | T(dst) | D(dst), DR(dst));
+#else /* SLJIT_MIPS_REV < 2 */
+ FAIL_IF(push_inst(compiler, SELECT_OP(DSRL, SRL) | T(src) | D(TMP_REG1) | SH_IMM(8), DR(TMP_REG1)));
+ FAIL_IF(push_inst(compiler, SELECT_OP(DSLL32, SLL) | T(src) | D(dst) | SH_IMM(24), DR(dst)));
+ FAIL_IF(push_inst(compiler, ANDI | S(TMP_REG1) | T(TMP_REG1) | 0xff, DR(TMP_REG1)));
+ FAIL_IF(push_inst(compiler, (GET_OPCODE(op) == SLJIT_REV_U16 ? SELECT_OP(DSRL32, SRL) : SELECT_OP(DSRA32, SRA)) | T(dst) | D(dst) | SH_IMM(16), DR(dst)));
+ return push_inst(compiler, OR | S(dst) | T(TMP_REG1) | D(dst), DR(dst));
+#endif /* SLJIT_MIPS_REV >= 2 */
+}
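The REV < 2 sequence in emit_rev16 amounts to swapping the two low bytes and then sign- or zero-extending the 16-bit result; a plain-C sketch of the signed variant (helper name is ours):

	static sljit_s32 ref_rev_s16(sljit_s32 v)
	{
		return (sljit_s16)(((v & 0xff) << 8) | ((v >> 8) & 0xff));
	}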
+
+static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags,
+ sljit_s32 dst, sljit_s32 src1, sljit_sw src2)
+{
+ sljit_s32 is_overflow, is_carry, carry_src_ar, is_handled;
+ sljit_ins op_imm, op_v;
+#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
+ sljit_ins ins, op_dimm, op_dimm32, op_dv;
+#endif
+
+ switch (GET_OPCODE(op)) {
+ case SLJIT_MOV:
+ SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
+ if (dst != src2)
+ return push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src2) | TA(0) | D(dst), DR(dst));
+ return SLJIT_SUCCESS;
+
+ case SLJIT_MOV_U8:
+ SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
+ if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE))
+ return push_inst(compiler, ANDI | S(src2) | T(dst) | IMM(0xff), DR(dst));
+ SLJIT_ASSERT(dst == src2);
+ return SLJIT_SUCCESS;
+
+ case SLJIT_MOV_S8:
+ SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
+ if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) {
+#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
+#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 2)
+ return push_inst(compiler, SEB | T(src2) | D(dst), DR(dst));
+#else /* SLJIT_MIPS_REV < 2 */
+ FAIL_IF(push_inst(compiler, SLL | T(src2) | D(dst) | SH_IMM(24), DR(dst)));
+ return push_inst(compiler, SRA | T(dst) | D(dst) | SH_IMM(24), DR(dst));
+#endif /* SLJIT_MIPS_REV >= 2 */
+#else /* !SLJIT_CONFIG_MIPS_32 */
+#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 2)
+ if (op & SLJIT_32)
+ return push_inst(compiler, SEB | T(src2) | D(dst), DR(dst));
+#endif /* SLJIT_MIPS_REV >= 2 */
+ FAIL_IF(push_inst(compiler, DSLL32 | T(src2) | D(dst) | SH_IMM(24), DR(dst)));
+ return push_inst(compiler, DSRA32 | T(dst) | D(dst) | SH_IMM(24), DR(dst));
+#endif /* SLJIT_CONFIG_MIPS_32 */
+ }
+ SLJIT_ASSERT(dst == src2);
+ return SLJIT_SUCCESS;
+
+ case SLJIT_MOV_U16:
+ SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
+ if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE))
+ return push_inst(compiler, ANDI | S(src2) | T(dst) | IMM(0xffff), DR(dst));
+ SLJIT_ASSERT(dst == src2);
+ return SLJIT_SUCCESS;
+
+ case SLJIT_MOV_S16:
+ SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
+ if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) {
+#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
+#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 2)
+ return push_inst(compiler, SEH | T(src2) | D(dst), DR(dst));
+#else /* SLJIT_MIPS_REV < 2 */
+ FAIL_IF(push_inst(compiler, SLL | T(src2) | D(dst) | SH_IMM(16), DR(dst)));
+ return push_inst(compiler, SRA | T(dst) | D(dst) | SH_IMM(16), DR(dst));
+#endif /* SLJIT_MIPS_REV >= 2 */
+#else /* !SLJIT_CONFIG_MIPS_32 */
+#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 2)
+ if (op & SLJIT_32)
+ return push_inst(compiler, SEH | T(src2) | D(dst), DR(dst));
+#endif /* SLJIT_MIPS_REV >= 2 */
+ FAIL_IF(push_inst(compiler, DSLL32 | T(src2) | D(dst) | SH_IMM(16), DR(dst)));
+ return push_inst(compiler, DSRA32 | T(dst) | D(dst) | SH_IMM(16), DR(dst));
+#endif /* SLJIT_CONFIG_MIPS_32 */
+ }
+ SLJIT_ASSERT(dst == src2);
+ return SLJIT_SUCCESS;
+
+#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
+ case SLJIT_MOV_U32:
+ SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM) && !(op & SLJIT_32));
+ if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) {
+#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 2)
+ if (dst == src2)
+ return push_inst(compiler, DINSU | T(src2) | SA(0) | (31 << 11), DR(dst));
+#endif /* SLJIT_MIPS_REV >= 2 */
+ FAIL_IF(push_inst(compiler, DSLL32 | T(src2) | D(dst) | SH_IMM(0), DR(dst)));
+ return push_inst(compiler, DSRL32 | T(dst) | D(dst) | SH_IMM(0), DR(dst));
+ }
+ SLJIT_ASSERT(dst == src2);
+ return SLJIT_SUCCESS;
+
+ case SLJIT_MOV_S32:
+ SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM) && !(op & SLJIT_32));
+ if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) {
+ return push_inst(compiler, SLL | T(src2) | D(dst) | SH_IMM(0), DR(dst));
+ }
+ SLJIT_ASSERT(dst == src2);
+ return SLJIT_SUCCESS;
+#endif /* SLJIT_CONFIG_MIPS_64 */
+
+#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1)
+ case SLJIT_CLZ:
+ SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
+#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6)
+ return push_inst(compiler, SELECT_OP(DCLZ, CLZ) | S(src2) | D(dst), DR(dst));
+#else /* SLJIT_MIPS_REV < 6 */
+ return push_inst(compiler, SELECT_OP(DCLZ, CLZ) | S(src2) | T(dst) | D(dst), DR(dst));
+#endif /* SLJIT_MIPS_REV >= 6 */
+ case SLJIT_CTZ:
+ SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
+ FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | SA(0) | T(src2) | D(TMP_REG1), DR(TMP_REG1)));
+ FAIL_IF(push_inst(compiler, AND | S(src2) | T(TMP_REG1) | D(dst), DR(dst)));
+#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6)
+ FAIL_IF(push_inst(compiler, SELECT_OP(DCLZ, CLZ) | S(dst) | D(dst), DR(dst)));
+#else /* SLJIT_MIPS_REV < 6 */
+ FAIL_IF(push_inst(compiler, SELECT_OP(DCLZ, CLZ) | S(dst) | T(dst) | D(dst), DR(dst)));
+#endif /* SLJIT_MIPS_REV >= 6 */
+ FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(dst) | T(TMP_REG1) | IMM(SELECT_OP(-64, -32)), DR(TMP_REG1)));
+ FAIL_IF(push_inst(compiler, SELECT_OP(DSRL32, SRL) | T(TMP_REG1) | D(TMP_REG1) | SH_IMM(SELECT_OP(26, 27)), DR(TMP_REG1)));
+ return push_inst(compiler, XOR | S(dst) | T(TMP_REG1) | D(dst), DR(dst));
+#else /* SLJIT_MIPS_REV < 1 */
+ case SLJIT_CLZ:
+ case SLJIT_CTZ:
+ SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
+ return emit_clz_ctz(compiler, op, dst, src2);
+#endif /* SLJIT_MIPS_REV >= 1 */
+
+ case SLJIT_REV:
+ case SLJIT_REV_U32:
+ case SLJIT_REV_S32:
+ SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM) && src2 != TMP_REG1 && dst != TMP_REG1);
+ return emit_rev(compiler, op, dst, src2);
+
+ case SLJIT_REV_U16:
+ case SLJIT_REV_S16:
+ SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
+ return emit_rev16(compiler, op, dst, src2);
+
+ case SLJIT_ADD:
+ /* Overflow computation (both add and sub): overflow = src1_sign ^ src2_sign ^ result_sign ^ carry_flag */
+ is_overflow = GET_FLAG_TYPE(op) == SLJIT_OVERFLOW;
+ carry_src_ar = GET_FLAG_TYPE(op) == SLJIT_CARRY;
+
+ if (flags & SRC2_IMM) {
+ if (is_overflow) {
+ if (src2 >= 0)
+ FAIL_IF(push_inst(compiler, OR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG));
+ else
+ FAIL_IF(push_inst(compiler, NOR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG));
+ }
+ else if (op & SLJIT_SET_Z)
+ FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG));
+
+ /* Only the zero flag is needed. */
+ if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK))
+ FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | T(dst) | IMM(src2), DR(dst)));
+ }
+ else {
+ if (is_overflow)
+ FAIL_IF(push_inst(compiler, XOR | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
+ else if (op & SLJIT_SET_Z)
+ FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
+
+ if (is_overflow || carry_src_ar != 0) {
+ if (src1 != dst)
+ carry_src_ar = DR(src1);
+ else if (src2 != dst)
+ carry_src_ar = DR(src2);
+ else {
+ FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src1) | TA(0) | DA(OTHER_FLAG), OTHER_FLAG));
+ carry_src_ar = OTHER_FLAG;
+ }
+ }
+
+ /* Only the zero flag is needed. */
+ if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK))
+ FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src1) | T(src2) | D(dst), DR(dst)));
+ }
+
+ /* Carry is zero if a + b >= a or a + b >= b, otherwise it is 1. */
+ if (is_overflow || carry_src_ar != 0) {
+ if (flags & SRC2_IMM)
+ FAIL_IF(push_inst(compiler, SLTIU | S(dst) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG));
+ else
+ FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(carry_src_ar) | DA(OTHER_FLAG), OTHER_FLAG));
+ }
+
+ if (!is_overflow)
+ return SLJIT_SUCCESS;
+
+ FAIL_IF(push_inst(compiler, XOR | S(dst) | TA(EQUAL_FLAG) | D(TMP_REG1), DR(TMP_REG1)));
+ if (op & SLJIT_SET_Z)
+ FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(dst) | TA(0) | DA(EQUAL_FLAG), EQUAL_FLAG));
+ FAIL_IF(push_inst(compiler, SELECT_OP(DSRL32, SRL) | T(TMP_REG1) | D(TMP_REG1) | SH_IMM(31), DR(TMP_REG1)));
+ return push_inst(compiler, XOR | S(TMP_REG1) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG);
+
+ case SLJIT_ADDC:
+ carry_src_ar = GET_FLAG_TYPE(op) == SLJIT_CARRY;
+
+ if (flags & SRC2_IMM) {
+ FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | T(dst) | IMM(src2), DR(dst)));
+ } else {
+ if (carry_src_ar != 0) {
+ if (src1 != dst)
+ carry_src_ar = DR(src1);
+ else if (src2 != dst)
+ carry_src_ar = DR(src2);
+ else {
+ FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src1) | TA(0) | DA(EQUAL_FLAG), EQUAL_FLAG));
+ carry_src_ar = EQUAL_FLAG;
+ }
+ }
+
+ FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src1) | T(src2) | D(dst), DR(dst)));
+ }
+
+ /* Carry is zero if a + b >= a or a + b >= b, otherwise it is 1. */
+ if (carry_src_ar != 0) {
+ if (flags & SRC2_IMM)
+ FAIL_IF(push_inst(compiler, SLTIU | S(dst) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG));
+ else
+ FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(carry_src_ar) | DA(EQUAL_FLAG), EQUAL_FLAG));
+ }
+
+ FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(dst) | TA(OTHER_FLAG) | D(dst), DR(dst)));
+
+ if (carry_src_ar == 0)
+ return SLJIT_SUCCESS;
+
+ /* Set OTHER_FLAG to 1 if (dst == 0) && (OTHER_FLAG == 1). */
+ FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG));
+ /* Set carry flag. */
+ return push_inst(compiler, OR | SA(OTHER_FLAG) | TA(EQUAL_FLAG) | DA(OTHER_FLAG), OTHER_FLAG);
+
+ case SLJIT_SUB:
+ if ((flags & SRC2_IMM) && src2 == SIMM_MIN) {
+ FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(TMP_REG2) | IMM(src2), DR(TMP_REG2)));
+ src2 = TMP_REG2;
+ flags &= ~SRC2_IMM;
+ }
+
+ is_handled = 0;
+
+ if (flags & SRC2_IMM) {
+ if (GET_FLAG_TYPE(op) == SLJIT_LESS) {
+ FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG));
+ is_handled = 1;
+ }
+ else if (GET_FLAG_TYPE(op) == SLJIT_SIG_LESS) {
+ FAIL_IF(push_inst(compiler, SLTI | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG));
+ is_handled = 1;
+ }
+ }
+
+ if (!is_handled && GET_FLAG_TYPE(op) >= SLJIT_LESS && GET_FLAG_TYPE(op) <= SLJIT_SIG_LESS_EQUAL) {
+ is_handled = 1;
+
+ if (flags & SRC2_IMM) {
+ FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(TMP_REG2) | IMM(src2), DR(TMP_REG2)));
+ src2 = TMP_REG2;
+ flags &= ~SRC2_IMM;
+ }
+
+ switch (GET_FLAG_TYPE(op)) {
+ case SLJIT_LESS:
+ FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG));
+ break;
+ case SLJIT_GREATER:
+ FAIL_IF(push_inst(compiler, SLTU | S(src2) | T(src1) | DA(OTHER_FLAG), OTHER_FLAG));
+ break;
+ case SLJIT_SIG_LESS:
+ FAIL_IF(push_inst(compiler, SLT | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG));
+ break;
+ case SLJIT_SIG_GREATER:
+ FAIL_IF(push_inst(compiler, SLT | S(src2) | T(src1) | DA(OTHER_FLAG), OTHER_FLAG));
+ break;
+ }
+ }
+
+ if (is_handled) {
+ if (flags & SRC2_IMM) {
+ if (op & SLJIT_SET_Z)
+ FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | TA(EQUAL_FLAG) | IMM(-src2), EQUAL_FLAG));
+ if (!(flags & UNUSED_DEST))
+ return push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | T(dst) | IMM(-src2), DR(dst));
+ }
+ else {
+ if (op & SLJIT_SET_Z)
+ FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
+ if (!(flags & UNUSED_DEST))
+ return push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(src1) | T(src2) | D(dst), DR(dst));
+ }
+ return SLJIT_SUCCESS;
+ }
+
+ is_overflow = GET_FLAG_TYPE(op) == SLJIT_OVERFLOW;
+ is_carry = GET_FLAG_TYPE(op) == SLJIT_CARRY;
+
+ if (flags & SRC2_IMM) {
+ if (is_overflow) {
+ if (src2 >= 0)
+ FAIL_IF(push_inst(compiler, OR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG));
+ else
+ FAIL_IF(push_inst(compiler, NOR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG));
+ }
+ else if (op & SLJIT_SET_Z)
+ FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | TA(EQUAL_FLAG) | IMM(-src2), EQUAL_FLAG));
+
+ if (is_overflow || is_carry)
+ FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG));
+
+ /* Only the zero flag is needed. */
+ if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK))
+ FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | T(dst) | IMM(-src2), DR(dst)));
+ }
+ else {
+ if (is_overflow)
+ FAIL_IF(push_inst(compiler, XOR | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
+ else if (op & SLJIT_SET_Z)
+ FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
+
+ if (is_overflow || is_carry)
+ FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG));
+
+ /* Only the zero flag is needed. */
+ if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK))
+ FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(src1) | T(src2) | D(dst), DR(dst)));
+ }
+
+ if (!is_overflow)
+ return SLJIT_SUCCESS;
+
+ FAIL_IF(push_inst(compiler, XOR | S(dst) | TA(EQUAL_FLAG) | D(TMP_REG1), DR(TMP_REG1)));
+ if (op & SLJIT_SET_Z)
+ FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(dst) | TA(0) | DA(EQUAL_FLAG), EQUAL_FLAG));
+ FAIL_IF(push_inst(compiler, SELECT_OP(DSRL32, SRL) | T(TMP_REG1) | D(TMP_REG1) | SH_IMM(31), DR(TMP_REG1)));
+ return push_inst(compiler, XOR | S(TMP_REG1) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG);
+
+ case SLJIT_SUBC:
+ if ((flags & SRC2_IMM) && src2 == SIMM_MIN) {
+ FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(TMP_REG2) | IMM(src2), DR(TMP_REG2)));
+ src2 = TMP_REG2;
+ flags &= ~SRC2_IMM;
+ }
+
+ is_carry = GET_FLAG_TYPE(op) == SLJIT_CARRY;
+
+ if (flags & SRC2_IMM) {
+ if (is_carry)
+ FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG));
+
+ FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | T(dst) | IMM(-src2), DR(dst)));
+ }
+ else {
+ if (is_carry)
+ FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
+
+ FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(src1) | T(src2) | D(dst), DR(dst)));
+ }
+
+ if (is_carry)
+ FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(OTHER_FLAG) | D(TMP_REG1), DR(TMP_REG1)));
+
+ FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(dst) | TA(OTHER_FLAG) | D(dst), DR(dst)));
+
+ if (!is_carry)
+ return SLJIT_SUCCESS;
+
+ return push_inst(compiler, OR | SA(EQUAL_FLAG) | T(TMP_REG1) | DA(OTHER_FLAG), OTHER_FLAG);
+
+ case SLJIT_MUL:
+ SLJIT_ASSERT(!(flags & SRC2_IMM));
+
+ if (GET_FLAG_TYPE(op) != SLJIT_OVERFLOW) {
+#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6)
+ return push_inst(compiler, SELECT_OP(DMUL, MUL) | S(src1) | T(src2) | D(dst), DR(dst));
+#elif (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1)
+#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
+ return push_inst(compiler, MUL | S(src1) | T(src2) | D(dst), DR(dst));
+#else /* !SLJIT_CONFIG_MIPS_32 */
+ if (op & SLJIT_32)
+ return push_inst(compiler, MUL | S(src1) | T(src2) | D(dst), DR(dst));
+ FAIL_IF(push_inst(compiler, DMULT | S(src1) | T(src2), MOVABLE_INS));
+ return push_inst(compiler, MFLO | D(dst), DR(dst));
+#endif /* SLJIT_CONFIG_MIPS_32 */
+#else /* SLJIT_MIPS_REV < 1 */
+ FAIL_IF(push_inst(compiler, SELECT_OP(DMULT, MULT) | S(src1) | T(src2), MOVABLE_INS));
+ return push_inst(compiler, MFLO | D(dst), DR(dst));
+#endif /* SLJIT_MIPS_REV >= 6 */
+ }
+
+#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6)
+ FAIL_IF(push_inst(compiler, SELECT_OP(DMUL, MUL) | S(src1) | T(src2) | D(dst), DR(dst)));
+ FAIL_IF(push_inst(compiler, SELECT_OP(DMUH, MUH) | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
+#else /* SLJIT_MIPS_REV < 6 */
+ FAIL_IF(push_inst(compiler, SELECT_OP(DMULT, MULT) | S(src1) | T(src2), MOVABLE_INS));
+ FAIL_IF(push_inst(compiler, MFHI | DA(EQUAL_FLAG), EQUAL_FLAG));
+ FAIL_IF(push_inst(compiler, MFLO | D(dst), DR(dst)));
+#endif /* SLJIT_MIPS_REV >= 6 */
+ FAIL_IF(push_inst(compiler, SELECT_OP(DSRA32, SRA) | T(dst) | DA(OTHER_FLAG) | SH_IMM(31), OTHER_FLAG));
+ return push_inst(compiler, SELECT_OP(DSUBU, SUBU) | SA(EQUAL_FLAG) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG);
+
+ case SLJIT_AND:
+ EMIT_LOGICAL(ANDI, AND);
+ return SLJIT_SUCCESS;
+
+ case SLJIT_OR:
+ EMIT_LOGICAL(ORI, OR);
+ return SLJIT_SUCCESS;
+
+ case SLJIT_XOR:
+ if (!(flags & LOGICAL_OP)) {
+ SLJIT_ASSERT((flags & SRC2_IMM) && src2 == -1);
+ if (op & SLJIT_SET_Z)
+ FAIL_IF(push_inst(compiler, NOR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG));
+ if (!(flags & UNUSED_DEST))
+ FAIL_IF(push_inst(compiler, NOR | S(src1) | T(src1) | D(dst), DR(dst)));
+ return SLJIT_SUCCESS;
+ }
+ EMIT_LOGICAL(XORI, XOR);
+ return SLJIT_SUCCESS;
+
+ case SLJIT_SHL:
+ case SLJIT_MSHL:
+ EMIT_SHIFT(DSLL, DSLL32, SLL, DSLLV, SLLV);
+ break;
+
+ case SLJIT_LSHR:
+ case SLJIT_MLSHR:
+ EMIT_SHIFT(DSRL, DSRL32, SRL, DSRLV, SRLV);
+ break;
+
+ case SLJIT_ASHR:
+ case SLJIT_MASHR:
+ EMIT_SHIFT(DSRA, DSRA32, SRA, DSRAV, SRAV);
+ break;
+
+#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 2)
+ case SLJIT_ROTL:
+ if ((flags & SRC2_IMM) || src2 == 0) {
+#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
+ src2 = -src2 & 0x1f;
+#else /* !SLJIT_CONFIG_MIPS_32 */
+ src2 = -src2 & ((op & SLJIT_32) ? 0x1f : 0x3f);
+#endif /* SLJIT_CONFIG_MIPS_32 */
+ } else {
+ FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | SA(0) | T(src2) | D(TMP_REG2), DR(TMP_REG2)));
+ src2 = TMP_REG2;
+ }
+ /* fallthrough */
+
+ case SLJIT_ROTR:
+ EMIT_SHIFT(DROTR, DROTR32, ROTR, DROTRV, ROTRV);
+ break;
+#else /* SLJIT_MIPS_REV < 2 */
+ case SLJIT_ROTL:
+ case SLJIT_ROTR:
+ if (flags & SRC2_IMM) {
+ SLJIT_ASSERT(src2 != 0);
+#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
+ if (!(op & SLJIT_32)) {
+ if (GET_OPCODE(op) == SLJIT_ROTL)
+ op_imm = ((src2 < 32) ? DSLL : DSLL32);
+ else
+ op_imm = ((src2 < 32) ? DSRL : DSRL32);
+
+ FAIL_IF(push_inst(compiler, op_imm | T(src1) | DA(OTHER_FLAG) | (((sljit_ins)src2 & 0x1f) << 6), OTHER_FLAG));
+
+ src2 = 64 - src2;
+ if (GET_OPCODE(op) == SLJIT_ROTL)
+ op_imm = ((src2 < 32) ? DSRL : DSRL32);
+ else
+ op_imm = ((src2 < 32) ? DSLL : DSLL32);
+
+ FAIL_IF(push_inst(compiler, op_imm | T(src1) | D(dst) | (((sljit_ins)src2 & 0x1f) << 6), DR(dst)));
+ return push_inst(compiler, OR | S(dst) | TA(OTHER_FLAG) | D(dst), DR(dst));
+ }
+#endif /* SLJIT_CONFIG_MIPS_64 */
+
+ op_imm = (GET_OPCODE(op) == SLJIT_ROTL) ? SLL : SRL;
+ FAIL_IF(push_inst(compiler, op_imm | T(src1) | DA(OTHER_FLAG) | ((sljit_ins)src2 << 6), OTHER_FLAG));
+
+ src2 = 32 - src2;
+ op_imm = (GET_OPCODE(op) == SLJIT_ROTL) ? SRL : SLL;
+ FAIL_IF(push_inst(compiler, op_imm | T(src1) | D(dst) | (((sljit_ins)src2 & 0x1f) << 6), DR(dst)));
+ return push_inst(compiler, OR | S(dst) | TA(OTHER_FLAG) | D(dst), DR(dst));
+ }
+
+ if (src2 == 0) {
+ if (dst != src1)
+ return push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src1) | TA(0) | D(dst), DR(dst));
+ return SLJIT_SUCCESS;
+ }
+
+ FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | SA(0) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
+
+#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
+ if (!(op & SLJIT_32)) {
+ op_v = (GET_OPCODE(op) == SLJIT_ROTL) ? DSLLV : DSRLV;
+ FAIL_IF(push_inst(compiler, op_v | S(src2) | T(src1) | DA(OTHER_FLAG), OTHER_FLAG));
+ op_v = (GET_OPCODE(op) == SLJIT_ROTL) ? DSRLV : DSLLV;
+ FAIL_IF(push_inst(compiler, op_v | SA(EQUAL_FLAG) | T(src1) | D(dst), DR(dst)));
+ return push_inst(compiler, OR | S(dst) | TA(OTHER_FLAG) | D(dst), DR(dst));
+ }
+#endif /* SLJIT_CONFIG_MIPS_64 */
+
+ op_v = (GET_OPCODE(op) == SLJIT_ROTL) ? SLLV : SRLV;
+ FAIL_IF(push_inst(compiler, op_v | S(src2) | T(src1) | DA(OTHER_FLAG), OTHER_FLAG));
+ op_v = (GET_OPCODE(op) == SLJIT_ROTL) ? SRLV : SLLV;
+ FAIL_IF(push_inst(compiler, op_v | SA(EQUAL_FLAG) | T(src1) | D(dst), DR(dst)));
+ return push_inst(compiler, OR | S(dst) | TA(OTHER_FLAG) | D(dst), DR(dst));
+#endif /* SLJIT_MIPS_REV >= 2 */
+
+ default:
+ SLJIT_UNREACHABLE();
+ return SLJIT_SUCCESS;
+ }
+
+#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
+ if ((flags & SRC2_IMM) || src2 == 0) {
+ if (op & SLJIT_SET_Z)
+ FAIL_IF(push_inst(compiler, op_imm | T(src1) | DA(EQUAL_FLAG) | SH_IMM(src2), EQUAL_FLAG));
+
+ if (flags & UNUSED_DEST)
+ return SLJIT_SUCCESS;
+ return push_inst(compiler, op_imm | T(src1) | D(dst) | SH_IMM(src2), DR(dst));
+ }
+
+ if (op & SLJIT_SET_Z)
+ FAIL_IF(push_inst(compiler, op_v | S(src2) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG));
+
+ if (flags & UNUSED_DEST)
+ return SLJIT_SUCCESS;
+ return push_inst(compiler, op_v | S(src2) | T(src1) | D(dst), DR(dst));
+#else /* !SLJIT_CONFIG_MIPS_32 */
+ if ((flags & SRC2_IMM) || src2 == 0) {
+ if (src2 >= 32) {
+ SLJIT_ASSERT(!(op & SLJIT_32));
+ ins = op_dimm32;
+ src2 -= 32;
+ }
+ else
+ ins = (op & SLJIT_32) ? op_imm : op_dimm;
+
+ if (op & SLJIT_SET_Z)
+ FAIL_IF(push_inst(compiler, ins | T(src1) | DA(EQUAL_FLAG) | SH_IMM(src2), EQUAL_FLAG));
+
+ if (flags & UNUSED_DEST)
+ return SLJIT_SUCCESS;
+ return push_inst(compiler, ins | T(src1) | D(dst) | SH_IMM(src2), DR(dst));
+ }
+
+ ins = (op & SLJIT_32) ? op_v : op_dv;
+ if (op & SLJIT_SET_Z)
+ FAIL_IF(push_inst(compiler, ins | S(src2) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG));
+
+ if (flags & UNUSED_DEST)
+ return SLJIT_SUCCESS;
+ return push_inst(compiler, ins | S(src2) | T(src1) | D(dst), DR(dst));
+#endif /* SLJIT_CONFIG_MIPS_32 */
+}
+
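+/* Arithmetic instructions sign-extend their 16-bit immediate, logical instructions zero-extend it. */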
+#define CHECK_IMM(flags, srcw) \
+ ((!((flags) & LOGICAL_OP) && ((srcw) <= SIMM_MAX && (srcw) >= SIMM_MIN)) \
+ || (((flags) & LOGICAL_OP) && !((srcw) & ~UIMM_MAX)))
+
static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src1, sljit_sw src1w,
@@ -1104,39 +2290,33 @@ static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s3
compiler->cache_argw = 0;
}
- if (SLJIT_UNLIKELY(dst == SLJIT_UNUSED)) {
+ if (dst == 0) {
SLJIT_ASSERT(HAS_FLAGS(op));
flags |= UNUSED_DEST;
+ dst = TMP_REG2;
}
else if (FAST_IS_REG(dst)) {
dst_r = dst;
flags |= REG_DEST;
- if (op >= SLJIT_MOV && op <= SLJIT_MOV_P)
+ if (flags & MOVE_OP)
sugg_src2_r = dst_r;
}
else if ((dst & SLJIT_MEM) && !getput_arg_fast(compiler, flags | ARG_TEST, DR(TMP_REG1), dst, dstw))
flags |= SLOW_DEST;
if (flags & IMM_OP) {
- if ((src2 & SLJIT_IMM) && src2w) {
- if ((!(flags & LOGICAL_OP) && (src2w <= SIMM_MAX && src2w >= SIMM_MIN))
- || ((flags & LOGICAL_OP) && !(src2w & ~UIMM_MAX))) {
- flags |= SRC2_IMM;
- src2_r = src2w;
- }
- }
- if (!(flags & SRC2_IMM) && (flags & CUMULATIVE_OP) && (src1 & SLJIT_IMM) && src1w) {
- if ((!(flags & LOGICAL_OP) && (src1w <= SIMM_MAX && src1w >= SIMM_MIN))
- || ((flags & LOGICAL_OP) && !(src1w & ~UIMM_MAX))) {
- flags |= SRC2_IMM;
- src2_r = src1w;
-
- /* And swap arguments. */
- src1 = src2;
- src1w = src2w;
- src2 = SLJIT_IMM;
- /* src2w = src2_r unneeded. */
- }
+ if (src2 == SLJIT_IMM && src2w != 0 && CHECK_IMM(flags, src2w)) {
+ flags |= SRC2_IMM;
+ src2_r = src2w;
+ } else if ((flags & CUMULATIVE_OP) && src1 == SLJIT_IMM && src1w != 0 && CHECK_IMM(flags, src1w)) {
+ flags |= SRC2_IMM;
+ src2_r = src1w;
+
+ /* And swap arguments. */
+ src1 = src2;
+ src1w = src2w;
+ src2 = SLJIT_IMM;
+ /* src2w = src2_r unneeded. */
}
}
@@ -1145,7 +2325,7 @@ static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s3
src1_r = src1;
flags |= REG1_SOURCE;
}
- else if (src1 & SLJIT_IMM) {
+ else if (src1 == SLJIT_IMM) {
if (src1w) {
FAIL_IF(load_immediate(compiler, DR(TMP_REG1), src1w));
src1_r = TMP_REG1;
@@ -1165,10 +2345,10 @@ static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s3
if (FAST_IS_REG(src2)) {
src2_r = src2;
flags |= REG2_SOURCE;
- if (!(flags & REG_DEST) && op >= SLJIT_MOV && op <= SLJIT_MOV_P)
- dst_r = src2_r;
+ if ((flags & (REG_DEST | MOVE_OP)) == MOVE_OP)
+ dst_r = (sljit_s32)src2_r;
}
- else if (src2 & SLJIT_IMM) {
+ else if (src2 == SLJIT_IMM) {
if (!(flags & SRC2_IMM)) {
if (src2w) {
FAIL_IF(load_immediate(compiler, DR(sugg_src2_r), src2w));
@@ -1176,8 +2356,12 @@ static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s3
}
else {
src2_r = 0;
- if ((op >= SLJIT_MOV && op <= SLJIT_MOV_P) && (dst & SLJIT_MEM))
- dst_r = 0;
+ if (flags & MOVE_OP) {
+ if (dst & SLJIT_MEM)
+ dst_r = 0;
+ else
+ op = SLJIT_MOV;
+ }
}
}
}
@@ -1218,10 +2402,12 @@ static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s3
return SLJIT_SUCCESS;
}
+#undef CHECK_IMM
+
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op)
{
#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
- sljit_s32 int_op = op & SLJIT_I32_OP;
+ sljit_s32 int_op = op & SLJIT_32;
#endif
CHECK_ERROR();
@@ -1326,11 +2512,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
{
-#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
-# define flags 0
-#else
sljit_s32 flags = 0;
-#endif
CHECK_ERROR();
CHECK(check_sljit_emit_op1(compiler, op, dst, dstw, src, srcw));
@@ -1338,58 +2520,57 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
ADJUST_LOCAL_OFFSET(src, srcw);
#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
- if ((op & SLJIT_I32_OP) && GET_OPCODE(op) >= SLJIT_NOT)
- flags |= INT_DATA | SIGNED_DATA;
+ if (op & SLJIT_32)
+ flags = INT_DATA | SIGNED_DATA;
#endif
switch (GET_OPCODE(op)) {
case SLJIT_MOV:
+#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
+ case SLJIT_MOV_U32:
+ case SLJIT_MOV_S32:
+ case SLJIT_MOV32:
+#endif
case SLJIT_MOV_P:
- return emit_op(compiler, SLJIT_MOV, WORD_DATA, dst, dstw, TMP_REG1, 0, src, srcw);
+ return emit_op(compiler, SLJIT_MOV, WORD_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, srcw);
+#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
case SLJIT_MOV_U32:
-#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
- return emit_op(compiler, SLJIT_MOV_U32, INT_DATA, dst, dstw, TMP_REG1, 0, src, srcw);
-#else
- return emit_op(compiler, SLJIT_MOV_U32, INT_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u32)srcw : srcw);
-#endif
+ return emit_op(compiler, SLJIT_MOV_U32, INT_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, (src == SLJIT_IMM) ? (sljit_u32)srcw : srcw);
case SLJIT_MOV_S32:
-#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
- return emit_op(compiler, SLJIT_MOV_S32, INT_DATA | SIGNED_DATA, dst, dstw, TMP_REG1, 0, src, srcw);
-#else
- return emit_op(compiler, SLJIT_MOV_S32, INT_DATA | SIGNED_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s32)srcw : srcw);
+ case SLJIT_MOV32:
+ return emit_op(compiler, SLJIT_MOV_S32, INT_DATA | SIGNED_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, (src == SLJIT_IMM) ? (sljit_s32)srcw : srcw);
#endif
case SLJIT_MOV_U8:
- return emit_op(compiler, SLJIT_MOV_U8, BYTE_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u8)srcw : srcw);
+ return emit_op(compiler, op, BYTE_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, (src == SLJIT_IMM) ? (sljit_u8)srcw : srcw);
case SLJIT_MOV_S8:
- return emit_op(compiler, SLJIT_MOV_S8, BYTE_DATA | SIGNED_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s8)srcw : srcw);
+ return emit_op(compiler, op, BYTE_DATA | SIGNED_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, (src == SLJIT_IMM) ? (sljit_s8)srcw : srcw);
case SLJIT_MOV_U16:
- return emit_op(compiler, SLJIT_MOV_U16, HALF_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u16)srcw : srcw);
+ return emit_op(compiler, op, HALF_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, (src == SLJIT_IMM) ? (sljit_u16)srcw : srcw);
case SLJIT_MOV_S16:
- return emit_op(compiler, SLJIT_MOV_S16, HALF_DATA | SIGNED_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s16)srcw : srcw);
+ return emit_op(compiler, op, HALF_DATA | SIGNED_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, (src == SLJIT_IMM) ? (sljit_s16)srcw : srcw);
- case SLJIT_NOT:
+ case SLJIT_CLZ:
+ case SLJIT_CTZ:
+ case SLJIT_REV:
return emit_op(compiler, op, flags, dst, dstw, TMP_REG1, 0, src, srcw);
- case SLJIT_NEG:
- compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD_SUB;
- return emit_op(compiler, SLJIT_SUB | GET_ALL_FLAGS(op), flags | IMM_OP, dst, dstw, SLJIT_IMM, 0, src, srcw);
+ case SLJIT_REV_U16:
+ case SLJIT_REV_S16:
+ return emit_op(compiler, op, HALF_DATA, dst, dstw, TMP_REG1, 0, src, srcw);
- case SLJIT_CLZ:
- return emit_op(compiler, op, flags, dst, dstw, TMP_REG1, 0, src, srcw);
+ case SLJIT_REV_U32:
+ case SLJIT_REV_S32:
+ return emit_op(compiler, op | SLJIT_32, INT_DATA, dst, dstw, TMP_REG1, 0, src, srcw);
}
SLJIT_UNREACHABLE();
return SLJIT_SUCCESS;
-
-#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
-# undef flags
-#endif
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compiler, sljit_s32 op,
@@ -1397,27 +2578,20 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w)
{
-#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
-# define flags 0
-#else
sljit_s32 flags = 0;
-#endif
CHECK_ERROR();
- CHECK(check_sljit_emit_op2(compiler, op, dst, dstw, src1, src1w, src2, src2w));
+ CHECK(check_sljit_emit_op2(compiler, op, 0, dst, dstw, src1, src1w, src2, src2w));
ADJUST_LOCAL_OFFSET(dst, dstw);
ADJUST_LOCAL_OFFSET(src1, src1w);
ADJUST_LOCAL_OFFSET(src2, src2w);
- if (dst == SLJIT_UNUSED && !HAS_FLAGS(op))
- return SLJIT_SUCCESS;
-
#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
- if (op & SLJIT_I32_OP) {
+ if (op & SLJIT_32) {
flags |= INT_DATA | SIGNED_DATA;
- if (src1 & SLJIT_IMM)
+ if (src1 == SLJIT_IMM)
src1w = (sljit_s32)src1w;
- if (src2 & SLJIT_IMM)
+ if (src2 == SLJIT_IMM)
src2w = (sljit_s32)src2w;
}
#endif
@@ -1425,32 +2599,41 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
switch (GET_OPCODE(op)) {
case SLJIT_ADD:
case SLJIT_ADDC:
- compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD_SUB;
+ compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD;
return emit_op(compiler, op, flags | CUMULATIVE_OP | IMM_OP, dst, dstw, src1, src1w, src2, src2w);
case SLJIT_SUB:
case SLJIT_SUBC:
- compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD_SUB;
+ compiler->status_flags_state = SLJIT_CURRENT_FLAGS_SUB;
return emit_op(compiler, op, flags | IMM_OP, dst, dstw, src1, src1w, src2, src2w);
case SLJIT_MUL:
compiler->status_flags_state = 0;
return emit_op(compiler, op, flags | CUMULATIVE_OP, dst, dstw, src1, src1w, src2, src2w);
+ case SLJIT_XOR:
+ if ((src1 == SLJIT_IMM && src1w == -1) || (src2 == SLJIT_IMM && src2w == -1)) {
+ return emit_op(compiler, op, flags | CUMULATIVE_OP | IMM_OP, dst, dstw, src1, src1w, src2, src2w);
+ }
+ /* fallthrough */
case SLJIT_AND:
case SLJIT_OR:
- case SLJIT_XOR:
return emit_op(compiler, op, flags | CUMULATIVE_OP | LOGICAL_OP | IMM_OP, dst, dstw, src1, src1w, src2, src2w);
case SLJIT_SHL:
+ case SLJIT_MSHL:
case SLJIT_LSHR:
+ case SLJIT_MLSHR:
case SLJIT_ASHR:
+ case SLJIT_MASHR:
+ case SLJIT_ROTL:
+ case SLJIT_ROTR:
#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
- if (src2 & SLJIT_IMM)
+ if (src2 == SLJIT_IMM)
src2w &= 0x1f;
#else
- if (src2 & SLJIT_IMM) {
- if (op & SLJIT_I32_OP)
+ if (src2 == SLJIT_IMM) {
+ if (op & SLJIT_32)
src2w &= 0x1f;
else
src2w &= 0x3f;
@@ -1461,12 +2644,110 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
SLJIT_UNREACHABLE();
return SLJIT_SUCCESS;
+}
-#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
-# undef flags
-#endif
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2u(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2, sljit_sw src2w)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_op2(compiler, op, 1, 0, 0, src1, src1w, src2, src2w));
+
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_op2(compiler, op, 0, 0, src1, src1w, src2, src2w);
+}
+
+#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
+#define SELECT_OP3(op, src2w, D, D32, W) (((op & SLJIT_32) ? (W) : ((src2w) < 32) ? (D) : (D32)) | (((sljit_ins)src2w & 0x1f) << 6))
+#define SELECT_OP2(op, D, W) ((op & SLJIT_32) ? (W) : (D))
+#else /* !SLJIT_CONFIG_MIPS_64 */
+#define SELECT_OP3(op, src2w, D, D32, W) ((W) | ((sljit_ins)(src2w) << 6))
+#define SELECT_OP2(op, D, W) (W)
+#endif /* SLJIT_CONFIG_MIPS_64 */
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_shift_into(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst_reg,
+ sljit_s32 src1_reg,
+ sljit_s32 src2_reg,
+ sljit_s32 src3, sljit_sw src3w)
+{
+ sljit_s32 is_left;
+ sljit_ins ins1, ins2, ins3;
+#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
+ sljit_s32 inp_flags = ((op & SLJIT_32) ? INT_DATA : WORD_DATA) | LOAD_DATA;
+ sljit_sw bit_length = (op & SLJIT_32) ? 32 : 64;
+#else /* !SLJIT_CONFIG_MIPS_64 */
+ sljit_s32 inp_flags = WORD_DATA | LOAD_DATA;
+ sljit_sw bit_length = 32;
+#endif /* SLJIT_CONFIG_MIPS_64 */
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_shift_into(compiler, op, dst_reg, src1_reg, src2_reg, src3, src3w));
+
+ is_left = (GET_OPCODE(op) == SLJIT_SHL || GET_OPCODE(op) == SLJIT_MSHL);
+
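+ /* Shifting a register into itself is a rotate. */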
+ if (src1_reg == src2_reg) {
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_op2(compiler, (is_left ? SLJIT_ROTL : SLJIT_ROTR) | (op & SLJIT_32), dst_reg, 0, src1_reg, 0, src3, src3w);
+ }
+
+ ADJUST_LOCAL_OFFSET(src3, src3w);
+
+ if (src3 == SLJIT_IMM) {
+ src3w &= bit_length - 1;
+
+ if (src3w == 0)
+ return SLJIT_SUCCESS;
+
+ if (is_left) {
+ ins1 = SELECT_OP3(op, src3w, DSLL, DSLL32, SLL);
+ src3w = bit_length - src3w;
+ ins2 = SELECT_OP3(op, src3w, DSRL, DSRL32, SRL);
+ } else {
+ ins1 = SELECT_OP3(op, src3w, DSRL, DSRL32, SRL);
+ src3w = bit_length - src3w;
+ ins2 = SELECT_OP3(op, src3w, DSLL, DSLL32, SLL);
+ }
+
+ FAIL_IF(push_inst(compiler, ins1 | T(src1_reg) | D(dst_reg), DR(dst_reg)));
+ FAIL_IF(push_inst(compiler, ins2 | T(src2_reg) | D(TMP_REG1), DR(TMP_REG1)));
+ return push_inst(compiler, OR | S(dst_reg) | T(TMP_REG1) | D(dst_reg), DR(dst_reg));
+ }
+
+ if (src3 & SLJIT_MEM) {
+ FAIL_IF(emit_op_mem(compiler, inp_flags, DR(TMP_REG2), src3, src3w));
+ src3 = TMP_REG2;
+ } else if (dst_reg == src3) {
+ FAIL_IF(push_inst(compiler, SELECT_OP2(op, DADDU, ADDU) | S(src3) | TA(0) | D(TMP_REG2), DR(TMP_REG2)));
+ src3 = TMP_REG2;
+ }
+
+ if (is_left) {
+ ins1 = SELECT_OP2(op, DSRL, SRL);
+ ins2 = SELECT_OP2(op, DSLLV, SLLV);
+ ins3 = SELECT_OP2(op, DSRLV, SRLV);
+ } else {
+ ins1 = SELECT_OP2(op, DSLL, SLL);
+ ins2 = SELECT_OP2(op, DSRLV, SRLV);
+ ins3 = SELECT_OP2(op, DSLLV, SLLV);
+ }
+
+ FAIL_IF(push_inst(compiler, ins2 | S(src3) | T(src1_reg) | D(dst_reg), DR(dst_reg)));
+
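+ /* The shift amount may be zero: pre-shift src2 by one, then shift by (bits - 1) - n, for a total of bits - n without a full-width shift. */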
+ if (!(op & SLJIT_SHIFT_INTO_NON_ZERO)) {
+ FAIL_IF(push_inst(compiler, ins1 | T(src2_reg) | D(TMP_REG1) | (1 << 6), DR(TMP_REG1)));
+ FAIL_IF(push_inst(compiler, XORI | S(src3) | T(TMP_REG2) | ((sljit_ins)bit_length - 1), DR(TMP_REG2)));
+ src2_reg = TMP_REG1;
+ } else
+ FAIL_IF(push_inst(compiler, SELECT_OP2(op, DSUBU, SUBU) | SA(0) | T(src3) | D(TMP_REG2), DR(TMP_REG2)));
+
+ FAIL_IF(push_inst(compiler, ins3 | S(TMP_REG2) | T(src2_reg) | D(TMP_REG1), DR(TMP_REG1)));
+ return push_inst(compiler, OR | S(dst_reg) | T(TMP_REG1) | D(dst_reg), DR(dst_reg));
}
+#undef SELECT_OP3
+#undef SELECT_OP2
+
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 src, sljit_sw srcw)
{
@@ -1499,21 +2780,54 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *comp
return SLJIT_SUCCESS;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_dst(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw)
{
- CHECK_REG_INDEX(check_sljit_get_register_index(reg));
- return reg_map[reg];
+ sljit_s32 dst_ar = RETURN_ADDR_REG;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_op_dst(compiler, op, dst, dstw));
+ ADJUST_LOCAL_OFFSET(dst, dstw);
+
+ switch (op) {
+ case SLJIT_FAST_ENTER:
+ if (FAST_IS_REG(dst))
+ return push_inst(compiler, ADDU_W | SA(RETURN_ADDR_REG) | TA(0) | D(dst), UNMOVABLE_INS);
+ break;
+ case SLJIT_GET_RETURN_ADDRESS:
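+ /* The return address was saved to the stack frame by the function prologue. */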
+ dst_ar = DR(FAST_IS_REG(dst) ? dst : TMP_REG2);
+ FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, dst_ar, SLJIT_MEM1(SLJIT_SP), compiler->local_size - SSIZE_OF(sw)));
+ break;
+ }
+
+ if (dst & SLJIT_MEM) {
+ FAIL_IF(emit_op_mem(compiler, WORD_DATA, dst_ar, dst, dstw));
+
+ if (op == SLJIT_FAST_ENTER)
+ compiler->delay_slot = UNMOVABLE_INS;
+ }
+
+ return SLJIT_SUCCESS;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 type, sljit_s32 reg)
{
- CHECK_REG_INDEX(check_sljit_get_float_register_index(reg));
+ CHECK_REG_INDEX(check_sljit_get_register_index(type, reg));
+
+ if (type == SLJIT_GP_REGISTER)
+ return reg_map[reg];
+
+ if (type != SLJIT_FLOAT_REGISTER)
+ return -1;
+
return FR(reg);
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler,
- void *instruction, sljit_s32 size)
+ void *instruction, sljit_u32 size)
{
+ SLJIT_UNUSED_ARG(size);
+
CHECK_ERROR();
CHECK(check_sljit_emit_op_custom(compiler, instruction, size));
@@ -1524,17 +2838,17 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *c
/* Floating point operators */
/* --------------------------------------------------------------------- */
-#define FLOAT_DATA(op) (DOUBLE_DATA | ((op & SLJIT_F32_OP) >> 7))
-#define FMT(op) (((op & SLJIT_F32_OP) ^ SLJIT_F32_OP) << (21 - 8))
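+/* The COP1 fmt field is 16 for single and 17 for double precision: inverting the SLJIT_32 bit adds one to FMT_S, yielding FMT_D for f64 ops. */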
+#define FLOAT_DATA(op) (DOUBLE_DATA | ((op & SLJIT_32) >> 7))
+#define FMT(op) (FMT_S | (~(sljit_ins)op & SLJIT_32) << (21 - (5 + 3)))
static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
{
#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
-# define flags 0
+ sljit_u32 flags = 0;
#else
- sljit_s32 flags = (GET_OPCODE(op) == SLJIT_CONV_SW_FROM_F64) << 21;
+ sljit_u32 flags = ((sljit_u32)(GET_OPCODE(op) == SLJIT_CONV_SW_FROM_F64)) << 21;
#endif
if (src & SLJIT_MEM) {
@@ -1544,15 +2858,15 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_comp
FAIL_IF(push_inst(compiler, (TRUNC_W_S ^ (flags >> 19)) | FMT(op) | FS(src) | FD(TMP_FREG1), MOVABLE_INS));
- if (FAST_IS_REG(dst))
- return push_inst(compiler, MFC1 | flags | T(dst) | FS(TMP_FREG1), MOVABLE_INS);
+ if (FAST_IS_REG(dst)) {
+ FAIL_IF(push_inst(compiler, MFC1 | flags | T(dst) | FS(TMP_FREG1), MOVABLE_INS));
+#if !defined(SLJIT_MIPS_REV) || (SLJIT_CONFIG_MIPS_32 && SLJIT_MIPS_REV <= 1)
+ FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS));
+#endif /* MIPS III */
+ return SLJIT_SUCCESS;
+ }
- /* Store the integer value from a VFP register. */
return emit_op_mem2(compiler, flags ? DOUBLE_DATA : SINGLE_DATA, FR(TMP_FREG1), dst, dstw, 0, 0);
-
-#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
-# undef is_long
-#endif
}
static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
@@ -1560,37 +2874,158 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_comp
sljit_s32 src, sljit_sw srcw)
{
#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
-# define flags 0
+ sljit_u32 flags = 0;
#else
- sljit_s32 flags = (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_SW) << 21;
+ sljit_u32 flags = ((sljit_u32)(GET_OPCODE(op) == SLJIT_CONV_F64_FROM_SW)) << 21;
#endif
-
sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
- if (FAST_IS_REG(src))
- FAIL_IF(push_inst(compiler, MTC1 | flags | T(src) | FS(TMP_FREG1), MOVABLE_INS));
- else if (src & SLJIT_MEM) {
- /* Load the integer value into a VFP register. */
- FAIL_IF(emit_op_mem2(compiler, ((flags) ? DOUBLE_DATA : SINGLE_DATA) | LOAD_DATA, FR(TMP_FREG1), src, srcw, dst, dstw));
- }
+ if (src & SLJIT_MEM)
+ FAIL_IF(emit_op_mem2(compiler, (flags ? DOUBLE_DATA : SINGLE_DATA) | LOAD_DATA, FR(TMP_FREG1), src, srcw, dst, dstw));
else {
-#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
- if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32)
- srcw = (sljit_s32)srcw;
+ if (src == SLJIT_IMM) {
+#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
+ if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32)
+ srcw = (sljit_s32)srcw;
#endif
- FAIL_IF(load_immediate(compiler, DR(TMP_REG1), srcw));
- FAIL_IF(push_inst(compiler, MTC1 | flags | T(TMP_REG1) | FS(TMP_FREG1), MOVABLE_INS));
+ FAIL_IF(load_immediate(compiler, DR(TMP_REG1), srcw));
+ src = TMP_REG1;
+ }
+
+ FAIL_IF(push_inst(compiler, MTC1 | flags | T(src) | FS(TMP_FREG1), MOVABLE_INS));
+#if !defined(SLJIT_MIPS_REV) || (SLJIT_CONFIG_MIPS_32 && SLJIT_MIPS_REV <= 1)
+ FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS));
+#endif /* MIPS III */
}
- FAIL_IF(push_inst(compiler, CVT_S_S | flags | (4 << 21) | (((op & SLJIT_F32_OP) ^ SLJIT_F32_OP) >> 8) | FS(TMP_FREG1) | FD(dst_r), MOVABLE_INS));
+ FAIL_IF(push_inst(compiler, CVT_S_S | flags | (4 << 21) | ((~(sljit_ins)op & SLJIT_32) >> 8) | FS(TMP_FREG1) | FD(dst_r), MOVABLE_INS));
if (dst & SLJIT_MEM)
return emit_op_mem2(compiler, FLOAT_DATA(op), FR(TMP_FREG1), dst, dstw, 0, 0);
return SLJIT_SUCCESS;
+}
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_uw(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 src, sljit_sw srcw)
+{
#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
-# undef flags
+ sljit_u32 flags = 0;
+#else
+ sljit_u32 flags = 1 << 21;
+#endif
+ sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
+
+ if (src & SLJIT_MEM) {
+ FAIL_IF(emit_op_mem2(compiler, (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_UW ? WORD_DATA : INT_DATA) | LOAD_DATA, DR(TMP_REG1), src, srcw, dst, dstw));
+ src = TMP_REG1;
+ } else if (src == SLJIT_IMM) {
+#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
+ if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_U32)
+ srcw = (sljit_u32)srcw;
#endif
+ FAIL_IF(load_immediate(compiler, DR(TMP_REG1), srcw));
+ src = TMP_REG1;
+ }
+
+#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
+ if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_U32) {
+ if (src != TMP_REG1) {
+ FAIL_IF(push_inst(compiler, DSLL32 | T(src) | D(TMP_REG1) | SH_IMM(0), DR(TMP_REG1)));
+ FAIL_IF(push_inst(compiler, DSRL32 | T(TMP_REG1) | D(TMP_REG1) | SH_IMM(0), DR(TMP_REG1)));
+ }
+
+ FAIL_IF(push_inst(compiler, MTC1 | flags | T(TMP_REG1) | FS(TMP_FREG1), MOVABLE_INS));
+#if !defined(SLJIT_MIPS_REV)
+ FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS));
+#endif /* MIPS III */
+
+ FAIL_IF(push_inst(compiler, CVT_S_S | flags | (4 << 21) | ((~(sljit_ins)op & SLJIT_32) >> 8) | FS(TMP_FREG1) | FD(dst_r), MOVABLE_INS));
+
+ if (dst & SLJIT_MEM)
+ return emit_op_mem2(compiler, FLOAT_DATA(op), FR(TMP_FREG1), dst, dstw, 0, 0);
+ return SLJIT_SUCCESS;
+ }
+#else /* !SLJIT_CONFIG_MIPS_64 */
+ if (!(op & SLJIT_32)) {
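+ /* Clear the sign bit, convert the low 31 bits, then add 2^31 (the LUI below builds its double representation) when the sign bit was set. */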
+ FAIL_IF(push_inst(compiler, SLL | T(src) | D(TMP_REG2) | SH_IMM(1), DR(TMP_REG2)));
+ FAIL_IF(push_inst(compiler, SRL | T(TMP_REG2) | D(TMP_REG2) | SH_IMM(1), DR(TMP_REG2)));
+
+ FAIL_IF(push_inst(compiler, MTC1 | flags | T(TMP_REG2) | FS(TMP_FREG1), MOVABLE_INS));
+#if !defined(SLJIT_MIPS_REV) || SLJIT_MIPS_REV <= 1
+ FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS));
+#endif /* MIPS III */
+
+ FAIL_IF(push_inst(compiler, CVT_S_S | flags | (4 << 21) | 1 | FS(TMP_FREG1) | FD(dst_r), MOVABLE_INS));
+
+#if (!defined SLJIT_MIPS_REV || SLJIT_MIPS_REV <= 1)
+ FAIL_IF(push_inst(compiler, BGEZ | S(src) | 5, UNMOVABLE_INS));
+#else /* SLJIT_MIPS_REV >= 2 */
+ FAIL_IF(push_inst(compiler, BGEZ | S(src) | 4, UNMOVABLE_INS));
+#endif /* SLJIT_MIPS_REV <= 1 */
+
+ FAIL_IF(push_inst(compiler, LUI | T(TMP_REG2) | IMM(0x41e0), UNMOVABLE_INS));
+ FAIL_IF(push_inst(compiler, MTC1 | TA(0) | FS(TMP_FREG2), UNMOVABLE_INS));
+ switch (cpu_feature_list & CPU_FEATURE_FR) {
+#if defined(SLJIT_MIPS_REV) && SLJIT_MIPS_REV >= 2
+ case CPU_FEATURE_FR:
+ FAIL_IF(push_inst(compiler, MTHC1 | T(TMP_REG2) | FS(TMP_FREG2), UNMOVABLE_INS));
+ break;
+#endif /* SLJIT_MIPS_REV >= 2 */
+ default:
+ FAIL_IF(push_inst(compiler, MTC1 | T(TMP_REG2) | FS(TMP_FREG2) | (1 << 11), UNMOVABLE_INS));
+#if !defined(SLJIT_MIPS_REV) || SLJIT_MIPS_REV <= 1
+ FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS));
+#endif /* MIPS III */
+ break;
+ }
+ FAIL_IF(push_inst(compiler, ADD_S | FMT(op) | FT(TMP_FREG2) | FS(dst_r) | FD(dst_r), UNMOVABLE_INS));
+
+ if (dst & SLJIT_MEM)
+ return emit_op_mem2(compiler, FLOAT_DATA(op), FR(TMP_FREG1), dst, dstw, 0, 0);
+ return SLJIT_SUCCESS;
+ }
+#endif /* SLJIT_CONFIG_MIPS_64 */
+
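+ /* Negative (as signed) inputs: halve with the low bit ORed back (round to odd), convert, then double the result. */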
+#if (!defined SLJIT_MIPS_REV || SLJIT_MIPS_REV <= 1)
+ FAIL_IF(push_inst(compiler, BLTZ | S(src) | 5, UNMOVABLE_INS));
+#else /* SLJIT_MIPS_REV >= 2 */
+ FAIL_IF(push_inst(compiler, BLTZ | S(src) | 4, UNMOVABLE_INS));
+#endif /* SLJIT_MIPS_REV <= 1 */
+ FAIL_IF(push_inst(compiler, ANDI | S(src) | T(TMP_REG2) | IMM(1), DR(TMP_REG2)));
+
+ FAIL_IF(push_inst(compiler, MTC1 | flags | T(src) | FS(TMP_FREG1), MOVABLE_INS));
+#if !defined(SLJIT_MIPS_REV)
+ FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS));
+#endif /* !SLJIT_MIPS_REV */
+
+ FAIL_IF(push_inst(compiler, CVT_S_S | flags | (4 << 21) | ((~(sljit_ins)op & SLJIT_32) >> 8) | FS(TMP_FREG1) | FD(dst_r), MOVABLE_INS));
+
+#if (!defined SLJIT_MIPS_REV || SLJIT_MIPS_REV <= 1)
+ FAIL_IF(push_inst(compiler, BEQ | 6, UNMOVABLE_INS));
+#else /* SLJIT_MIPS_REV >= 2 */
+ FAIL_IF(push_inst(compiler, BEQ | 5, UNMOVABLE_INS));
+#endif /* SLJIT_MIPS_REV <= 1 */
+
+#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
+ FAIL_IF(push_inst(compiler, DSRL | T(src) | D(TMP_REG1) | SH_IMM(1), DR(TMP_REG1)));
+#else /* !SLJIT_CONFIG_MIPS_64 */
+ FAIL_IF(push_inst(compiler, SRL | T(src) | D(TMP_REG1) | SH_IMM(1), DR(TMP_REG1)));
+#endif /* SLJIT_CONFIG_MIPS_64 */
+
+ FAIL_IF(push_inst(compiler, OR | S(TMP_REG1) | T(TMP_REG2) | D(TMP_REG1), DR(TMP_REG1)));
+
+ FAIL_IF(push_inst(compiler, MTC1 | flags | T(TMP_REG1) | FS(TMP_FREG1), MOVABLE_INS));
+#if !defined(SLJIT_MIPS_REV)
+ FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS));
+#endif /* !SLJIT_MIPS_REV */
+
+ FAIL_IF(push_inst(compiler, CVT_S_S | flags | (4 << 21) | ((~(sljit_ins)op & SLJIT_32) >> 8) | FS(TMP_FREG1) | FD(dst_r), MOVABLE_INS));
+ FAIL_IF(push_inst(compiler, ADD_S | FMT(op) | FT(dst_r) | FS(dst_r) | FD(dst_r), UNMOVABLE_INS));
+
+ if (dst & SLJIT_MEM)
+ return emit_op_mem2(compiler, FLOAT_DATA(op), FR(TMP_FREG1), dst, dstw, 0, 0);
+ return SLJIT_SUCCESS;
}
static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compiler, sljit_s32 op,
@@ -1610,20 +3045,32 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compile
}
switch (GET_FLAG_TYPE(op)) {
- case SLJIT_EQUAL_F64:
- case SLJIT_NOT_EQUAL_F64:
+ case SLJIT_F_EQUAL:
+ case SLJIT_ORDERED_EQUAL:
+ inst = C_EQ_S;
+ break;
+ case SLJIT_F_NOT_EQUAL:
+ case SLJIT_UNORDERED_OR_EQUAL:
inst = C_UEQ_S;
break;
- case SLJIT_LESS_F64:
- case SLJIT_GREATER_EQUAL_F64:
+ case SLJIT_F_LESS:
+ case SLJIT_ORDERED_LESS:
+ inst = C_OLT_S;
+ break;
+ case SLJIT_F_GREATER_EQUAL:
+ case SLJIT_UNORDERED_OR_LESS:
inst = C_ULT_S;
break;
- case SLJIT_GREATER_F64:
- case SLJIT_LESS_EQUAL_F64:
+ case SLJIT_F_GREATER:
+ case SLJIT_ORDERED_GREATER:
inst = C_ULE_S;
break;
+ case SLJIT_F_LESS_EQUAL:
+ case SLJIT_UNORDERED_OR_GREATER:
+ inst = C_OLE_S;
+ break;
default:
- SLJIT_ASSERT(GET_FLAG_TYPE(op) == SLJIT_UNORDERED_F64 || GET_FLAG_TYPE(op) == SLJIT_ORDERED_F64);
+ SLJIT_ASSERT(GET_FLAG_TYPE(op) == SLJIT_UNORDERED);
inst = C_UN_S;
break;
}
@@ -1640,11 +3087,11 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
compiler->cache_arg = 0;
compiler->cache_argw = 0;
- SLJIT_COMPILE_ASSERT((SLJIT_F32_OP == 0x100) && !(DOUBLE_DATA & 0x2), float_transfer_bit_error);
+ SLJIT_COMPILE_ASSERT((SLJIT_32 == 0x100) && !(DOUBLE_DATA & 0x2), float_transfer_bit_error);
SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw);
if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32)
- op ^= SLJIT_F32_OP;
+ op ^= SLJIT_32;
dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
@@ -1657,7 +3104,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
case SLJIT_MOV_F64:
if (src != dst_r) {
if (dst_r != TMP_FREG1)
- FAIL_IF(push_inst(compiler, MOV_S | FMT(op) | FS(src) | FD(dst_r), MOVABLE_INS));
+ FAIL_IF(push_inst(compiler, MOV_fmt(FMT(op)) | FS(src) | FD(dst_r), MOVABLE_INS));
else
dst_r = src;
}
@@ -1669,8 +3116,9 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
FAIL_IF(push_inst(compiler, ABS_S | FMT(op) | FS(src) | FD(dst_r), MOVABLE_INS));
break;
case SLJIT_CONV_F64_FROM_F32:
- FAIL_IF(push_inst(compiler, CVT_S_S | ((op & SLJIT_F32_OP) ? 1 : (1 << 21)) | FS(src) | FD(dst_r), MOVABLE_INS));
- op ^= SLJIT_F32_OP;
+ /* The SLJIT_32 bit is inverted because sljit_f32 needs to be loaded from memory. */
+ FAIL_IF(push_inst(compiler, CVT_S_S | (sljit_ins)((op & SLJIT_32) ? 1 : (1 << 21)) | FS(src) | FD(dst_r), MOVABLE_INS));
+ op ^= SLJIT_32;
break;
}
@@ -1737,18 +3185,17 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compil
case SLJIT_ADD_F64:
FAIL_IF(push_inst(compiler, ADD_S | FMT(op) | FT(src2) | FS(src1) | FD(dst_r), MOVABLE_INS));
break;
-
case SLJIT_SUB_F64:
FAIL_IF(push_inst(compiler, SUB_S | FMT(op) | FT(src2) | FS(src1) | FD(dst_r), MOVABLE_INS));
break;
-
case SLJIT_MUL_F64:
FAIL_IF(push_inst(compiler, MUL_S | FMT(op) | FT(src2) | FS(src1) | FD(dst_r), MOVABLE_INS));
break;
-
case SLJIT_DIV_F64:
FAIL_IF(push_inst(compiler, DIV_S | FMT(op) | FT(src2) | FS(src1) | FD(dst_r), MOVABLE_INS));
break;
+ case SLJIT_COPYSIGN_F64:
+ return emit_copysign(compiler, op, src1, src2, dst_r);
}
if (dst_r == TMP_FREG2)
@@ -1757,23 +3204,24 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compil
return SLJIT_SUCCESS;
}
-/* --------------------------------------------------------------------- */
-/* Other instructions */
-/* --------------------------------------------------------------------- */
-
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fset32(struct sljit_compiler *compiler,
+ sljit_s32 freg, sljit_f32 value)
{
+ union {
+ sljit_s32 imm;
+ sljit_f32 value;
+ } u;
+
CHECK_ERROR();
- CHECK(check_sljit_emit_fast_enter(compiler, dst, dstw));
- ADJUST_LOCAL_OFFSET(dst, dstw);
+ CHECK(check_sljit_emit_fset32(compiler, freg, value));
- if (FAST_IS_REG(dst))
- return push_inst(compiler, ADDU_W | SA(RETURN_ADDR_REG) | TA(0) | D(dst), UNMOVABLE_INS);
+ u.value = value;
- /* Memory. */
- FAIL_IF(emit_op_mem(compiler, WORD_DATA, RETURN_ADDR_REG, dst, dstw));
- compiler->delay_slot = UNMOVABLE_INS;
- return SLJIT_SUCCESS;
+ if (u.imm == 0)
+ return push_inst(compiler, MTC1 | TA(0) | FS(freg), MOVABLE_INS);
+
+ FAIL_IF(load_immediate(compiler, DR(TMP_REG1), u.imm));
+ return push_inst(compiler, MTC1 | T(TMP_REG1) | FS(freg), MOVABLE_INS);
}
/* --------------------------------------------------------------------- */
@@ -1798,18 +3246,18 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compi
}
#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
-#define JUMP_LENGTH 4
+#define BRANCH_LENGTH 4
#else
-#define JUMP_LENGTH 8
+#define BRANCH_LENGTH 8
#endif
#define BR_Z(src) \
- inst = BEQ | SA(src) | TA(0) | JUMP_LENGTH; \
+ inst = BEQ | SA(src) | TA(0) | BRANCH_LENGTH; \
flags = IS_BIT26_COND; \
delay_check = src;
#define BR_NZ(src) \
- inst = BNE | SA(src) | TA(0) | JUMP_LENGTH; \
+ inst = BNE | SA(src) | TA(0) | BRANCH_LENGTH; \
flags = IS_BIT26_COND; \
delay_check = src;
@@ -1827,11 +3275,11 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compi
#else /* SLJIT_MIPS_REV < 6 */
#define BR_T() \
- inst = BC1T | JUMP_LENGTH; \
+ inst = BC1T | BRANCH_LENGTH; \
flags = IS_BIT16_COND; \
delay_check = FCSR_FCC;
#define BR_F() \
- inst = BC1F | JUMP_LENGTH; \
+ inst = BC1F | BRANCH_LENGTH; \
flags = IS_BIT16_COND; \
delay_check = FCSR_FCC;
@@ -1841,7 +3289,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile
{
struct sljit_jump *jump;
sljit_ins inst;
- sljit_s32 flags = 0;
+ sljit_u32 flags = 0;
sljit_s32 delay_check = UNMOVABLE_INS;
CHECK_ERROR_PTR();
@@ -1864,6 +3312,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile
case SLJIT_SIG_LESS:
case SLJIT_SIG_GREATER:
case SLJIT_OVERFLOW:
+ case SLJIT_CARRY:
BR_Z(OTHER_FLAG);
break;
case SLJIT_GREATER_EQUAL:
@@ -1871,18 +3320,31 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile
case SLJIT_SIG_GREATER_EQUAL:
case SLJIT_SIG_LESS_EQUAL:
case SLJIT_NOT_OVERFLOW:
+ case SLJIT_NOT_CARRY:
BR_NZ(OTHER_FLAG);
break;
- case SLJIT_NOT_EQUAL_F64:
- case SLJIT_GREATER_EQUAL_F64:
- case SLJIT_GREATER_F64:
- case SLJIT_ORDERED_F64:
+ case SLJIT_F_NOT_EQUAL:
+ case SLJIT_F_GREATER_EQUAL:
+ case SLJIT_F_GREATER:
+ case SLJIT_UNORDERED_OR_NOT_EQUAL:
+ case SLJIT_ORDERED_NOT_EQUAL:
+ case SLJIT_UNORDERED_OR_GREATER_EQUAL:
+ case SLJIT_ORDERED_GREATER_EQUAL:
+ case SLJIT_ORDERED_GREATER:
+ case SLJIT_UNORDERED_OR_GREATER:
+ case SLJIT_ORDERED:
BR_T();
break;
- case SLJIT_EQUAL_F64:
- case SLJIT_LESS_F64:
- case SLJIT_LESS_EQUAL_F64:
- case SLJIT_UNORDERED_F64:
+ case SLJIT_F_EQUAL:
+ case SLJIT_F_LESS:
+ case SLJIT_F_LESS_EQUAL:
+ case SLJIT_ORDERED_EQUAL:
+ case SLJIT_UNORDERED_OR_EQUAL:
+ case SLJIT_ORDERED_LESS:
+ case SLJIT_UNORDERED_OR_LESS:
+ case SLJIT_UNORDERED_OR_LESS_EQUAL:
+ case SLJIT_ORDERED_LESS_EQUAL:
+ case SLJIT_UNORDERED:
BR_F();
break;
default:
@@ -1898,8 +3360,6 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile
if (inst)
PTR_FAIL_IF(push_inst(compiler, inst, UNMOVABLE_INS));
- PTR_FAIL_IF(emit_const(compiler, TMP_REG2, 0));
-
if (type <= SLJIT_JUMP)
PTR_FAIL_IF(push_inst(compiler, JR | S(TMP_REG2), UNMOVABLE_INS));
else {
@@ -1909,11 +3369,18 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile
jump->addr = compiler->size;
PTR_FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS));
+
+ /* Maximum number of instructions required for generating a constant. */
+#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
+ compiler->size += 2;
+#else
+ compiler->size += 6;
+#endif
return jump;
}
#define RESOLVE_IMM1() \
- if (src1 & SLJIT_IMM) { \
+ if (src1 == SLJIT_IMM) { \
if (src1w) { \
PTR_FAIL_IF(load_immediate(compiler, DR(TMP_REG1), src1w)); \
src1 = TMP_REG1; \
@@ -1923,7 +3390,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile
}
#define RESOLVE_IMM2() \
- if (src2 & SLJIT_IMM) { \
+ if (src2 == SLJIT_IMM) { \
if (src2w) { \
PTR_FAIL_IF(load_immediate(compiler, DR(TMP_REG2), src2w)); \
src2 = TMP_REG2; \
@@ -1947,11 +3414,17 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler
compiler->cache_arg = 0;
compiler->cache_argw = 0;
- flags = ((type & SLJIT_I32_OP) ? INT_DATA : WORD_DATA) | LOAD_DATA;
+#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
+ flags = WORD_DATA | LOAD_DATA;
+#else /* !SLJIT_CONFIG_MIPS_32 */
+ flags = ((type & SLJIT_32) ? INT_DATA : WORD_DATA) | LOAD_DATA;
+#endif /* SLJIT_CONFIG_MIPS_32 */
+
if (src1 & SLJIT_MEM) {
PTR_FAIL_IF(emit_op_mem2(compiler, flags, DR(TMP_REG1), src1, src1w, src2, src2w));
src1 = TMP_REG1;
}
+
if (src2 & SLJIT_MEM) {
PTR_FAIL_IF(emit_op_mem2(compiler, flags, DR(TMP_REG2), src2, src2w, 0, 0));
src2 = TMP_REG2;
@@ -1968,11 +3441,10 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler
jump->flags |= IS_BIT26_COND;
if (compiler->delay_slot == MOVABLE_INS || (compiler->delay_slot != UNMOVABLE_INS && compiler->delay_slot != DR(src1) && compiler->delay_slot != DR(src2)))
jump->flags |= IS_MOVABLE;
- PTR_FAIL_IF(push_inst(compiler, (type == SLJIT_EQUAL ? BNE : BEQ) | S(src1) | T(src2) | JUMP_LENGTH, UNMOVABLE_INS));
- }
- else if (type >= SLJIT_SIG_LESS && (((src1 & SLJIT_IMM) && (src1w == 0)) || ((src2 & SLJIT_IMM) && (src2w == 0)))) {
+ PTR_FAIL_IF(push_inst(compiler, (type == SLJIT_EQUAL ? BNE : BEQ) | S(src1) | T(src2) | BRANCH_LENGTH, UNMOVABLE_INS));
+ } else if (type >= SLJIT_SIG_LESS && ((src1 == SLJIT_IMM && src1w == 0) || (src2 == SLJIT_IMM && src2w == 0))) {
inst = NOP;
- if ((src1 & SLJIT_IMM) && (src1w == 0)) {
+ if (src1 == SLJIT_IMM && src1w == 0) {
RESOLVE_IMM2();
switch (type) {
case SLJIT_SIG_LESS:
@@ -2015,12 +3487,12 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler
break;
}
}
- PTR_FAIL_IF(push_inst(compiler, inst | S(src1) | JUMP_LENGTH, UNMOVABLE_INS));
+ PTR_FAIL_IF(push_inst(compiler, inst | S(src1) | BRANCH_LENGTH, UNMOVABLE_INS));
}
else {
if (type == SLJIT_LESS || type == SLJIT_GREATER_EQUAL || type == SLJIT_SIG_LESS || type == SLJIT_SIG_GREATER_EQUAL) {
RESOLVE_IMM1();
- if ((src2 & SLJIT_IMM) && src2w <= SIMM_MAX && src2w >= SIMM_MIN)
+ if (src2 == SLJIT_IMM && src2w <= SIMM_MAX && src2w >= SIMM_MIN)
PTR_FAIL_IF(push_inst(compiler, (type <= SLJIT_LESS_EQUAL ? SLTIU : SLTI) | S(src1) | T(TMP_REG1) | IMM(src2w), DR(TMP_REG1)));
else {
RESOLVE_IMM2();
@@ -2030,7 +3502,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler
}
else {
RESOLVE_IMM2();
- if ((src1 & SLJIT_IMM) && src1w <= SIMM_MAX && src1w >= SIMM_MIN)
+ if (src1 == SLJIT_IMM && src1w <= SIMM_MAX && src1w >= SIMM_MIN)
PTR_FAIL_IF(push_inst(compiler, (type <= SLJIT_LESS_EQUAL ? SLTIU : SLTI) | S(src2) | T(TMP_REG1) | IMM(src1w), DR(TMP_REG1)));
else {
RESOLVE_IMM1();
@@ -2040,70 +3512,83 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler
}
jump->flags |= IS_BIT26_COND;
- PTR_FAIL_IF(push_inst(compiler, (type == SLJIT_EQUAL ? BNE : BEQ) | S(TMP_REG1) | TA(0) | JUMP_LENGTH, UNMOVABLE_INS));
+ PTR_FAIL_IF(push_inst(compiler, (type == SLJIT_EQUAL ? BNE : BEQ) | S(TMP_REG1) | TA(0) | BRANCH_LENGTH, UNMOVABLE_INS));
}
- PTR_FAIL_IF(emit_const(compiler, TMP_REG2, 0));
PTR_FAIL_IF(push_inst(compiler, JR | S(TMP_REG2), UNMOVABLE_INS));
jump->addr = compiler->size;
PTR_FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS));
+
+ /* Maximum number of instructions required for generating a constant. */
+#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
+ compiler->size += 2;
+#else
+ compiler->size += 6;
+#endif
return jump;
}
#undef RESOLVE_IMM1
#undef RESOLVE_IMM2
-#undef JUMP_LENGTH
+#undef BRANCH_LENGTH
#undef BR_Z
#undef BR_NZ
#undef BR_T
#undef BR_F
-#undef FLOAT_DATA
-#undef FMT
-
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw)
{
struct sljit_jump *jump = NULL;
CHECK_ERROR();
CHECK(check_sljit_emit_ijump(compiler, type, src, srcw));
- ADJUST_LOCAL_OFFSET(src, srcw);
- if (src & SLJIT_IMM) {
+ if (src == SLJIT_IMM) {
jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
FAIL_IF(!jump);
set_jump(jump, compiler, JUMP_ADDR | ((type >= SLJIT_FAST_CALL) ? IS_JAL : 0));
- jump->u.target = srcw;
+ jump->u.target = (sljit_uw)srcw;
if (compiler->delay_slot != UNMOVABLE_INS)
jump->flags |= IS_MOVABLE;
- FAIL_IF(emit_const(compiler, TMP_REG2, 0));
src = TMP_REG2;
- }
- else if (src & SLJIT_MEM) {
+ } else if (src & SLJIT_MEM) {
+ ADJUST_LOCAL_OFFSET(src, srcw);
FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, DR(TMP_REG2), src, srcw));
src = TMP_REG2;
}
- FAIL_IF(push_inst(compiler, JR | S(src), UNMOVABLE_INS));
- if (jump)
+ if (type <= SLJIT_JUMP)
+ FAIL_IF(push_inst(compiler, JR | S(src), UNMOVABLE_INS));
+ else
+ FAIL_IF(push_inst(compiler, JALR | S(src) | DA(RETURN_ADDR_REG), UNMOVABLE_INS));
+
+ if (jump != NULL) {
jump->addr = compiler->size;
- FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS));
- return SLJIT_SUCCESS;
+
+ /* Maximum number of instructions required for generating a constant. */
+#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
+ compiler->size += 2;
+#else
+ compiler->size += 6;
+#endif
+ }
+
+ return push_inst(compiler, NOP, UNMOVABLE_INS);
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 type)
{
- sljit_s32 src_ar, dst_ar;
+ sljit_s32 src_ar, dst_ar, invert;
sljit_s32 saved_op = op;
#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
sljit_s32 mem_type = WORD_DATA;
#else
- sljit_s32 mem_type = (op & SLJIT_I32_OP) ? (INT_DATA | SIGNED_DATA) : WORD_DATA;
+ sljit_s32 mem_type = ((op & SLJIT_32) || op == SLJIT_MOV32) ? (INT_DATA | SIGNED_DATA) : WORD_DATA;
#endif
CHECK_ERROR();
@@ -2111,10 +3596,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
ADJUST_LOCAL_OFFSET(dst, dstw);
op = GET_OPCODE(op);
-#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
- if (op == SLJIT_MOV_S32)
- mem_type = INT_DATA | SIGNED_DATA;
-#endif
dst_ar = DR((op < SLJIT_ADD && FAST_IS_REG(dst)) ? dst : TMP_REG2);
compiler->cache_arg = 0;
@@ -2123,31 +3604,45 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
if (op >= SLJIT_ADD && (dst & SLJIT_MEM))
FAIL_IF(emit_op_mem2(compiler, mem_type | LOAD_DATA, DR(TMP_REG1), dst, dstw, dst, dstw));
- switch (type & 0xff) {
- case SLJIT_EQUAL:
- case SLJIT_NOT_EQUAL:
- FAIL_IF(push_inst(compiler, SLTIU | SA(EQUAL_FLAG) | TA(dst_ar) | IMM(1), dst_ar));
- src_ar = dst_ar;
- break;
- case SLJIT_OVERFLOW:
- case SLJIT_NOT_OVERFLOW:
- if (compiler->status_flags_state & SLJIT_CURRENT_FLAGS_ADD_SUB) {
- src_ar = OTHER_FLAG;
+ if (type < SLJIT_F_EQUAL) {
+ src_ar = OTHER_FLAG;
+ invert = type & 0x1;
+
+ switch (type) {
+ case SLJIT_EQUAL:
+ case SLJIT_NOT_EQUAL:
+ FAIL_IF(push_inst(compiler, SLTIU | SA(EQUAL_FLAG) | TA(dst_ar) | IMM(1), dst_ar));
+ src_ar = dst_ar;
+ break;
+ case SLJIT_OVERFLOW:
+ case SLJIT_NOT_OVERFLOW:
+ if (compiler->status_flags_state & (SLJIT_CURRENT_FLAGS_ADD | SLJIT_CURRENT_FLAGS_SUB)) {
+ src_ar = OTHER_FLAG;
+ break;
+ }
+ FAIL_IF(push_inst(compiler, SLTIU | SA(OTHER_FLAG) | TA(dst_ar) | IMM(1), dst_ar));
+ src_ar = dst_ar;
+ invert ^= 0x1;
break;
}
- FAIL_IF(push_inst(compiler, SLTIU | SA(OTHER_FLAG) | TA(dst_ar) | IMM(1), dst_ar));
- src_ar = dst_ar;
- type ^= 0x1; /* Flip type bit for the XORI below. */
- break;
- case SLJIT_GREATER_F64:
- case SLJIT_LESS_EQUAL_F64:
- type ^= 0x1; /* Flip type bit for the XORI below. */
- case SLJIT_EQUAL_F64:
- case SLJIT_NOT_EQUAL_F64:
- case SLJIT_LESS_F64:
- case SLJIT_GREATER_EQUAL_F64:
- case SLJIT_UNORDERED_F64:
- case SLJIT_ORDERED_F64:
+ } else {
+ invert = 0;
+
+ switch (type) {
+ case SLJIT_F_NOT_EQUAL:
+ case SLJIT_F_GREATER_EQUAL:
+ case SLJIT_F_GREATER:
+ case SLJIT_UNORDERED_OR_NOT_EQUAL:
+ case SLJIT_ORDERED_NOT_EQUAL:
+ case SLJIT_UNORDERED_OR_GREATER_EQUAL:
+ case SLJIT_ORDERED_GREATER_EQUAL:
+ case SLJIT_ORDERED_GREATER:
+ case SLJIT_UNORDERED_OR_GREATER:
+ case SLJIT_ORDERED:
+ invert = 1;
+ break;
+ }
+
#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6)
FAIL_IF(push_inst(compiler, MFC1 | TA(dst_ar) | FS(TMP_FREG3), dst_ar));
#else /* SLJIT_MIPS_REV < 6 */
@@ -2156,14 +3651,9 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
FAIL_IF(push_inst(compiler, SRL | TA(dst_ar) | DA(dst_ar) | SH_IMM(23), dst_ar));
FAIL_IF(push_inst(compiler, ANDI | SA(dst_ar) | TA(dst_ar) | IMM(1), dst_ar));
src_ar = dst_ar;
- break;
-
- default:
- src_ar = OTHER_FLAG;
- break;
}
- if (type & 0x1) {
+ if (invert) {
FAIL_IF(push_inst(compiler, XORI | SA(src_ar) | TA(dst_ar) | IMM(1), dst_ar));
src_ar = dst_ar;
}
@@ -2188,77 +3678,537 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
return emit_op(compiler, saved_op, mem_type, dst, dstw, dst, dstw, TMP_REG2, 0);
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compiler, sljit_s32 type,
- sljit_s32 dst_reg,
- sljit_s32 src, sljit_sw srcw)
-{
-#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1 && SLJIT_MIPS_REV < 6)
- sljit_ins ins;
-#endif /* SLJIT_MIPS_REV >= 1 && SLJIT_MIPS_REV < 6 */
-
- CHECK_ERROR();
- CHECK(check_sljit_emit_cmov(compiler, type, dst_reg, src, srcw));
-
#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1 && SLJIT_MIPS_REV < 6)
- if (SLJIT_UNLIKELY(src & SLJIT_IMM)) {
-#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
- if (dst_reg & SLJIT_I32_OP)
- srcw = (sljit_s32)srcw;
-#endif
- FAIL_IF(load_immediate(compiler, DR(TMP_REG1), srcw));
- src = TMP_REG1;
- srcw = 0;
- }
-
- dst_reg &= ~SLJIT_I32_OP;
-
- switch (type & 0xff) {
+static sljit_ins get_select_cc(sljit_s32 type, sljit_s32 is_float)
+{
+ switch (type & ~SLJIT_32) {
case SLJIT_EQUAL:
- ins = MOVZ | TA(EQUAL_FLAG);
- break;
+ return (is_float ? MOVZ_S : MOVZ) | TA(EQUAL_FLAG);
case SLJIT_NOT_EQUAL:
- ins = MOVN | TA(EQUAL_FLAG);
- break;
+ return (is_float ? MOVN_S : MOVN) | TA(EQUAL_FLAG);
case SLJIT_LESS:
case SLJIT_GREATER:
case SLJIT_SIG_LESS:
case SLJIT_SIG_GREATER:
case SLJIT_OVERFLOW:
- ins = MOVN | TA(OTHER_FLAG);
- break;
+ case SLJIT_CARRY:
+ return (is_float ? MOVN_S : MOVN) | TA(OTHER_FLAG);
case SLJIT_GREATER_EQUAL:
case SLJIT_LESS_EQUAL:
case SLJIT_SIG_GREATER_EQUAL:
case SLJIT_SIG_LESS_EQUAL:
case SLJIT_NOT_OVERFLOW:
- ins = MOVZ | TA(OTHER_FLAG);
- break;
- case SLJIT_EQUAL_F64:
- case SLJIT_LESS_F64:
- case SLJIT_LESS_EQUAL_F64:
- case SLJIT_UNORDERED_F64:
- ins = MOVT;
- break;
- case SLJIT_NOT_EQUAL_F64:
- case SLJIT_GREATER_EQUAL_F64:
- case SLJIT_GREATER_F64:
- case SLJIT_ORDERED_F64:
- ins = MOVF;
- break;
+ case SLJIT_NOT_CARRY:
+ return (is_float ? MOVZ_S : MOVZ) | TA(OTHER_FLAG);
+ case SLJIT_F_EQUAL:
+ case SLJIT_F_LESS:
+ case SLJIT_F_LESS_EQUAL:
+ case SLJIT_ORDERED_EQUAL:
+ case SLJIT_UNORDERED_OR_EQUAL:
+ case SLJIT_ORDERED_LESS:
+ case SLJIT_UNORDERED_OR_LESS:
+ case SLJIT_UNORDERED_OR_LESS_EQUAL:
+ case SLJIT_ORDERED_LESS_EQUAL:
+ case SLJIT_UNORDERED:
+ return is_float ? MOVT_S : MOVT;
+ case SLJIT_F_NOT_EQUAL:
+ case SLJIT_F_GREATER_EQUAL:
+ case SLJIT_F_GREATER:
+ case SLJIT_UNORDERED_OR_NOT_EQUAL:
+ case SLJIT_ORDERED_NOT_EQUAL:
+ case SLJIT_UNORDERED_OR_GREATER_EQUAL:
+ case SLJIT_ORDERED_GREATER_EQUAL:
+ case SLJIT_ORDERED_GREATER:
+ case SLJIT_UNORDERED_OR_GREATER:
+ case SLJIT_ORDERED:
+ return is_float ? MOVF_S : MOVF;
default:
- ins = MOVZ | TA(OTHER_FLAG);
SLJIT_UNREACHABLE();
- break;
+ return (is_float ? MOVZ_S : MOVZ) | TA(OTHER_FLAG);
+ }
+}
+
+#endif /* SLJIT_MIPS_REV >= 1 */
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_select(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 dst_reg,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2_reg)
+{
+#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
+ sljit_s32 inp_flags = ((type & SLJIT_32) ? INT_DATA : WORD_DATA) | LOAD_DATA;
+ sljit_ins mov_ins = (type & SLJIT_32) ? ADDU : DADDU;
+#else /* !SLJIT_CONFIG_MIPS_64 */
+ sljit_s32 inp_flags = WORD_DATA | LOAD_DATA;
+ sljit_ins mov_ins = ADDU;
+#endif /* SLJIT_CONFIG_MIPS_64 */
+
+#if !(defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1 && SLJIT_MIPS_REV < 6)
+ struct sljit_label *label;
+ struct sljit_jump *jump;
+#endif /* !(SLJIT_MIPS_REV >= 1 && SLJIT_MIPS_REV < 6) */
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_select(compiler, type, dst_reg, src1, src1w, src2_reg));
+ ADJUST_LOCAL_OFFSET(src1, src1w);
+
+#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1 && SLJIT_MIPS_REV < 6)
+ if (src1 & SLJIT_MEM) {
+ FAIL_IF(emit_op_mem(compiler, inp_flags, DR(TMP_REG2), src1, src1w));
+ src1 = TMP_REG2;
+ } else if (src1 == SLJIT_IMM) {
+#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
+ if (type & SLJIT_32)
+ src1w = (sljit_s32)src1w;
+#endif
+ FAIL_IF(load_immediate(compiler, DR(TMP_REG1), src1w));
+ src1 = TMP_REG1;
+ }
+
+ if (dst_reg != src2_reg) {
+ if (dst_reg == src1) {
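+ /* dst already holds src1: move src2 instead and invert the condition. */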
+ src1 = src2_reg;
+ type ^= 0x1;
+ } else
+ FAIL_IF(push_inst(compiler, mov_ins | S(src2_reg) | TA(0) | D(dst_reg), DR(dst_reg)));
}
- return push_inst(compiler, ins | S(src) | D(dst_reg), DR(dst_reg));
+ return push_inst(compiler, get_select_cc(type, 0) | S(src1) | D(dst_reg), DR(dst_reg));
#else /* SLJIT_MIPS_REV < 1 || SLJIT_MIPS_REV >= 6 */
- return sljit_emit_cmov_generic(compiler, type, dst_reg, src, srcw);
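+ /* No conditional move instructions in this range: branch over the load of src1 instead. */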
+ if (dst_reg != src2_reg) {
+ if (dst_reg == src1) {
+ src1 = src2_reg;
+ src1w = 0;
+ type ^= 0x1;
+ } else {
+ if (ADDRESSING_DEPENDS_ON(src1, dst_reg)) {
+ FAIL_IF(push_inst(compiler, ADDU_W | S(dst_reg) | TA(0) | D(TMP_REG2), DR(TMP_REG2)));
+
+ if ((src1 & REG_MASK) == dst_reg)
+ src1 = (src1 & ~REG_MASK) | TMP_REG2;
+
+ if (OFFS_REG(src1) == dst_reg)
+ src1 = (src1 & ~OFFS_REG_MASK) | TO_OFFS_REG(TMP_REG2);
+ }
+
+ FAIL_IF(push_inst(compiler, mov_ins | S(src2_reg) | TA(0) | D(dst_reg), DR(dst_reg)));
+ }
+ }
+
+ SLJIT_SKIP_CHECKS(compiler);
+ jump = sljit_emit_jump(compiler, (type & ~SLJIT_32) ^ 0x1);
+ FAIL_IF(!jump);
+
+ if (src1 & SLJIT_MEM) {
+ FAIL_IF(emit_op_mem(compiler, inp_flags, DR(dst_reg), src1, src1w));
+ } else if (src1 == SLJIT_IMM) {
+#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
+ if (type & SLJIT_32)
+ src1w = (sljit_s32)src1w;
+#endif /* SLJIT_CONFIG_MIPS_64 */
+ FAIL_IF(load_immediate(compiler, DR(dst_reg), src1w));
+ } else
+ FAIL_IF(push_inst(compiler, mov_ins | S(src1) | TA(0) | D(dst_reg), DR(dst_reg)));
+
+ SLJIT_SKIP_CHECKS(compiler);
+ label = sljit_emit_label(compiler);
+ FAIL_IF(!label);
+
+ sljit_set_label(jump, label);
+ return SLJIT_SUCCESS;
#endif /* SLJIT_MIPS_REV >= 1 */
}
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fselect(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 dst_freg,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2_freg)
+{
+#if !(defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1 && SLJIT_MIPS_REV < 6)
+ struct sljit_label *label;
+ struct sljit_jump *jump;
+#endif /* !(SLJIT_MIPS_REV >= 1 && SLJIT_MIPS_REV < 6) */
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fselect(compiler, type, dst_freg, src1, src1w, src2_freg));
+
+ ADJUST_LOCAL_OFFSET(src1, src1w);
+
+ if (dst_freg != src2_freg) {
+ if (dst_freg == src1) {
+ src1 = src2_freg;
+ src1w = 0;
+ type ^= 0x1;
+ } else
+ FAIL_IF(push_inst(compiler, MOV_fmt(FMT(type)) | FS(src2_freg) | FD(dst_freg), MOVABLE_INS));
+ }
+
+#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1 && SLJIT_MIPS_REV < 6)
+ if (src1 & SLJIT_MEM) {
+ FAIL_IF(emit_op_mem(compiler, FLOAT_DATA(type) | LOAD_DATA, FR(TMP_FREG1), src1, src1w));
+ src1 = TMP_FREG1;
+ }
+
+ return push_inst(compiler, get_select_cc(type, 1) | FMT(type) | FS(src1) | FD(dst_freg), MOVABLE_INS);
+
+#else /* SLJIT_MIPS_REV < 1 || SLJIT_MIPS_REV >= 6 */
+ SLJIT_SKIP_CHECKS(compiler);
+ jump = sljit_emit_jump(compiler, (type & ~SLJIT_32) ^ 0x1);
+ FAIL_IF(!jump);
+
+ if (src1 & SLJIT_MEM)
+ FAIL_IF(emit_op_mem(compiler, FLOAT_DATA(type) | LOAD_DATA, FR(dst_freg), src1, src1w));
+ else
+ FAIL_IF(push_inst(compiler, MOV_fmt(FMT(type)) | FS(src1) | FD(dst_freg), MOVABLE_INS));
+
+ SLJIT_SKIP_CHECKS(compiler);
+ label = sljit_emit_label(compiler);
+ FAIL_IF(!label);
+
+ sljit_set_label(jump, label);
+ return SLJIT_SUCCESS;
+#endif /* SLJIT_MIPS_REV >= 1 */
+}
+
+#undef FLOAT_DATA
+#undef FMT
+
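+/* Normalizes mem/memw so that the final offset fits the signed 16-bit immediate field and is at most max_offset; the base register may be replaced by TMP_REG1. */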
+static sljit_s32 update_mem_addr(struct sljit_compiler *compiler, sljit_s32 *mem, sljit_sw *memw, sljit_s16 max_offset)
+{
+ sljit_s32 arg = *mem;
+ sljit_sw argw = *memw;
+
+ if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) {
+ argw &= 0x3;
+
+ if (SLJIT_UNLIKELY(argw)) {
+ FAIL_IF(push_inst(compiler, SLL_W | T(OFFS_REG(arg)) | D(TMP_REG1) | SH_IMM(argw), DR(TMP_REG1)));
+ FAIL_IF(push_inst(compiler, ADDU_W | S(TMP_REG1) | T(arg & REG_MASK) | D(TMP_REG1), DR(TMP_REG1)));
+ } else
+ FAIL_IF(push_inst(compiler, ADDU_W | S(arg & REG_MASK) | T(OFFS_REG(arg)) | D(TMP_REG1), DR(TMP_REG1)));
+
+ *mem = TMP_REG1;
+ *memw = 0;
+
+ return SLJIT_SUCCESS;
+ }
+
+ if (argw <= max_offset && argw >= SIMM_MIN) {
+ *mem = arg & REG_MASK;
+ return SLJIT_SUCCESS;
+ }
+
+ *mem = TMP_REG1;
+
+ if ((sljit_s16)argw > max_offset) {
+ FAIL_IF(load_immediate(compiler, DR(TMP_REG1), argw));
+ *memw = 0;
+ } else {
+ FAIL_IF(load_immediate(compiler, DR(TMP_REG1), TO_ARGW_HI(argw)));
+ *memw = (sljit_s16)argw;
+ }
+
+ if ((arg & REG_MASK) == 0)
+ return SLJIT_SUCCESS;
+
+ return push_inst(compiler, ADDU_W | S(TMP_REG1) | T(arg & REG_MASK) | D(TMP_REG1), DR(TMP_REG1));
+}
+
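+/* Offsets for the unaligned left/right access pairs (LWL/LWR, SDL/SDR): the "left" half covers the most significant bytes, so the immediates depend on endianness. */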
+#if (defined SLJIT_LITTLE_ENDIAN && SLJIT_LITTLE_ENDIAN)
+#define IMM_LEFT(memw) IMM((memw) + SSIZE_OF(sw) - 1)
+#define IMM_RIGHT(memw) IMM(memw)
+#define IMM_32_LEFT(memw) IMM((memw) + SSIZE_OF(s32) - 1)
+#define IMM_32_RIGHT(memw) IMM(memw)
+#define IMM_F64_FIRST_LEFT(memw) IMM((memw) + SSIZE_OF(s32) - 1)
+#define IMM_F64_FIRST_RIGHT(memw) IMM(memw)
+#define IMM_F64_SECOND_LEFT(memw) IMM((memw) + SSIZE_OF(f64) - 1)
+#define IMM_F64_SECOND_RIGHT(memw) IMM((memw) + SSIZE_OF(s32))
+#define IMM_16_FIRST(memw) IMM((memw) + 1)
+#define IMM_16_SECOND(memw) IMM(memw)
+#else /* !SLJIT_LITTLE_ENDIAN */
+#define IMM_LEFT(memw) IMM(memw)
+#define IMM_RIGHT(memw) IMM((memw) + SSIZE_OF(sw) - 1)
+#define IMM_32_LEFT(memw) IMM(memw)
+#define IMM_32_RIGHT(memw) IMM((memw) + SSIZE_OF(s32) - 1)
+#define IMM_F64_FIRST_LEFT(memw) IMM((memw) + SSIZE_OF(s32))
+#define IMM_F64_FIRST_RIGHT(memw) IMM((memw) + SSIZE_OF(f64) - 1)
+#define IMM_F64_SECOND_LEFT(memw) IMM(memw)
+#define IMM_F64_SECOND_RIGHT(memw) IMM((memw) + SSIZE_OF(s32) - 1)
+#define IMM_16_FIRST(memw) IMM(memw)
+#define IMM_16_SECOND(memw) IMM((memw) + 1)
+#endif /* SLJIT_LITTLE_ENDIAN */
+
+#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
+#define MEM_CHECK_UNALIGNED(type) ((type) & (SLJIT_MEM_UNALIGNED | SLJIT_MEM_ALIGNED_16))
+#else /* !SLJIT_CONFIG_MIPS_32 */
+#define MEM_CHECK_UNALIGNED(type) ((type) & (SLJIT_MEM_UNALIGNED | SLJIT_MEM_ALIGNED_16 | SLJIT_MEM_ALIGNED_32))
+#endif /* SLJIT_CONFIG_MIPS_32 */
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 reg,
+ sljit_s32 mem, sljit_sw memw)
+{
+ sljit_s32 op = type & 0xff;
+ sljit_s32 flags = 0;
+ sljit_ins ins;
+#if !(defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6)
+ sljit_ins ins_right;
+#endif /* !(SLJIT_MIPS_REV >= 6) */
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw));
+
+ if (reg & REG_PAIR_MASK) {
+ ADJUST_LOCAL_OFFSET(mem, memw);
+
+#if !(defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6)
+ if (MEM_CHECK_UNALIGNED(type)) {
+ FAIL_IF(update_mem_addr(compiler, &mem, &memw, SIMM_MAX - (2 * SSIZE_OF(sw) - 1)));
+
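+ /* A load could clobber the base register before the second access; keep a copy in TMP_REG1. */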
+ if (!(type & SLJIT_MEM_STORE) && (mem == REG_PAIR_FIRST(reg) || mem == REG_PAIR_SECOND(reg))) {
+ FAIL_IF(push_inst(compiler, ADDU_W | S(mem) | TA(0) | D(TMP_REG1), DR(TMP_REG1)));
+ mem = TMP_REG1;
+ }
+
+#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
+ ins = ((type & SLJIT_MEM_STORE) ? SWL : LWL) | S(mem);
+ ins_right = ((type & SLJIT_MEM_STORE) ? SWR : LWR) | S(mem);
+#else /* !SLJIT_CONFIG_MIPS_32 */
+ ins = ((type & SLJIT_MEM_STORE) ? SDL : LDL) | S(mem);
+ ins_right = ((type & SLJIT_MEM_STORE) ? SDR : LDR) | S(mem);
+#endif /* SLJIT_CONFIG_MIPS_32 */
+
+ FAIL_IF(push_inst(compiler, ins | T(REG_PAIR_FIRST(reg)) | IMM_LEFT(memw), DR(REG_PAIR_FIRST(reg))));
+ FAIL_IF(push_inst(compiler, ins_right | T(REG_PAIR_FIRST(reg)) | IMM_RIGHT(memw), DR(REG_PAIR_FIRST(reg))));
+ FAIL_IF(push_inst(compiler, ins | T(REG_PAIR_SECOND(reg)) | IMM_LEFT(memw + SSIZE_OF(sw)), DR(REG_PAIR_SECOND(reg))));
+ return push_inst(compiler, ins_right | T(REG_PAIR_SECOND(reg)) | IMM_RIGHT(memw + SSIZE_OF(sw)), DR(REG_PAIR_SECOND(reg)));
+ }
+#endif /* !(SLJIT_MIPS_REV >= 6) */
+
+ FAIL_IF(update_mem_addr(compiler, &mem, &memw, SIMM_MAX - SSIZE_OF(sw)));
+
+ ins = ((type & SLJIT_MEM_STORE) ? STORE_W : LOAD_W) | S(mem);
+
+ if (!(type & SLJIT_MEM_STORE) && mem == REG_PAIR_FIRST(reg)) {
+ FAIL_IF(push_inst(compiler, ins | T(REG_PAIR_SECOND(reg)) | IMM(memw + SSIZE_OF(sw)), DR(REG_PAIR_SECOND(reg))));
+ return push_inst(compiler, ins | T(REG_PAIR_FIRST(reg)) | IMM(memw), DR(REG_PAIR_FIRST(reg)));
+ }
+
+ FAIL_IF(push_inst(compiler, ins | T(REG_PAIR_FIRST(reg)) | IMM(memw), DR(REG_PAIR_FIRST(reg))));
+ return push_inst(compiler, ins | T(REG_PAIR_SECOND(reg)) | IMM(memw + SSIZE_OF(sw)), DR(REG_PAIR_SECOND(reg)));
+ }
+
+#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6)
+ return sljit_emit_mem_unaligned(compiler, type, reg, mem, memw);
+#else /* !(SLJIT_MIPS_REV >= 6) */
+ ADJUST_LOCAL_OFFSET(mem, memw);
+
+ switch (op) {
+ case SLJIT_MOV_U8:
+ case SLJIT_MOV_S8:
+ flags = BYTE_DATA;
+ if (!(type & SLJIT_MEM_STORE))
+ flags |= LOAD_DATA;
+
+ if (op == SLJIT_MOV_S8)
+ flags |= SIGNED_DATA;
+
+ return emit_op_mem(compiler, flags, DR(reg), mem, memw);
+
+ case SLJIT_MOV_U16:
+ case SLJIT_MOV_S16:
+ FAIL_IF(update_mem_addr(compiler, &mem, &memw, SIMM_MAX - 1));
+ SLJIT_ASSERT(FAST_IS_REG(mem) && mem != TMP_REG2);
+
+ if (type & SLJIT_MEM_STORE) {
+ FAIL_IF(push_inst(compiler, SRA_W | T(reg) | D(TMP_REG2) | SH_IMM(8), DR(TMP_REG2)));
+ FAIL_IF(push_inst(compiler, data_transfer_insts[BYTE_DATA] | S(mem) | T(TMP_REG2) | IMM_16_FIRST(memw), MOVABLE_INS));
+ return push_inst(compiler, data_transfer_insts[BYTE_DATA] | S(mem) | T(reg) | IMM_16_SECOND(memw), MOVABLE_INS);
+ }
+
+ flags = BYTE_DATA | LOAD_DATA;
+
+ if (op == SLJIT_MOV_S16)
+ flags |= SIGNED_DATA;
+
+ FAIL_IF(push_inst(compiler, data_transfer_insts[flags] | S(mem) | T(TMP_REG2) | IMM_16_FIRST(memw), DR(TMP_REG2)));
+ FAIL_IF(push_inst(compiler, data_transfer_insts[BYTE_DATA | LOAD_DATA] | S(mem) | T(reg) | IMM_16_SECOND(memw), DR(reg)));
+ FAIL_IF(push_inst(compiler, SLL_W | T(TMP_REG2) | D(TMP_REG2) | SH_IMM(8), DR(TMP_REG2)));
+ return push_inst(compiler, OR | S(reg) | T(TMP_REG2) | D(reg), DR(reg));
+
+ case SLJIT_MOV:
+ case SLJIT_MOV_P:
+#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
+ if (type & SLJIT_MEM_ALIGNED_32) {
+ flags = WORD_DATA;
+ if (!(type & SLJIT_MEM_STORE))
+ flags |= LOAD_DATA;
+
+ return emit_op_mem(compiler, flags, DR(reg), mem, memw);
+ }
+#else /* !SLJIT_CONFIG_MIPS_32 */
+ FAIL_IF(update_mem_addr(compiler, &mem, &memw, SIMM_MAX - 7));
+ SLJIT_ASSERT(FAST_IS_REG(mem) && mem != TMP_REG2);
+
+ if (type & SLJIT_MEM_STORE) {
+ FAIL_IF(push_inst(compiler, SDL | S(mem) | T(reg) | IMM_LEFT(memw), MOVABLE_INS));
+ return push_inst(compiler, SDR | S(mem) | T(reg) | IMM_RIGHT(memw), MOVABLE_INS);
+ }
+
+ if (mem == reg) {
+ FAIL_IF(push_inst(compiler, ADDU_W | S(mem) | TA(0) | D(TMP_REG1), DR(TMP_REG1)));
+ mem = TMP_REG1;
+ }
+
+ FAIL_IF(push_inst(compiler, LDL | S(mem) | T(reg) | IMM_LEFT(memw), DR(reg)));
+ return push_inst(compiler, LDR | S(mem) | T(reg) | IMM_RIGHT(memw), DR(reg));
+#endif /* SLJIT_CONFIG_MIPS_32 */
+ }
+
+ FAIL_IF(update_mem_addr(compiler, &mem, &memw, SIMM_MAX - 3));
+ SLJIT_ASSERT(FAST_IS_REG(mem) && mem != TMP_REG2);
+
+ if (type & SLJIT_MEM_STORE) {
+ FAIL_IF(push_inst(compiler, SWL | S(mem) | T(reg) | IMM_32_LEFT(memw), MOVABLE_INS));
+ return push_inst(compiler, SWR | S(mem) | T(reg) | IMM_32_RIGHT(memw), MOVABLE_INS);
+ }
+
+ if (mem == reg) {
+ FAIL_IF(push_inst(compiler, ADDU_W | S(mem) | TA(0) | D(TMP_REG1), DR(TMP_REG1)));
+ mem = TMP_REG1;
+ }
+
+ FAIL_IF(push_inst(compiler, LWL | S(mem) | T(reg) | IMM_32_LEFT(memw), DR(reg)));
+#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
+ return push_inst(compiler, LWR | S(mem) | T(reg) | IMM_32_RIGHT(memw), DR(reg));
+#else /* !SLJIT_CONFIG_MIPS_32 */
+ FAIL_IF(push_inst(compiler, LWR | S(mem) | T(reg) | IMM_32_RIGHT(memw), DR(reg)));
+
+ if (op != SLJIT_MOV_U32)
+ return SLJIT_SUCCESS;
+
+#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 2)
+ return push_inst(compiler, DINSU | T(reg) | SA(0) | (31 << 11), DR(reg));
+#else /* SLJIT_MIPS_REV < 2 */
+ FAIL_IF(push_inst(compiler, DSLL32 | T(reg) | D(reg) | SH_IMM(0), DR(reg)));
+ return push_inst(compiler, DSRL32 | T(reg) | D(reg) | SH_IMM(0), DR(reg));
+#endif /* SLJIT_MIPS_REV >= 2 */
+#endif /* SLJIT_CONFIG_MIPS_32 */
+#endif /* SLJIT_MIPS_REV >= 6 */
+}
+
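The pre-R6 paths above lean on the MIPS left/right load and store pairs (LWL/LWR, SWL/SWR and the 64-bit LDL/LDR, SDL/SDR), which split an unaligned access into two accesses that never cross an alignment boundary; note also how the code copies the base register aside (or reorders the two loads) whenever it doubles as a destination, so the address survives the first load. A portable C sketch of the combined effect, assuming a little-endian host (the real instructions merge partial bytes directly into the destination register; this model only reproduces the final value):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Combine two aligned word reads into one unaligned 32-bit load,
       the effect the LWL/LWR pair achieves on pre-R6 MIPS. */
    static uint32_t load_unaligned_u32(const uint8_t *base, size_t off)
    {
        size_t aligned = off & ~(size_t)3;
        unsigned shift = (unsigned)(off & 3) * 8;
        uint32_t w0, w1;

        memcpy(&w0, base + aligned, 4);
        if (shift == 0)
            return w0;
        memcpy(&w1, base + aligned + 4, 4);
        /* Tail of the first aligned word, head of the second. */
        return (w0 >> shift) | (w1 << (32 - shift));
    }

    int main(void)
    {
        uint8_t buf[8] = {0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88};
        printf("0x%08x\n", load_unaligned_u32(buf, 1)); /* 0x55443322 on LE */
        return 0;
    }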
+#if !(defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6)
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 mem, sljit_sw memw)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fmem(compiler, type, freg, mem, memw));
+
+ FAIL_IF(update_mem_addr(compiler, &mem, &memw, SIMM_MAX - ((type & SLJIT_32) ? 3 : 7)));
+ SLJIT_ASSERT(FAST_IS_REG(mem) && mem != TMP_REG2);
+
+ if (type & SLJIT_MEM_STORE) {
+ if (type & SLJIT_32) {
+ FAIL_IF(push_inst(compiler, MFC1 | T(TMP_REG2) | FS(freg), DR(TMP_REG2)));
+#if !defined(SLJIT_MIPS_REV) || (SLJIT_CONFIG_MIPS_32 && SLJIT_MIPS_REV <= 1)
+ FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS));
+#endif /* MIPS III */
+ FAIL_IF(push_inst(compiler, SWL | S(mem) | T(TMP_REG2) | IMM_32_LEFT(memw), MOVABLE_INS));
+ return push_inst(compiler, SWR | S(mem) | T(TMP_REG2) | IMM_32_RIGHT(memw), MOVABLE_INS);
+ }
+
+#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
+ FAIL_IF(push_inst(compiler, MFC1 | T(TMP_REG2) | FS(freg), DR(TMP_REG2)));
+#if !defined(SLJIT_MIPS_REV) || SLJIT_MIPS_REV <= 1
+ FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS));
+#endif /* MIPS III */
+ FAIL_IF(push_inst(compiler, SWL | S(mem) | T(TMP_REG2) | IMM_F64_FIRST_LEFT(memw), MOVABLE_INS));
+ FAIL_IF(push_inst(compiler, SWR | S(mem) | T(TMP_REG2) | IMM_F64_FIRST_RIGHT(memw), MOVABLE_INS));
+ switch (cpu_feature_list & CPU_FEATURE_FR) {
+#if defined(SLJIT_MIPS_REV) && SLJIT_MIPS_REV >= 2
+ case CPU_FEATURE_FR:
+ FAIL_IF(push_inst(compiler, MFHC1 | T(TMP_REG2) | FS(freg), DR(TMP_REG2)));
+ break;
+#endif /* SLJIT_MIPS_REV >= 2 */
+ default:
+ FAIL_IF(push_inst(compiler, MFC1 | T(TMP_REG2) | FS(freg) | (1 << 11), DR(TMP_REG2)));
+#if !defined(SLJIT_MIPS_REV) || SLJIT_MIPS_REV <= 1
+ FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS));
+#endif
+ break;
+ }
+
+ FAIL_IF(push_inst(compiler, SWL | S(mem) | T(TMP_REG2) | IMM_F64_SECOND_LEFT(memw), MOVABLE_INS));
+ return push_inst(compiler, SWR | S(mem) | T(TMP_REG2) | IMM_F64_SECOND_RIGHT(memw), MOVABLE_INS);
+#else /* !SLJIT_CONFIG_MIPS_32 */
+ FAIL_IF(push_inst(compiler, DMFC1 | T(TMP_REG2) | FS(freg), DR(TMP_REG2)));
+#if !defined(SLJIT_MIPS_REV) || SLJIT_MIPS_REV <= 1
+ FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS));
+#endif /* MIPS III */
+ FAIL_IF(push_inst(compiler, SDL | S(mem) | T(TMP_REG2) | IMM_LEFT(memw), MOVABLE_INS));
+ return push_inst(compiler, SDR | S(mem) | T(TMP_REG2) | IMM_RIGHT(memw), MOVABLE_INS);
+#endif /* SLJIT_CONFIG_MIPS_32 */
+ }
+
+ if (type & SLJIT_32) {
+ FAIL_IF(push_inst(compiler, LWL | S(mem) | T(TMP_REG2) | IMM_32_LEFT(memw), DR(TMP_REG2)));
+ FAIL_IF(push_inst(compiler, LWR | S(mem) | T(TMP_REG2) | IMM_32_RIGHT(memw), DR(TMP_REG2)));
+
+ FAIL_IF(push_inst(compiler, MTC1 | T(TMP_REG2) | FS(freg), MOVABLE_INS));
+#if !defined(SLJIT_MIPS_REV) || (SLJIT_CONFIG_MIPS_32 && SLJIT_MIPS_REV <= 1)
+ FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS));
+#endif /* MIPS III */
+ return SLJIT_SUCCESS;
+ }
+
+#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
+ FAIL_IF(push_inst(compiler, LWL | S(mem) | T(TMP_REG2) | IMM_F64_FIRST_LEFT(memw), DR(TMP_REG2)));
+ FAIL_IF(push_inst(compiler, LWR | S(mem) | T(TMP_REG2) | IMM_F64_FIRST_RIGHT(memw), DR(TMP_REG2)));
+ FAIL_IF(push_inst(compiler, MTC1 | T(TMP_REG2) | FS(freg), MOVABLE_INS));
+
+ FAIL_IF(push_inst(compiler, LWL | S(mem) | T(TMP_REG2) | IMM_F64_SECOND_LEFT(memw), DR(TMP_REG2)));
+ FAIL_IF(push_inst(compiler, LWR | S(mem) | T(TMP_REG2) | IMM_F64_SECOND_RIGHT(memw), DR(TMP_REG2)));
+ switch (cpu_feature_list & CPU_FEATURE_FR) {
+#if defined(SLJIT_MIPS_REV) && SLJIT_MIPS_REV >= 2
+ case CPU_FEATURE_FR:
+ return push_inst(compiler, MTHC1 | T(TMP_REG2) | FS(freg), MOVABLE_INS);
+#endif /* SLJIT_MIPS_REV >= 2 */
+ default:
+ FAIL_IF(push_inst(compiler, MTC1 | T(TMP_REG2) | FS(freg) | (1 << 11), MOVABLE_INS));
+ break;
+ }
+#else /* !SLJIT_CONFIG_MIPS_32 */
+ FAIL_IF(push_inst(compiler, LDL | S(mem) | T(TMP_REG2) | IMM_LEFT(memw), DR(TMP_REG2)));
+ FAIL_IF(push_inst(compiler, LDR | S(mem) | T(TMP_REG2) | IMM_RIGHT(memw), DR(TMP_REG2)));
+
+ FAIL_IF(push_inst(compiler, DMTC1 | T(TMP_REG2) | FS(freg), MOVABLE_INS));
+#endif /* SLJIT_CONFIG_MIPS_32 */
+#if !defined(SLJIT_MIPS_REV) || SLJIT_MIPS_REV <= 1
+ FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS));
+#endif /* MIPS III */
+ return SLJIT_SUCCESS;
+}
+
+#endif /* !(SLJIT_MIPS_REV >= 6) */
+
+#undef IMM_16_SECOND
+#undef IMM_16_FIRST
+#undef IMM_F64_SECOND_RIGHT
+#undef IMM_F64_SECOND_LEFT
+#undef IMM_F64_FIRST_RIGHT
+#undef IMM_F64_FIRST_LEFT
+#undef IMM_32_RIGHT
+#undef IMM_32_LEFT
+#undef IMM_RIGHT
+#undef IMM_LEFT
+#undef MEM_CHECK_UNALIGNED
+
+#undef TO_ARGW_HI
+
SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw init_value)
{
struct sljit_const *const_;
@@ -2276,7 +4226,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compi
PTR_FAIL_IF(emit_const(compiler, dst_r, init_value));
if (dst & SLJIT_MEM)
- PTR_FAIL_IF(emit_op(compiler, SLJIT_MOV, WORD_DATA, dst, dstw, TMP_REG1, 0, TMP_REG2, 0));
+ PTR_FAIL_IF(emit_op_mem(compiler, WORD_DATA, DR(TMP_REG2), dst, dstw));
return const_;
}
@@ -2295,15 +4245,15 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_put_label* sljit_emit_put_label(struct slj
set_put_label(put_label, compiler, 0);
dst_r = FAST_IS_REG(dst) ? dst : TMP_REG2;
+ PTR_FAIL_IF(push_inst(compiler, (sljit_ins)dst_r, UNMOVABLE_INS));
#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
- PTR_FAIL_IF(emit_const(compiler, dst_r, 0));
+ compiler->size += 1;
#else
- PTR_FAIL_IF(push_inst(compiler, dst_r, UNMOVABLE_INS));
compiler->size += 5;
#endif
if (dst & SLJIT_MEM)
- PTR_FAIL_IF(emit_op(compiler, SLJIT_MOV, WORD_DATA, dst, dstw, TMP_REG1, 0, TMP_REG2, 0));
+ PTR_FAIL_IF(emit_op_mem(compiler, WORD_DATA, DR(TMP_REG2), dst, dstw));
return put_label;
}
diff --git a/src/3rdparty/pcre2/src/sljit/sljitNativePPC_32.c b/src/3rdparty/pcre2/src/sljit/sljitNativePPC_32.c
index 6ddb5508ec..2352fad5d4 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitNativePPC_32.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitNativePPC_32.c
@@ -38,12 +38,15 @@ static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 reg,
return (imm & 0xffff) ? push_inst(compiler, ORI | S(reg) | A(reg) | IMM(imm)) : SLJIT_SUCCESS;
}
+/* Simplified mnemonics: clrlwi. */
#define INS_CLEAR_LEFT(dst, src, from) \
- (RLWINM | S(src) | A(dst) | ((from) << 6) | (31 << 1))
+ (RLWINM | S(src) | A(dst) | RLWI_MBE(from, 31))
static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags,
sljit_s32 dst, sljit_s32 src1, sljit_s32 src2)
{
+ sljit_u32 imm;
+
switch (op) {
case SLJIT_MOV:
case SLJIT_MOV_U32:
@@ -82,19 +85,20 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
}
return SLJIT_SUCCESS;
- case SLJIT_NOT:
- SLJIT_ASSERT(src1 == TMP_REG1);
- return push_inst(compiler, NOR | RC(flags) | S(src2) | A(dst) | B(src2));
-
- case SLJIT_NEG:
- SLJIT_ASSERT(src1 == TMP_REG1);
- /* Setting XER SO is not enough, CR SO is also needed. */
- return push_inst(compiler, NEG | OE((flags & ALT_FORM1) ? ALT_SET_FLAGS : 0) | RC(flags) | D(dst) | A(src2));
-
case SLJIT_CLZ:
SLJIT_ASSERT(src1 == TMP_REG1);
return push_inst(compiler, CNTLZW | S(src2) | A(dst));
+ case SLJIT_CTZ:
+ SLJIT_ASSERT(src1 == TMP_REG1);
+ FAIL_IF(push_inst(compiler, NEG | D(TMP_REG1) | A(src2)));
+ FAIL_IF(push_inst(compiler, AND | S(src2) | A(dst) | B(TMP_REG1)));
+ FAIL_IF(push_inst(compiler, CNTLZW | S(dst) | A(dst)));
+ FAIL_IF(push_inst(compiler, ADDI | D(TMP_REG1) | A(dst) | IMM(-32)));
+ /* The highest bits are set if dst < 32, and zero otherwise. */
+ FAIL_IF(push_inst(compiler, SRWI(27) | S(TMP_REG1) | A(TMP_REG1)));
+ return push_inst(compiler, XOR | S(dst) | A(dst) | B(TMP_REG1));
+
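PowerPC historically has no count-trailing-zeros instruction (cnttzw only arrived with ISA 3.0), so the SLJIT_CTZ sequence above derives it from cntlzw: x & -x isolates the lowest set bit, the leading-zero count of that single bit is 31 - ctz(x), and the addi/srwi/xor tail maps the count back while sending x == 0 to 32. A host-side sketch, using the GCC/Clang __builtin_clz as a stand-in for cntlzw:

    #include <stdint.h>

    static unsigned ctz32(uint32_t x)
    {
        uint32_t low = x & (0u - x);   /* lowest set bit; 0 when x == 0 */
        unsigned clz = low ? (unsigned)__builtin_clz(low) : 32;
        /* (clz - 32) >> 27 is 31 while clz < 32, and 0 when clz == 32. */
        uint32_t fix = (uint32_t)(clz - 32) >> 27;
        return clz ^ fix;              /* 31 - clz, or 32 for x == 0 */
    }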
case SLJIT_ADD:
if (flags & ALT_FORM1) {
/* Setting XER SO is not enough, CR SO is also needed. */
@@ -108,12 +112,14 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
if (flags & ALT_FORM3)
return push_inst(compiler, ADDIS | D(dst) | A(src1) | compiler->imm);
+ imm = compiler->imm;
+
if (flags & ALT_FORM4) {
- FAIL_IF(push_inst(compiler, ADDIS | D(dst) | A(src1) | (((compiler->imm >> 16) & 0xffff) + ((compiler->imm >> 15) & 0x1))));
+ FAIL_IF(push_inst(compiler, ADDIS | D(dst) | A(src1) | (((imm >> 16) & 0xffff) + ((imm >> 15) & 0x1))));
src1 = dst;
}
- return push_inst(compiler, ADDI | D(dst) | A(src1) | (compiler->imm & 0xffff));
+ return push_inst(compiler, ADDI | D(dst) | A(src1) | (imm & 0xffff));
}
if (flags & ALT_FORM3) {
SLJIT_ASSERT(src2 == TMP_REG2);
@@ -158,7 +164,9 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
if (flags & ALT_FORM3) {
/* Setting XER SO is not enough, CR SO is also needed. */
- return push_inst(compiler, SUBF | OE(ALT_SET_FLAGS) | RC(ALT_SET_FLAGS) | D(dst) | A(src2) | B(src1));
+ if (src1 != TMP_ZERO)
+ return push_inst(compiler, SUBF | OE(ALT_SET_FLAGS) | RC(ALT_SET_FLAGS) | D(dst) | A(src2) | B(src1));
+ return push_inst(compiler, NEG | OE(ALT_SET_FLAGS) | RC(ALT_SET_FLAGS) | D(dst) | A(src2));
}
if (flags & ALT_FORM4) {
@@ -167,11 +175,17 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
return push_inst(compiler, SUBFIC | D(dst) | A(src1) | compiler->imm);
}
- if (!(flags & ALT_SET_FLAGS))
+ if (!(flags & ALT_SET_FLAGS)) {
+ SLJIT_ASSERT(src1 != TMP_ZERO);
return push_inst(compiler, SUBF | D(dst) | A(src2) | B(src1));
+ }
+
if (flags & ALT_FORM5)
return push_inst(compiler, SUBFC | RC(ALT_SET_FLAGS) | D(dst) | A(src2) | B(src1));
- return push_inst(compiler, SUBF | RC(flags) | D(dst) | A(src2) | B(src1));
+
+ if (src1 != TMP_ZERO)
+ return push_inst(compiler, SUBF | RC(ALT_SET_FLAGS) | D(dst) | A(src2) | B(src1));
+ return push_inst(compiler, NEG | RC(ALT_SET_FLAGS) | D(dst) | A(src2));
case SLJIT_SUBC:
return push_inst(compiler, SUBFE | D(dst) | A(src2) | B(src1));
@@ -205,8 +219,10 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
}
if (flags & ALT_FORM3) {
SLJIT_ASSERT(src2 == TMP_REG2);
- FAIL_IF(push_inst(compiler, ORI | S(src1) | A(dst) | IMM(compiler->imm)));
- return push_inst(compiler, ORIS | S(dst) | A(dst) | IMM(compiler->imm >> 16));
+ imm = compiler->imm;
+
+ FAIL_IF(push_inst(compiler, ORI | S(src1) | A(dst) | IMM(imm)));
+ return push_inst(compiler, ORIS | S(dst) | A(dst) | IMM(imm >> 16));
}
return push_inst(compiler, OR | RC(flags) | S(src1) | A(dst) | B(src2));
@@ -221,34 +237,82 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
}
if (flags & ALT_FORM3) {
SLJIT_ASSERT(src2 == TMP_REG2);
- FAIL_IF(push_inst(compiler, XORI | S(src1) | A(dst) | IMM(compiler->imm)));
- return push_inst(compiler, XORIS | S(dst) | A(dst) | IMM(compiler->imm >> 16));
+ imm = compiler->imm;
+
+ FAIL_IF(push_inst(compiler, XORI | S(src1) | A(dst) | IMM(imm)));
+ return push_inst(compiler, XORIS | S(dst) | A(dst) | IMM(imm >> 16));
+ }
+ if (flags & ALT_FORM4) {
+ SLJIT_ASSERT(src1 == TMP_REG1);
+ return push_inst(compiler, NOR | RC(flags) | S(src2) | A(dst) | B(src2));
}
return push_inst(compiler, XOR | RC(flags) | S(src1) | A(dst) | B(src2));
case SLJIT_SHL:
+ case SLJIT_MSHL:
if (flags & ALT_FORM1) {
SLJIT_ASSERT(src2 == TMP_REG2);
- compiler->imm &= 0x1f;
- return push_inst(compiler, RLWINM | RC(flags) | S(src1) | A(dst) | (compiler->imm << 11) | ((31 - compiler->imm) << 1));
+ imm = compiler->imm & 0x1f;
+ return push_inst(compiler, SLWI(imm) | RC(flags) | S(src1) | A(dst));
+ }
+
+ if (op == SLJIT_MSHL) {
+ FAIL_IF(push_inst(compiler, ANDI | S(src2) | A(TMP_REG2) | 0x1f));
+ src2 = TMP_REG2;
}
+
return push_inst(compiler, SLW | RC(flags) | S(src1) | A(dst) | B(src2));
case SLJIT_LSHR:
+ case SLJIT_MLSHR:
if (flags & ALT_FORM1) {
SLJIT_ASSERT(src2 == TMP_REG2);
- compiler->imm &= 0x1f;
- return push_inst(compiler, RLWINM | RC(flags) | S(src1) | A(dst) | (((32 - compiler->imm) & 0x1f) << 11) | (compiler->imm << 6) | (31 << 1));
+ imm = compiler->imm & 0x1f;
+ /* Since imm can be 0, SRWI() cannot be used. */
+ return push_inst(compiler, RLWINM | RC(flags) | S(src1) | A(dst) | RLWI_SH((32 - imm) & 0x1f) | RLWI_MBE(imm, 31));
}
+
+ if (op == SLJIT_MLSHR) {
+ FAIL_IF(push_inst(compiler, ANDI | S(src2) | A(TMP_REG2) | 0x1f));
+ src2 = TMP_REG2;
+ }
+
return push_inst(compiler, SRW | RC(flags) | S(src1) | A(dst) | B(src2));
case SLJIT_ASHR:
+ case SLJIT_MASHR:
if (flags & ALT_FORM1) {
SLJIT_ASSERT(src2 == TMP_REG2);
- compiler->imm &= 0x1f;
- return push_inst(compiler, SRAWI | RC(flags) | S(src1) | A(dst) | (compiler->imm << 11));
+ imm = compiler->imm & 0x1f;
+ return push_inst(compiler, SRAWI | RC(flags) | S(src1) | A(dst) | (imm << 11));
}
+
+ if (op == SLJIT_MASHR) {
+ FAIL_IF(push_inst(compiler, ANDI | S(src2) | A(TMP_REG2) | 0x1f));
+ src2 = TMP_REG2;
+ }
+
return push_inst(compiler, SRAW | RC(flags) | S(src1) | A(dst) | B(src2));
+
+ case SLJIT_ROTL:
+ case SLJIT_ROTR:
+ if (flags & ALT_FORM1) {
+ SLJIT_ASSERT(src2 == TMP_REG2);
+ imm = compiler->imm;
+
+ if (op == SLJIT_ROTR)
+ imm = (sljit_u32)(-(sljit_s32)imm);
+
+ imm &= 0x1f;
+ return push_inst(compiler, RLWINM | S(src1) | A(dst) | RLWI_SH(imm) | RLWI_MBE(0, 31));
+ }
+
+ if (op == SLJIT_ROTR) {
+ FAIL_IF(push_inst(compiler, SUBFIC | D(TMP_REG2) | A(src2) | 0));
+ src2 = TMP_REG2;
+ }
+
+ return push_inst(compiler, RLWNM | S(src1) | A(dst) | B(src2) | RLWI_MBE(0, 31));
}
SLJIT_UNREACHABLE();
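PowerPC only rotates left (RLWINM/RLWNM), so SLJIT_ROTR above is lowered to a left rotate by the negated amount: the immediate form negates at compile time, and the register form materialises 0 - src2 with SUBFIC. In portable C, assuming 32-bit unsigned arithmetic:

    #include <stdint.h>

    static uint32_t rotl32(uint32_t x, unsigned n)
    {
        n &= 31;
        return (x << n) | (x >> ((32 - n) & 31));
    }

    static uint32_t rotr32(uint32_t x, unsigned n)
    {
        /* rotr(x, n) == rotl(x, (0 - n) & 31) */
        return rotl32(x, (0u - n) & 31);
    }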
@@ -261,6 +325,151 @@ static SLJIT_INLINE sljit_s32 emit_const(struct sljit_compiler *compiler, sljit_
return push_inst(compiler, ORI | S(reg) | A(reg) | IMM(init_value));
}
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 src, sljit_sw srcw)
+{
+ sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
+ sljit_s32 invert_sign = 1;
+
+ if (src == SLJIT_IMM) {
+ FAIL_IF(load_immediate(compiler, TMP_REG1, srcw ^ (sljit_sw)0x80000000));
+ src = TMP_REG1;
+ invert_sign = 0;
+ } else if (!FAST_IS_REG(src)) {
+ FAIL_IF(emit_op_mem(compiler, WORD_DATA | SIGNED_DATA | LOAD_DATA, TMP_REG1, src, srcw, TMP_REG1));
+ src = TMP_REG1;
+ }
+
+ /* First, a special double precision floating point value is constructed:
+ (2^52 + (src xor (2^31)))
+ The upper 32 bits of this number are a constant, and the lower 32 bits
+ are simply the value of the source argument. The xor 2^31 operation adds
+ 0x80000000 to the source argument, which moves it into the 0 - 0xffffffff
+ range. Finally we subtract 2^52 + 2^31 to get the converted value. */
+ FAIL_IF(push_inst(compiler, ADDIS | D(TMP_REG2) | A(0) | 0x4330));
+ if (invert_sign)
+ FAIL_IF(push_inst(compiler, XORIS | S(src) | A(TMP_REG1) | 0x8000));
+ FAIL_IF(push_inst(compiler, STW | S(TMP_REG2) | A(SLJIT_SP) | TMP_MEM_OFFSET_HI));
+ FAIL_IF(push_inst(compiler, STW | S(TMP_REG1) | A(SLJIT_SP) | TMP_MEM_OFFSET_LO));
+ FAIL_IF(push_inst(compiler, ADDIS | D(TMP_REG1) | A(0) | 0x8000));
+ FAIL_IF(push_inst(compiler, LFD | FS(TMP_FREG1) | A(SLJIT_SP) | TMP_MEM_OFFSET));
+ FAIL_IF(push_inst(compiler, STW | S(TMP_REG1) | A(SLJIT_SP) | TMP_MEM_OFFSET_LO));
+ FAIL_IF(push_inst(compiler, LFD | FS(TMP_FREG2) | A(SLJIT_SP) | TMP_MEM_OFFSET));
+
+ FAIL_IF(push_inst(compiler, FSUB | FD(dst_r) | FA(TMP_FREG1) | FB(TMP_FREG2)));
+
+ if (op & SLJIT_32)
+ FAIL_IF(push_inst(compiler, FRSP | FD(dst_r) | FB(dst_r)));
+
+ if (dst & SLJIT_MEM)
+ return emit_op_mem(compiler, FLOAT_DATA(op), TMP_FREG1, dst, dstw, TMP_REG1);
+ return SLJIT_SUCCESS;
+}
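The same trick restated in portable C, assuming IEEE-754 doubles and matching integer/double byte order on the host (the PPC code builds the bit pattern with two STW instructions and reloads it with LFD; here memcpy stands in for that store/reload):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    static double s32_to_f64(int32_t src)
    {
        /* High word 0x43300000 is the exponent of 2^52; the low word
           holds src with its sign bit flipped, i.e. src + 2^31. */
        uint64_t bits = (0x43300000ULL << 32) | ((uint32_t)src ^ 0x80000000u);
        double d;
        memcpy(&d, &bits, sizeof(d));
        return d - (4503599627370496.0 + 2147483648.0); /* 2^52 + 2^31 */
    }

    int main(void)
    {
        printf("%.1f %.1f\n", s32_to_f64(-123), s32_to_f64(2147483647));
        /* prints -123.0 2147483647.0 */
        return 0;
    }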
+
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_uw(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 src, sljit_sw srcw)
+{
+ sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
+
+ if (src == SLJIT_IMM) {
+ FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
+ src = TMP_REG1;
+ } else if (!FAST_IS_REG(src)) {
+ FAIL_IF(emit_op_mem(compiler, WORD_DATA | SIGNED_DATA | LOAD_DATA, TMP_REG1, src, srcw, TMP_REG1));
+ src = TMP_REG1;
+ }
+
+ /* First, a special double precision floating point value is constructed:
+ (2^52 + src)
+ The upper 32 bits of this number are a constant, and the lower 32 bits
+ are simply the value of the source argument. Finally we subtract 2^52
+ to get the converted value. */
+ FAIL_IF(push_inst(compiler, ADDIS | D(TMP_REG2) | A(0) | 0x4330));
+ FAIL_IF(push_inst(compiler, STW | S(src) | A(SLJIT_SP) | TMP_MEM_OFFSET_LO));
+ FAIL_IF(push_inst(compiler, STW | S(TMP_REG2) | A(SLJIT_SP) | TMP_MEM_OFFSET_HI));
+
+ FAIL_IF(push_inst(compiler, LFD | FS(TMP_FREG1) | A(SLJIT_SP) | TMP_MEM_OFFSET));
+ FAIL_IF(push_inst(compiler, STW | S(TMP_ZERO) | A(SLJIT_SP) | TMP_MEM_OFFSET_LO));
+ FAIL_IF(push_inst(compiler, LFD | FS(TMP_FREG2) | A(SLJIT_SP) | TMP_MEM_OFFSET));
+
+ FAIL_IF(push_inst(compiler, FSUB | FD(dst_r) | FA(TMP_FREG1) | FB(TMP_FREG2)));
+
+ if (op & SLJIT_32)
+ FAIL_IF(push_inst(compiler, FRSP | FD(dst_r) | FB(dst_r)));
+
+ if (dst & SLJIT_MEM)
+ return emit_op_mem(compiler, FLOAT_DATA(op), TMP_FREG1, dst, dstw, TMP_REG1);
+ return SLJIT_SUCCESS;
+}
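The unsigned variant is the same construction without the sign-bit flip, so only 2^52 has to be subtracted. A matching sketch under the same IEEE-754 assumption:

    #include <stdint.h>
    #include <string.h>

    static double u32_to_f64(uint32_t src)
    {
        uint64_t bits = (0x43300000ULL << 32) | src;
        double d;
        memcpy(&d, &bits, sizeof(d));
        return d - 4503599627370496.0; /* 2^52 */
    }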
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fset64(struct sljit_compiler *compiler,
+ sljit_s32 freg, sljit_f64 value)
+{
+ union {
+ sljit_s32 imm[2];
+ sljit_f64 value;
+ } u;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fset64(compiler, freg, value));
+
+ u.value = value;
+
+ if (u.imm[0] != 0)
+ FAIL_IF(load_immediate(compiler, TMP_REG1, u.imm[0]));
+ if (u.imm[1] != 0)
+ FAIL_IF(load_immediate(compiler, TMP_REG2, u.imm[1]));
+
+ /* The two words are stored in the same order as they appear in the f64 value. */
+ FAIL_IF(push_inst(compiler, STW | S(u.imm[0] != 0 ? TMP_REG1 : TMP_ZERO) | A(SLJIT_SP) | TMP_MEM_OFFSET));
+ FAIL_IF(push_inst(compiler, STW | S(u.imm[1] != 0 ? TMP_REG2 : TMP_ZERO) | A(SLJIT_SP) | (TMP_MEM_OFFSET + sizeof(sljit_s32))));
+ return push_inst(compiler, LFD | FS(freg) | A(SLJIT_SP) | TMP_MEM_OFFSET);
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fcopy(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 freg, sljit_s32 reg)
+{
+ sljit_s32 reg2 = 0;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fcopy(compiler, op, freg, reg));
+
+ if (op & SLJIT_32) {
+ if (op == SLJIT_COPY32_TO_F32) {
+ FAIL_IF(push_inst(compiler, STW | S(reg) | A(SLJIT_SP) | TMP_MEM_OFFSET));
+ return push_inst(compiler, LFS | FS(freg) | A(SLJIT_SP) | TMP_MEM_OFFSET);
+ }
+
+ FAIL_IF(push_inst(compiler, STFS | FS(freg) | A(SLJIT_SP) | TMP_MEM_OFFSET));
+ return push_inst(compiler, LWZ | S(reg) | A(SLJIT_SP) | TMP_MEM_OFFSET);
+ }
+
+ if (reg & REG_PAIR_MASK) {
+ reg2 = REG_PAIR_SECOND(reg);
+ reg = REG_PAIR_FIRST(reg);
+ }
+
+ if (op == SLJIT_COPY_TO_F64) {
+ FAIL_IF(push_inst(compiler, STW | S(reg) | A(SLJIT_SP) | TMP_MEM_OFFSET_HI));
+
+ if (reg2 != 0)
+ FAIL_IF(push_inst(compiler, STW | S(reg2) | A(SLJIT_SP) | TMP_MEM_OFFSET_LO));
+ else
+ FAIL_IF(push_inst(compiler, STFD | FS(freg) | A(SLJIT_SP) | TMP_MEM_OFFSET_LO));
+
+ return push_inst(compiler, LFD | FS(freg) | A(SLJIT_SP) | TMP_MEM_OFFSET);
+ }
+
+ FAIL_IF(push_inst(compiler, STFD | FS(freg) | A(SLJIT_SP) | TMP_MEM_OFFSET));
+
+ if (reg2 != 0)
+ FAIL_IF(push_inst(compiler, LWZ | S(reg2) | A(SLJIT_SP) | TMP_MEM_OFFSET_LO));
+
+ return push_inst(compiler, LWZ | S(reg) | A(SLJIT_SP) | TMP_MEM_OFFSET_HI);
+}
+
SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_target, sljit_sw executable_offset)
{
sljit_ins *inst = (sljit_ins *)addr;
@@ -274,8 +483,3 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_ta
inst = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
SLJIT_CACHE_FLUSH(inst, inst + 2);
}
-
-SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset)
-{
- sljit_set_jump_addr(addr, new_constant, executable_offset);
-}
diff --git a/src/3rdparty/pcre2/src/sljit/sljitNativePPC_64.c b/src/3rdparty/pcre2/src/sljit/sljitNativePPC_64.c
index cbdf2dd8a2..b3cf9d074d 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitNativePPC_64.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitNativePPC_64.c
@@ -35,8 +35,9 @@
#error "Must implement count leading zeroes"
#endif
-#define PUSH_RLDICR(reg, shift) \
- push_inst(compiler, RLDI(reg, reg, 63 - shift, shift, 1))
+/* Computes SLDI(63 - shift). */
+#define PUSH_SLDI_NEG(reg, shift) \
+ push_inst(compiler, RLDICR | S(reg) | A(reg) | RLDI_SH(63 - shift) | RLDI_ME(shift))
static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 reg, sljit_sw imm)
{
@@ -48,7 +49,7 @@ static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 reg,
if (imm <= SIMM_MAX && imm >= SIMM_MIN)
return push_inst(compiler, ADDI | D(reg) | A(0) | IMM(imm));
- if (!(imm & ~0xffff))
+ if (((sljit_uw)imm >> 16) == 0)
return push_inst(compiler, ORI | S(TMP_ZERO) | A(reg) | IMM(imm));
if (imm <= 0x7fffffffl && imm >= -0x80000000l) {
@@ -56,40 +57,45 @@ static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 reg,
return (imm & 0xffff) ? push_inst(compiler, ORI | S(reg) | A(reg) | IMM(imm)) : SLJIT_SUCCESS;
}
+ if (((sljit_uw)imm >> 32) == 0) {
+ FAIL_IF(push_inst(compiler, ORIS | S(TMP_ZERO) | A(reg) | IMM(imm >> 16)));
+ return (imm & 0xffff) ? push_inst(compiler, ORI | S(reg) | A(reg) | IMM(imm)) : SLJIT_SUCCESS;
+ }
+
/* Count leading zeroes. */
- tmp = (imm >= 0) ? imm : ~imm;
+ tmp = (sljit_uw)((imm >= 0) ? imm : ~imm);
ASM_SLJIT_CLZ(tmp, shift);
SLJIT_ASSERT(shift > 0);
shift--;
- tmp = (imm << shift);
+ tmp = ((sljit_uw)imm << shift);
if ((tmp & ~0xffff000000000000ul) == 0) {
- FAIL_IF(push_inst(compiler, ADDI | D(reg) | A(0) | IMM(tmp >> 48)));
+ FAIL_IF(push_inst(compiler, ADDI | D(reg) | A(0) | (sljit_ins)(tmp >> 48)));
shift += 15;
- return PUSH_RLDICR(reg, shift);
+ return PUSH_SLDI_NEG(reg, shift);
}
if ((tmp & ~0xffffffff00000000ul) == 0) {
- FAIL_IF(push_inst(compiler, ADDIS | D(reg) | A(0) | IMM(tmp >> 48)));
+ FAIL_IF(push_inst(compiler, ADDIS | D(reg) | A(0) | (sljit_ins)(tmp >> 48)));
FAIL_IF(push_inst(compiler, ORI | S(reg) | A(reg) | IMM(tmp >> 32)));
shift += 31;
- return PUSH_RLDICR(reg, shift);
+ return PUSH_SLDI_NEG(reg, shift);
}
/* Cut out the 16 bit from immediate. */
shift += 15;
- tmp2 = imm & ((1ul << (63 - shift)) - 1);
+ tmp2 = (sljit_uw)imm & (((sljit_uw)1 << (63 - shift)) - 1);
if (tmp2 <= 0xffff) {
- FAIL_IF(push_inst(compiler, ADDI | D(reg) | A(0) | IMM(tmp >> 48)));
- FAIL_IF(PUSH_RLDICR(reg, shift));
- return push_inst(compiler, ORI | S(reg) | A(reg) | tmp2);
+ FAIL_IF(push_inst(compiler, ADDI | D(reg) | A(0) | (sljit_ins)(tmp >> 48)));
+ FAIL_IF(PUSH_SLDI_NEG(reg, shift));
+ return push_inst(compiler, ORI | S(reg) | A(reg) | (sljit_ins)tmp2);
}
if (tmp2 <= 0xffffffff) {
FAIL_IF(push_inst(compiler, ADDI | D(reg) | A(0) | IMM(tmp >> 48)));
- FAIL_IF(PUSH_RLDICR(reg, shift));
- FAIL_IF(push_inst(compiler, ORIS | S(reg) | A(reg) | (tmp2 >> 16)));
+ FAIL_IF(PUSH_SLDI_NEG(reg, shift));
+ FAIL_IF(push_inst(compiler, ORIS | S(reg) | A(reg) | (sljit_ins)(tmp2 >> 16)));
return (imm & 0xffff) ? push_inst(compiler, ORI | S(reg) | A(reg) | IMM(tmp2)) : SLJIT_SUCCESS;
}
@@ -97,25 +103,26 @@ static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 reg,
tmp2 <<= shift2;
if ((tmp2 & ~0xffff000000000000ul) == 0) {
- FAIL_IF(push_inst(compiler, ADDI | D(reg) | A(0) | IMM(tmp >> 48)));
+ FAIL_IF(push_inst(compiler, ADDI | D(reg) | A(0) | (sljit_ins)(tmp >> 48)));
shift2 += 15;
shift += (63 - shift2);
- FAIL_IF(PUSH_RLDICR(reg, shift));
- FAIL_IF(push_inst(compiler, ORI | S(reg) | A(reg) | (tmp2 >> 48)));
- return PUSH_RLDICR(reg, shift2);
+ FAIL_IF(PUSH_SLDI_NEG(reg, shift));
+ FAIL_IF(push_inst(compiler, ORI | S(reg) | A(reg) | (sljit_ins)(tmp2 >> 48)));
+ return PUSH_SLDI_NEG(reg, shift2);
}
/* The general version. */
- FAIL_IF(push_inst(compiler, ADDIS | D(reg) | A(0) | IMM(imm >> 48)));
+ FAIL_IF(push_inst(compiler, ADDIS | D(reg) | A(0) | (sljit_ins)((sljit_uw)imm >> 48)));
FAIL_IF(push_inst(compiler, ORI | S(reg) | A(reg) | IMM(imm >> 32)));
- FAIL_IF(PUSH_RLDICR(reg, 31));
+ FAIL_IF(PUSH_SLDI_NEG(reg, 31));
FAIL_IF(push_inst(compiler, ORIS | S(reg) | A(reg) | IMM(imm >> 16)));
return push_inst(compiler, ORI | S(reg) | A(reg) | IMM(imm));
}
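load_immediate tries progressively cheaper encodings first (a 16-bit immediate, a 32-bit pair, then one or two shifted 16-bit chunks located via the leading-zero count) before falling back to the full five-instruction ADDIS/ORI/SLDI/ORIS/ORI sequence. A host-side model of that general fallback, to make the chunk order concrete (a two's-complement host is assumed):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t load_imm_general(uint64_t imm)
    {
        /* ADDIS reg, r0, imm>>48: low 32 bits hold (imm >> 48) << 16,
           sign-extended to 64 bits. */
        uint64_t r = (uint64_t)(int64_t)(int32_t)(uint32_t)((imm >> 48) << 16);
        r |= (imm >> 32) & 0xffff;          /* ORI: bits 32..47 */
        r <<= 32;                           /* SLDI(32): also drops the
                                               sign-extension bits */
        r |= ((imm >> 16) & 0xffff) << 16;  /* ORIS: bits 16..31 */
        r |= imm & 0xffff;                  /* ORI: bits 0..15 */
        return r;
    }

    int main(void)
    {
        uint64_t v = 0x123456789abcdef0ULL;
        printf("%d\n", load_imm_general(v) == v); /* 1 */
        return 0;
    }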
-/* Simplified mnemonics: clrldi. */
-#define INS_CLEAR_LEFT(dst, src, from) \
- (RLDICL | S(src) | A(dst) | ((from) << 6) | (1 << 5))
+#undef PUSH_SLDI_NEG
+
+#define CLRLDI(dst, src, n) \
+ (RLDICL | S(src) | A(dst) | RLDI_SH(0) | RLDI_MB(n))
/* Sign extension for integer operations. */
#define UN_EXTS() \
@@ -145,6 +152,8 @@ static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 reg,
static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags,
sljit_s32 dst, sljit_s32 src1, sljit_s32 src2)
{
+ sljit_u32 imm;
+
switch (op) {
case SLJIT_MOV:
case SLJIT_MOV_P:
@@ -159,7 +168,7 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) {
if (op == SLJIT_MOV_S32)
return push_inst(compiler, EXTSW | S(src2) | A(dst));
- return push_inst(compiler, INS_CLEAR_LEFT(dst, src2, 0));
+ return push_inst(compiler, CLRLDI(dst, src2, 32));
}
else {
SLJIT_ASSERT(dst == src2);
@@ -172,7 +181,7 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) {
if (op == SLJIT_MOV_S8)
return push_inst(compiler, EXTSB | S(src2) | A(dst));
- return push_inst(compiler, INS_CLEAR_LEFT(dst, src2, 24));
+ return push_inst(compiler, CLRLDI(dst, src2, 56));
}
else if ((flags & REG_DEST) && op == SLJIT_MOV_S8)
return push_inst(compiler, EXTSB | S(src2) | A(dst));
@@ -187,49 +196,39 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) {
if (op == SLJIT_MOV_S16)
return push_inst(compiler, EXTSH | S(src2) | A(dst));
- return push_inst(compiler, INS_CLEAR_LEFT(dst, src2, 16));
+ return push_inst(compiler, CLRLDI(dst, src2, 48));
}
else {
SLJIT_ASSERT(dst == src2);
}
return SLJIT_SUCCESS;
- case SLJIT_NOT:
- SLJIT_ASSERT(src1 == TMP_REG1);
- UN_EXTS();
- return push_inst(compiler, NOR | RC(flags) | S(src2) | A(dst) | B(src2));
-
- case SLJIT_NEG:
+ case SLJIT_CLZ:
SLJIT_ASSERT(src1 == TMP_REG1);
+ return push_inst(compiler, ((flags & ALT_FORM1) ? CNTLZW : CNTLZD) | S(src2) | A(dst));
- if ((flags & (ALT_FORM1 | ALT_SIGN_EXT)) == (ALT_FORM1 | ALT_SIGN_EXT)) {
- FAIL_IF(push_inst(compiler, RLDI(TMP_REG2, src2, 32, 31, 1)));
- FAIL_IF(push_inst(compiler, NEG | OE(ALT_SET_FLAGS) | RC(ALT_SET_FLAGS) | D(dst) | A(TMP_REG2)));
- return push_inst(compiler, RLDI(dst, dst, 32, 32, 0));
- }
-
- UN_EXTS();
- /* Setting XER SO is not enough, CR SO is also needed. */
- return push_inst(compiler, NEG | OE((flags & ALT_FORM1) ? ALT_SET_FLAGS : 0) | RC(flags) | D(dst) | A(src2));
-
- case SLJIT_CLZ:
+ case SLJIT_CTZ:
SLJIT_ASSERT(src1 == TMP_REG1);
- if (flags & ALT_FORM1)
- return push_inst(compiler, CNTLZW | S(src2) | A(dst));
- return push_inst(compiler, CNTLZD | S(src2) | A(dst));
+ FAIL_IF(push_inst(compiler, NEG | D(TMP_REG1) | A(src2)));
+ FAIL_IF(push_inst(compiler, AND | S(src2) | A(dst) | B(TMP_REG1)));
+ FAIL_IF(push_inst(compiler, ((flags & ALT_FORM1) ? CNTLZW : CNTLZD) | S(dst) | A(dst)));
+ FAIL_IF(push_inst(compiler, ADDI | D(TMP_REG1) | A(dst) | IMM((flags & ALT_FORM1) ? -32 : -64)));
+ /* The highest bits are set if dst < bit width, and zero otherwise. */
+ FAIL_IF(push_inst(compiler, ((flags & ALT_FORM1) ? SRWI(27) : SRDI(58)) | S(TMP_REG1) | A(TMP_REG1)));
+ return push_inst(compiler, XOR | S(dst) | A(dst) | B(TMP_REG1));
case SLJIT_ADD:
if (flags & ALT_FORM1) {
if (flags & ALT_SIGN_EXT) {
- FAIL_IF(push_inst(compiler, RLDI(TMP_REG1, src1, 32, 31, 1)));
+ FAIL_IF(push_inst(compiler, SLDI(32) | S(src1) | A(TMP_REG1)));
src1 = TMP_REG1;
- FAIL_IF(push_inst(compiler, RLDI(TMP_REG2, src2, 32, 31, 1)));
+ FAIL_IF(push_inst(compiler, SLDI(32) | S(src2) | A(TMP_REG2)));
src2 = TMP_REG2;
}
/* Setting XER SO is not enough, CR SO is also needed. */
FAIL_IF(push_inst(compiler, ADD | OE(ALT_SET_FLAGS) | RC(ALT_SET_FLAGS) | D(dst) | A(src1) | B(src2)));
if (flags & ALT_SIGN_EXT)
- return push_inst(compiler, RLDI(dst, dst, 32, 32, 0));
+ return push_inst(compiler, SRDI(32) | S(dst) | A(dst));
return SLJIT_SUCCESS;
}
@@ -240,12 +239,14 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
if (flags & ALT_FORM3)
return push_inst(compiler, ADDIS | D(dst) | A(src1) | compiler->imm);
+ imm = compiler->imm;
+
if (flags & ALT_FORM4) {
- FAIL_IF(push_inst(compiler, ADDIS | D(dst) | A(src1) | (((compiler->imm >> 16) & 0xffff) + ((compiler->imm >> 15) & 0x1))));
+ FAIL_IF(push_inst(compiler, ADDIS | D(dst) | A(src1) | (((imm >> 16) & 0xffff) + ((imm >> 15) & 0x1))));
src1 = dst;
}
- return push_inst(compiler, ADDI | D(dst) | A(src1) | (compiler->imm & 0xffff));
+ return push_inst(compiler, ADDI | D(dst) | A(src1) | (imm & 0xffff));
}
if (flags & ALT_FORM3) {
SLJIT_ASSERT(src2 == TMP_REG2);
@@ -299,15 +300,24 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
if (flags & ALT_FORM3) {
if (flags & ALT_SIGN_EXT) {
- FAIL_IF(push_inst(compiler, RLDI(TMP_REG1, src1, 32, 31, 1)));
- src1 = TMP_REG1;
- FAIL_IF(push_inst(compiler, RLDI(TMP_REG2, src2, 32, 31, 1)));
- src2 = TMP_REG2;
+ if (src1 != TMP_ZERO) {
+ FAIL_IF(push_inst(compiler, SLDI(32) | S(src1) | A(TMP_REG1)));
+ src1 = TMP_REG1;
+ }
+ if (src2 != TMP_ZERO) {
+ FAIL_IF(push_inst(compiler, SLDI(32) | S(src2) | A(TMP_REG2)));
+ src2 = TMP_REG2;
+ }
}
+
/* Setting XER SO is not enough, CR SO is also needed. */
- FAIL_IF(push_inst(compiler, SUBF | OE(ALT_SET_FLAGS) | RC(ALT_SET_FLAGS) | D(dst) | A(src2) | B(src1)));
+ if (src1 != TMP_ZERO)
+ FAIL_IF(push_inst(compiler, SUBF | OE(ALT_SET_FLAGS) | RC(ALT_SET_FLAGS) | D(dst) | A(src2) | B(src1)));
+ else
+ FAIL_IF(push_inst(compiler, NEG | OE(ALT_SET_FLAGS) | RC(ALT_SET_FLAGS) | D(dst) | A(src2)));
+
if (flags & ALT_SIGN_EXT)
- return push_inst(compiler, RLDI(dst, dst, 32, 32, 0));
+ return push_inst(compiler, SRDI(32) | S(dst) | A(dst));
return SLJIT_SUCCESS;
}
@@ -317,12 +327,18 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
return push_inst(compiler, SUBFIC | D(dst) | A(src1) | compiler->imm);
}
- if (!(flags & ALT_SET_FLAGS))
+ if (!(flags & ALT_SET_FLAGS)) {
+ SLJIT_ASSERT(src1 != TMP_ZERO);
return push_inst(compiler, SUBF | D(dst) | A(src2) | B(src1));
+ }
+
BIN_EXTS();
if (flags & ALT_FORM5)
return push_inst(compiler, SUBFC | RC(ALT_SET_FLAGS) | D(dst) | A(src2) | B(src1));
- return push_inst(compiler, SUBF | RC(flags) | D(dst) | A(src2) | B(src1));
+
+ if (src1 != TMP_ZERO)
+ return push_inst(compiler, SUBF | RC(ALT_SET_FLAGS) | D(dst) | A(src2) | B(src1));
+ return push_inst(compiler, NEG | RC(ALT_SET_FLAGS) | D(dst) | A(src2));
case SLJIT_SUBC:
BIN_EXTS();
@@ -360,8 +376,10 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
}
if (flags & ALT_FORM3) {
SLJIT_ASSERT(src2 == TMP_REG2);
- FAIL_IF(push_inst(compiler, ORI | S(src1) | A(dst) | IMM(compiler->imm)));
- return push_inst(compiler, ORIS | S(dst) | A(dst) | IMM(compiler->imm >> 16));
+ imm = compiler->imm;
+
+ FAIL_IF(push_inst(compiler, ORI | S(src1) | A(dst) | IMM(imm)));
+ return push_inst(compiler, ORIS | S(dst) | A(dst) | IMM(imm >> 16));
}
return push_inst(compiler, OR | RC(flags) | S(src1) | A(dst) | B(src2));
@@ -376,46 +394,110 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
}
if (flags & ALT_FORM3) {
SLJIT_ASSERT(src2 == TMP_REG2);
- FAIL_IF(push_inst(compiler, XORI | S(src1) | A(dst) | IMM(compiler->imm)));
- return push_inst(compiler, XORIS | S(dst) | A(dst) | IMM(compiler->imm >> 16));
+ imm = compiler->imm;
+
+ FAIL_IF(push_inst(compiler, XORI | S(src1) | A(dst) | IMM(imm)));
+ return push_inst(compiler, XORIS | S(dst) | A(dst) | IMM(imm >> 16));
+ }
+ if (flags & ALT_FORM4) {
+ SLJIT_ASSERT(src1 == TMP_REG1);
+ UN_EXTS();
+ return push_inst(compiler, NOR | RC(flags) | S(src2) | A(dst) | B(src2));
}
return push_inst(compiler, XOR | RC(flags) | S(src1) | A(dst) | B(src2));
case SLJIT_SHL:
+ case SLJIT_MSHL:
if (flags & ALT_FORM1) {
SLJIT_ASSERT(src2 == TMP_REG2);
+ imm = compiler->imm;
+
if (flags & ALT_FORM2) {
- compiler->imm &= 0x1f;
- return push_inst(compiler, RLWINM | RC(flags) | S(src1) | A(dst) | (compiler->imm << 11) | ((31 - compiler->imm) << 1));
+ imm &= 0x1f;
+ return push_inst(compiler, SLWI(imm) | RC(flags) | S(src1) | A(dst));
}
- compiler->imm &= 0x3f;
- return push_inst(compiler, RLDI(dst, src1, compiler->imm, 63 - compiler->imm, 1) | RC(flags));
+
+ imm &= 0x3f;
+ return push_inst(compiler, SLDI(imm) | RC(flags) | S(src1) | A(dst));
+ }
+
+ if (op == SLJIT_MSHL) {
+ FAIL_IF(push_inst(compiler, ANDI | S(src2) | A(TMP_REG2) | ((flags & ALT_FORM2) ? 0x1f : 0x3f)));
+ src2 = TMP_REG2;
}
+
return push_inst(compiler, ((flags & ALT_FORM2) ? SLW : SLD) | RC(flags) | S(src1) | A(dst) | B(src2));
case SLJIT_LSHR:
+ case SLJIT_MLSHR:
if (flags & ALT_FORM1) {
SLJIT_ASSERT(src2 == TMP_REG2);
+ imm = compiler->imm;
+
if (flags & ALT_FORM2) {
- compiler->imm &= 0x1f;
- return push_inst(compiler, RLWINM | RC(flags) | S(src1) | A(dst) | (((32 - compiler->imm) & 0x1f) << 11) | (compiler->imm << 6) | (31 << 1));
+ imm &= 0x1f;
+ /* Since imm can be 0, SRWI() cannot be used. */
+ return push_inst(compiler, RLWINM | RC(flags) | S(src1) | A(dst) | RLWI_SH((32 - imm) & 0x1f) | RLWI_MBE(imm, 31));
}
- compiler->imm &= 0x3f;
- return push_inst(compiler, RLDI(dst, src1, 64 - compiler->imm, compiler->imm, 0) | RC(flags));
+
+ imm &= 0x3f;
+ /* Since imm can be 0, SRDI() cannot be used. */
+ return push_inst(compiler, RLDICL | RC(flags) | S(src1) | A(dst) | RLDI_SH((64 - imm) & 0x3f) | RLDI_MB(imm));
}
+
+ if (op == SLJIT_MLSHR) {
+ FAIL_IF(push_inst(compiler, ANDI | S(src2) | A(TMP_REG2) | ((flags & ALT_FORM2) ? 0x1f : 0x3f)));
+ src2 = TMP_REG2;
+ }
+
return push_inst(compiler, ((flags & ALT_FORM2) ? SRW : SRD) | RC(flags) | S(src1) | A(dst) | B(src2));
case SLJIT_ASHR:
+ case SLJIT_MASHR:
if (flags & ALT_FORM1) {
SLJIT_ASSERT(src2 == TMP_REG2);
+ imm = compiler->imm;
+
if (flags & ALT_FORM2) {
- compiler->imm &= 0x1f;
- return push_inst(compiler, SRAWI | RC(flags) | S(src1) | A(dst) | (compiler->imm << 11));
+ imm &= 0x1f;
+ return push_inst(compiler, SRAWI | RC(flags) | S(src1) | A(dst) | (imm << 11));
}
- compiler->imm &= 0x3f;
- return push_inst(compiler, SRADI | RC(flags) | S(src1) | A(dst) | ((compiler->imm & 0x1f) << 11) | ((compiler->imm & 0x20) >> 4));
+
+ imm &= 0x3f;
+ return push_inst(compiler, SRADI | RC(flags) | S(src1) | A(dst) | RLDI_SH(imm));
+ }
+
+ if (op == SLJIT_MASHR) {
+ FAIL_IF(push_inst(compiler, ANDI | S(src2) | A(TMP_REG2) | ((flags & ALT_FORM2) ? 0x1f : 0x3f)));
+ src2 = TMP_REG2;
}
+
return push_inst(compiler, ((flags & ALT_FORM2) ? SRAW : SRAD) | RC(flags) | S(src1) | A(dst) | B(src2));
+
+ case SLJIT_ROTL:
+ case SLJIT_ROTR:
+ if (flags & ALT_FORM1) {
+ SLJIT_ASSERT(src2 == TMP_REG2);
+ imm = compiler->imm;
+
+ if (op == SLJIT_ROTR)
+ imm = (sljit_u32)(-(sljit_s32)imm);
+
+ if (flags & ALT_FORM2) {
+ imm &= 0x1f;
+ return push_inst(compiler, RLWINM | S(src1) | A(dst) | RLWI_SH(imm) | RLWI_MBE(0, 31));
+ }
+
+ imm &= 0x3f;
+ return push_inst(compiler, RLDICL | S(src1) | A(dst) | RLDI_SH(imm));
+ }
+
+ if (op == SLJIT_ROTR) {
+ FAIL_IF(push_inst(compiler, SUBFIC | D(TMP_REG2) | A(src2) | 0));
+ src2 = TMP_REG2;
+ }
+
+ return push_inst(compiler, ((flags & ALT_FORM2) ? (RLWNM | RLWI_MBE(0, 31)) : (RLDCL | RLDI_MB(0))) | S(src1) | A(dst) | B(src2));
}
SLJIT_UNREACHABLE();
@@ -432,14 +514,14 @@ static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_t
if (src)
reg = *src & REG_MASK;
- arg_types >>= SLJIT_DEF_SHIFT;
+ arg_types >>= SLJIT_ARG_SHIFT;
while (arg_types) {
- types = (types << SLJIT_DEF_SHIFT) | (arg_types & SLJIT_DEF_MASK);
+ types = (types << SLJIT_ARG_SHIFT) | (arg_types & SLJIT_ARG_MASK);
- switch (arg_types & SLJIT_DEF_MASK) {
- case SLJIT_ARG_TYPE_F32:
+ switch (arg_types & SLJIT_ARG_MASK) {
case SLJIT_ARG_TYPE_F64:
+ case SLJIT_ARG_TYPE_F32:
arg_count++;
break;
default:
@@ -453,13 +535,13 @@ static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_t
break;
}
- arg_types >>= SLJIT_DEF_SHIFT;
+ arg_types >>= SLJIT_ARG_SHIFT;
}
while (types) {
- switch (types & SLJIT_DEF_MASK) {
- case SLJIT_ARG_TYPE_F32:
+ switch (types & SLJIT_ARG_MASK) {
case SLJIT_ARG_TYPE_F64:
+ case SLJIT_ARG_TYPE_F32:
arg_count--;
break;
default:
@@ -471,7 +553,7 @@ static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_t
break;
}
- types >>= SLJIT_DEF_SHIFT;
+ types >>= SLJIT_ARG_SHIFT;
}
return SLJIT_SUCCESS;
@@ -481,27 +563,157 @@ static SLJIT_INLINE sljit_s32 emit_const(struct sljit_compiler *compiler, sljit_
{
FAIL_IF(push_inst(compiler, ADDIS | D(reg) | A(0) | IMM(init_value >> 48)));
FAIL_IF(push_inst(compiler, ORI | S(reg) | A(reg) | IMM(init_value >> 32)));
- FAIL_IF(PUSH_RLDICR(reg, 31));
+ FAIL_IF(push_inst(compiler, SLDI(32) | S(reg) | A(reg)));
FAIL_IF(push_inst(compiler, ORIS | S(reg) | A(reg) | IMM(init_value >> 16)));
return push_inst(compiler, ORI | S(reg) | A(reg) | IMM(init_value));
}
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 src, sljit_sw srcw)
+{
+ sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
+
+ if (src == SLJIT_IMM) {
+ if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32)
+ srcw = (sljit_s32)srcw;
+
+ FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
+ src = TMP_REG1;
+ } else if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32) {
+ if (FAST_IS_REG(src))
+ FAIL_IF(push_inst(compiler, EXTSW | S(src) | A(TMP_REG1)));
+ else
+ FAIL_IF(emit_op_mem(compiler, INT_DATA | SIGNED_DATA | LOAD_DATA, TMP_REG1, src, srcw, TMP_REG1));
+ src = TMP_REG1;
+ }
+
+ if (FAST_IS_REG(src)) {
+ FAIL_IF(push_inst(compiler, STD | S(src) | A(SLJIT_SP) | TMP_MEM_OFFSET));
+ FAIL_IF(push_inst(compiler, LFD | FS(TMP_FREG1) | A(SLJIT_SP) | TMP_MEM_OFFSET));
+ } else
+ FAIL_IF(emit_op_mem(compiler, DOUBLE_DATA | LOAD_DATA, TMP_FREG1, src, srcw, TMP_REG1));
+
+ FAIL_IF(push_inst(compiler, FCFID | FD(dst_r) | FB(TMP_FREG1)));
+
+ if (op & SLJIT_32)
+ FAIL_IF(push_inst(compiler, FRSP | FD(dst_r) | FB(dst_r)));
+
+ if (dst & SLJIT_MEM)
+ return emit_op_mem(compiler, FLOAT_DATA(op), TMP_FREG1, dst, dstw, TMP_REG1);
+ return SLJIT_SUCCESS;
+}
+
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_uw(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 src, sljit_sw srcw)
+{
+ sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
+
+ if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_U32) {
+ if (src == SLJIT_IMM) {
+ FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_u32)srcw));
+ src = TMP_REG1;
+ } else {
+ if (FAST_IS_REG(src))
+ FAIL_IF(push_inst(compiler, CLRLDI(TMP_REG1, src, 32)));
+ else
+ FAIL_IF(emit_op_mem(compiler, INT_DATA | LOAD_DATA, TMP_REG1, src, srcw, TMP_REG1));
+ src = TMP_REG1;
+ }
+
+ FAIL_IF(push_inst(compiler, STD | S(src) | A(SLJIT_SP) | TMP_MEM_OFFSET));
+ FAIL_IF(push_inst(compiler, LFD | FS(TMP_FREG1) | A(SLJIT_SP) | TMP_MEM_OFFSET));
+ FAIL_IF(push_inst(compiler, FCFID | FD(dst_r) | FB(TMP_FREG1)));
+ } else {
+ if (src == SLJIT_IMM) {
+ FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
+ src = TMP_REG1;
+ } else if (src & SLJIT_MEM) {
+ FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_REG1, src, srcw, TMP_REG1));
+ src = TMP_REG1;
+ }
+
+ FAIL_IF(push_inst(compiler, CMPI | CRD(0 | 1) | A(src) | 0));
+ FAIL_IF(push_inst(compiler, BCx | (12 << 21) | (0 << 16) | 20));
+ FAIL_IF(push_inst(compiler, STD | S(src) | A(SLJIT_SP) | TMP_MEM_OFFSET));
+ FAIL_IF(push_inst(compiler, LFD | FS(TMP_FREG1) | A(SLJIT_SP) | TMP_MEM_OFFSET));
+ FAIL_IF(push_inst(compiler, FCFID | FD(dst_r) | FB(TMP_FREG1)));
+ FAIL_IF(push_inst(compiler, Bx | ((op & SLJIT_32) ? 36 : 32)));
+
+ if (op & SLJIT_32)
+ FAIL_IF(push_inst(compiler, RLWINM | S(src) | A(TMP_REG2) | RLWI_SH(10) | RLWI_MBE(10, 21)));
+ else
+ FAIL_IF(push_inst(compiler, ANDI | S(src) | A(TMP_REG2) | 0x1));
+
+ /* Shift right. */
+ FAIL_IF(push_inst(compiler, RLDICL | S(src) | A(TMP_REG1) | RLDI_SH(63) | RLDI_MB(1)));
+
+ if (op & SLJIT_32)
+ FAIL_IF(push_inst(compiler, RLDICR | S(TMP_REG1) | A(TMP_REG1) | RLDI_SH(0) | RLDI_ME(53)));
+
+ FAIL_IF(push_inst(compiler, OR | S(TMP_REG1) | A(TMP_REG1) | B(TMP_REG2)));
+
+ FAIL_IF(push_inst(compiler, STD | S(TMP_REG1) | A(SLJIT_SP) | TMP_MEM_OFFSET));
+ FAIL_IF(push_inst(compiler, LFD | FS(TMP_FREG1) | A(SLJIT_SP) | TMP_MEM_OFFSET));
+ FAIL_IF(push_inst(compiler, FCFID | FD(dst_r) | FB(TMP_FREG1)));
+ FAIL_IF(push_inst(compiler, FADD | FD(dst_r) | FA(dst_r) | FB(dst_r)));
+ }
+
+ if (op & SLJIT_32)
+ FAIL_IF(push_inst(compiler, FRSP | FD(dst_r) | FB(dst_r)));
+
+ if (dst & SLJIT_MEM)
+ return emit_op_mem(compiler, FLOAT_DATA(op), TMP_FREG1, dst, dstw, TMP_REG1);
+ return SLJIT_SUCCESS;
+}
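When the source may not fit in 63 bits, the SLJIT_CONV_F64_FROM_UW path above halves it first: the value is shifted right by one with the dropped bit OR-ed back in (a sticky bit, so the final doubling rounds exactly as a direct conversion would), converted with FCFID, then doubled with FADD. A portable model of the f64 case (the SLJIT_32 branch in the diff additionally masks mantissa bits for single precision):

    #include <stdint.h>
    #include <stdio.h>

    static double u64_to_f64(uint64_t src)
    {
        if ((int64_t)src >= 0)                 /* fits the signed convert */
            return (double)(int64_t)src;       /* plain FCFID */

        /* Halve with a sticky low bit, convert, then double. */
        double half = (double)(int64_t)((src >> 1) | (src & 1));
        return half + half;                    /* FADD d, d, d */
    }

    int main(void)
    {
        printf("%.1f\n", u64_to_f64(0xffffffffffffffffULL));
        /* prints 18446744073709551616.0, the correctly rounded double */
        return 0;
    }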
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fset64(struct sljit_compiler *compiler,
+ sljit_s32 freg, sljit_f64 value)
+{
+ union {
+ sljit_sw imm;
+ sljit_f64 value;
+ } u;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fset64(compiler, freg, value));
+
+ u.value = value;
+
+ if (u.imm != 0)
+ FAIL_IF(load_immediate(compiler, TMP_REG1, u.imm));
+
+ FAIL_IF(push_inst(compiler, STD | S(u.imm != 0 ? TMP_REG1 : TMP_ZERO) | A(SLJIT_SP) | TMP_MEM_OFFSET));
+ return push_inst(compiler, LFD | FS(freg) | A(SLJIT_SP) | TMP_MEM_OFFSET);
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fcopy(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 freg, sljit_s32 reg)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fcopy(compiler, op, freg, reg));
+
+ if (GET_OPCODE(op) == SLJIT_COPY_TO_F64) {
+ FAIL_IF(push_inst(compiler, ((op & SLJIT_32) ? STW : STD) | S(reg) | A(SLJIT_SP) | TMP_MEM_OFFSET));
+ return push_inst(compiler, ((op & SLJIT_32) ? LFS : LFD) | FS(freg) | A(SLJIT_SP) | TMP_MEM_OFFSET);
+ }
+
+ FAIL_IF(push_inst(compiler, ((op & SLJIT_32) ? STFS : STFD) | FS(freg) | A(SLJIT_SP) | TMP_MEM_OFFSET));
+ return push_inst(compiler, ((op & SLJIT_32) ? LWZ : LD) | S(reg) | A(SLJIT_SP) | TMP_MEM_OFFSET);
+}
+
SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_target, sljit_sw executable_offset)
{
sljit_ins *inst = (sljit_ins*)addr;
SLJIT_UNUSED_ARG(executable_offset);
SLJIT_UPDATE_WX_FLAGS(inst, inst + 5, 0);
- inst[0] = (inst[0] & 0xffff0000) | ((new_target >> 48) & 0xffff);
- inst[1] = (inst[1] & 0xffff0000) | ((new_target >> 32) & 0xffff);
- inst[3] = (inst[3] & 0xffff0000) | ((new_target >> 16) & 0xffff);
- inst[4] = (inst[4] & 0xffff0000) | (new_target & 0xffff);
+ inst[0] = (inst[0] & 0xffff0000u) | ((sljit_ins)(new_target >> 48) & 0xffff);
+ inst[1] = (inst[1] & 0xffff0000u) | ((sljit_ins)(new_target >> 32) & 0xffff);
+ inst[3] = (inst[3] & 0xffff0000u) | ((sljit_ins)(new_target >> 16) & 0xffff);
+ inst[4] = (inst[4] & 0xffff0000u) | ((sljit_ins)new_target & 0xffff);
SLJIT_UPDATE_WX_FLAGS(inst, inst + 5, 1);
inst = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
SLJIT_CACHE_FLUSH(inst, inst + 5);
}
-
-SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset)
-{
- sljit_set_jump_addr(addr, new_constant, executable_offset);
-}
diff --git a/src/3rdparty/pcre2/src/sljit/sljitNativePPC_common.c b/src/3rdparty/pcre2/src/sljit/sljitNativePPC_common.c
index 2174dbb07b..54977f02e3 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitNativePPC_common.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitNativePPC_common.c
@@ -109,32 +109,32 @@ static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 7] = {
};
static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
- 0, 1, 2, 3, 4, 5, 6, 0, 7
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 0, 13
};
/* --------------------------------------------------------------------- */
 /* Instruction forms */
/* --------------------------------------------------------------------- */
-#define D(d) (reg_map[d] << 21)
-#define S(s) (reg_map[s] << 21)
-#define A(a) (reg_map[a] << 16)
-#define B(b) (reg_map[b] << 11)
-#define C(c) (reg_map[c] << 6)
-#define FD(fd) (freg_map[fd] << 21)
-#define FS(fs) (freg_map[fs] << 21)
-#define FA(fa) (freg_map[fa] << 16)
-#define FB(fb) (freg_map[fb] << 11)
-#define FC(fc) (freg_map[fc] << 6)
-#define IMM(imm) ((imm) & 0xffff)
-#define CRD(d) ((d) << 21)
+#define D(d) ((sljit_ins)reg_map[d] << 21)
+#define S(s) ((sljit_ins)reg_map[s] << 21)
+#define A(a) ((sljit_ins)reg_map[a] << 16)
+#define B(b) ((sljit_ins)reg_map[b] << 11)
+#define C(c) ((sljit_ins)reg_map[c] << 6)
+#define FD(fd) ((sljit_ins)freg_map[fd] << 21)
+#define FS(fs) ((sljit_ins)freg_map[fs] << 21)
+#define FA(fa) ((sljit_ins)freg_map[fa] << 16)
+#define FB(fb) ((sljit_ins)freg_map[fb] << 11)
+#define FC(fc) ((sljit_ins)freg_map[fc] << 6)
+#define IMM(imm) ((sljit_ins)(imm) & 0xffff)
+#define CRD(d) ((sljit_ins)(d) << 21)
/* Instruction bit sections.
OE and Rc flag (see ALT_SET_FLAGS). */
#define OE(flags) ((flags) & ALT_SET_FLAGS)
/* Rc flag (see ALT_SET_FLAGS). */
-#define RC(flags) (((flags) & ALT_SET_FLAGS) >> 10)
-#define HI(opcode) ((opcode) << 26)
-#define LO(opcode) ((opcode) << 1)
+#define RC(flags) ((sljit_ins)((flags) & ALT_SET_FLAGS) >> 10)
+#define HI(opcode) ((sljit_ins)(opcode) << 26)
+#define LO(opcode) ((sljit_ins)(opcode) << 1)
#define ADD (HI(31) | LO(266))
#define ADDC (HI(31) | LO(10))
@@ -150,6 +150,9 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
#define BCx (HI(16))
#define BCCTR (HI(19) | LO(528) | (3 << 11))
#define BLR (HI(19) | LO(16) | (0x14 << 21))
+#if defined(_ARCH_PWR10) && _ARCH_PWR10
+#define BRD (HI(31) | LO(187))
+#endif /* POWER10 */
#define CNTLZD (HI(31) | LO(58))
#define CNTLZW (HI(31) | LO(26))
#define CMP (HI(31) | LO(0))
@@ -182,6 +185,13 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
#define FSUB (HI(63) | LO(20))
#define FSUBS (HI(59) | LO(20))
#define LD (HI(58) | 0)
+#define LFD (HI(50))
+#define LFS (HI(48))
+#if defined(_ARCH_PWR7) && _ARCH_PWR7
+#define LDBRX (HI(31) | LO(532))
+#endif /* POWER7 */
+#define LHBRX (HI(31) | LO(790))
+#define LWBRX (HI(31) | LO(534))
#define LWZ (HI(32))
#define MFCR (HI(31) | LO(19))
#define MFLR (HI(31) | LO(339) | 0x80000)
@@ -202,8 +212,13 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
#define OR (HI(31) | LO(444))
#define ORI (HI(24))
#define ORIS (HI(25))
-#define RLDICL (HI(30))
+#define RLDCL (HI(30) | LO(8))
+#define RLDICL (HI(30) | LO(0 << 1))
+#define RLDICR (HI(30) | LO(1 << 1))
+#define RLDIMI (HI(30) | LO(3 << 1))
+#define RLWIMI (HI(20))
#define RLWINM (HI(21))
+#define RLWNM (HI(23))
#define SLD (HI(31) | LO(27))
#define SLW (HI(31) | LO(24))
#define SRAD (HI(31) | LO(794))
@@ -213,10 +228,17 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
#define SRD (HI(31) | LO(539))
#define SRW (HI(31) | LO(536))
#define STD (HI(62) | 0)
+#if defined(_ARCH_PWR7) && _ARCH_PWR7
+#define STDBRX (HI(31) | LO(660))
+#endif /* POWER7 */
#define STDU (HI(62) | 1)
#define STDUX (HI(31) | LO(181))
+#define STFD (HI(54))
#define STFIWX (HI(31) | LO(983))
+#define STFS (HI(52))
+#define STHBRX (HI(31) | LO(918))
#define STW (HI(36))
+#define STWBRX (HI(31) | LO(662))
#define STWU (HI(37))
#define STWUX (HI(31) | LO(183))
#define SUBF (HI(31) | LO(40))
@@ -231,16 +253,48 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
#define SIMM_MIN (-0x8000)
#define UIMM_MAX (0xffff)
-#define RLDI(dst, src, sh, mb, type) \
- (HI(30) | S(src) | A(dst) | ((type) << 2) | (((sh) & 0x1f) << 11) | (((sh) & 0x20) >> 4) | (((mb) & 0x1f) << 6) | ((mb) & 0x20))
+/* Shift helpers. */
+#define RLWI_SH(sh) ((sljit_ins)(sh) << 11)
+#define RLWI_MBE(mb, me) (((sljit_ins)(mb) << 6) | ((sljit_ins)(me) << 1))
+#define RLDI_SH(sh) ((((sljit_ins)(sh) & 0x1f) << 11) | (((sljit_ins)(sh) & 0x20) >> 4))
+#define RLDI_MB(mb) ((((sljit_ins)(mb) & 0x1f) << 6) | ((sljit_ins)(mb) & 0x20))
+#define RLDI_ME(me) RLDI_MB(me)
+
+#define SLWI(shift) (RLWINM | RLWI_SH(shift) | RLWI_MBE(0, 31 - (shift)))
+#define SLDI(shift) (RLDICR | RLDI_SH(shift) | RLDI_ME(63 - (shift)))
+/* shift > 0 */
+#define SRWI(shift) (RLWINM | RLWI_SH(32 - (shift)) | RLWI_MBE((shift), 31))
+#define SRDI(shift) (RLDICL | RLDI_SH(64 - (shift)) | RLDI_MB(shift))
+
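All four helpers are fixed encodings of the rotate-and-mask instructions: a left shift is a rotate with the vacated low bits masked off, and a right shift is a rotate by 32 - n (or 64 - n) with the high bits masked. That is also why SRWI/SRDI require shift > 0: SRWI(0) would need SH = 32, which does not fit the 5-bit field, so call sites that may see a zero shift write the RLWI_SH((32 - imm) & 0x1f) form directly, as the comments in the diff note. A host model of rlwinm for the non-wrapping mask case (mb <= me):

    #include <stdint.h>
    #include <stdio.h>

    /* rlwinm: rotate left by sh, then AND with the mask that has
       bits mb..me set (PPC numbering: bit 0 is the MSB). */
    static uint32_t rlwinm(uint32_t rs, unsigned sh, unsigned mb, unsigned me)
    {
        uint32_t rot = (rs << sh) | (sh ? rs >> (32 - sh) : 0);
        uint32_t mask = (0xffffffffu >> mb)
            ^ (me < 31 ? (0xffffffffu >> (me + 1)) : 0);
        return rot & mask;
    }

    int main(void)
    {
        uint32_t x = 0x12345678;
        /* SLWI(8) == rlwinm sh=8, mb=0, me=23 */
        printf("0x%08x\n", rlwinm(x, 8, 0, 23));  /* 0x34567800 */
        /* SRWI(8) == rlwinm sh=24, mb=8, me=31 */
        printf("0x%08x\n", rlwinm(x, 24, 8, 31)); /* 0x00123456 */
        return 0;
    }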
+#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32)
+#define SLWI_W(shift) SLWI(shift)
+#define TMP_MEM_OFFSET (2 * sizeof(sljit_sw))
+#else /* !SLJIT_CONFIG_PPC_32 */
+#define SLWI_W(shift) SLDI(shift)
+#define TMP_MEM_OFFSET (6 * sizeof(sljit_sw))
+#endif /* SLJIT_CONFIG_PPC_32 */
+
+#if (defined SLJIT_LITTLE_ENDIAN && SLJIT_LITTLE_ENDIAN)
+#define TMP_MEM_OFFSET_LO (TMP_MEM_OFFSET)
+#define TMP_MEM_OFFSET_HI (TMP_MEM_OFFSET + sizeof(sljit_s32))
+#define LWBRX_FIRST_REG S(TMP_REG1)
+#define LWBRX_SECOND_REG S(dst)
+#else /* !SLJIT_LITTLE_ENDIAN */
+#define TMP_MEM_OFFSET_LO (TMP_MEM_OFFSET + sizeof(sljit_s32))
+#define TMP_MEM_OFFSET_HI (TMP_MEM_OFFSET)
+#define LWBRX_FIRST_REG S(dst)
+#define LWBRX_SECOND_REG S(TMP_REG1)
+#endif /* SLJIT_LITTLE_ENDIAN */
#if (defined SLJIT_INDIRECT_CALL && SLJIT_INDIRECT_CALL)
-SLJIT_API_FUNC_ATTRIBUTE void sljit_set_function_context(void** func_ptr, struct sljit_function_context* context, sljit_sw addr, void* func)
+SLJIT_API_FUNC_ATTRIBUTE void sljit_set_function_context(void** func_ptr, struct sljit_function_context* context, sljit_uw addr, void* func)
{
- sljit_sw* ptrs;
+ sljit_uw* ptrs;
+
if (func_ptr)
*func_ptr = (void*)context;
- ptrs = (sljit_sw*)func;
+
+ ptrs = (sljit_uw*)func;
context->addr = addr ? addr : ptrs[0];
context->r2 = ptrs[1];
context->r11 = ptrs[2];
@@ -260,7 +314,7 @@ static SLJIT_INLINE sljit_s32 detect_jump_type(struct sljit_jump *jump, sljit_in
{
sljit_sw diff;
sljit_uw target_addr;
- sljit_sw extra_jump_flags;
+ sljit_uw extra_jump_flags;
#if (defined SLJIT_PASS_ENTRY_ADDR_TO_CALL && SLJIT_PASS_ENTRY_ADDR_TO_CALL) && (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32)
if (jump->flags & (SLJIT_REWRITABLE_JUMP | IS_CALL))
@@ -296,7 +350,7 @@ static SLJIT_INLINE sljit_s32 detect_jump_type(struct sljit_jump *jump, sljit_in
}
extra_jump_flags = REMOVE_COND;
- diff -= sizeof(sljit_ins);
+ diff -= SSIZE_OF(ins);
}
if (diff <= 0x01ffffff && diff >= -0x02000000) {
@@ -349,7 +403,7 @@ static SLJIT_INLINE void put_label_set(struct sljit_put_label *put_label)
{
sljit_uw addr = put_label->label->addr;
sljit_ins *inst = (sljit_ins *)put_label->addr;
- sljit_s32 reg = *inst;
+ sljit_u32 reg = *inst;
if (put_label->flags == 0) {
SLJIT_ASSERT(addr < 0x100000000l);
@@ -363,10 +417,10 @@ static SLJIT_INLINE void put_label_set(struct sljit_put_label *put_label)
else {
inst[0] = ORIS | S(TMP_ZERO) | A(reg) | IMM(addr >> 48);
inst[1] = ORI | S(reg) | A(reg) | IMM((addr >> 32) & 0xffff);
- inst ++;
+ inst++;
}
- inst[1] = RLDI(reg, reg, 32, 31, 1);
+ inst[1] = SLDI(32) | S(reg) | A(reg);
inst[2] = ORIS | S(reg) | A(reg) | IMM((addr >> 16) & 0xffff);
inst += 2;
}
@@ -374,7 +428,7 @@ static SLJIT_INLINE void put_label_set(struct sljit_put_label *put_label)
inst[1] = ORI | S(reg) | A(reg) | IMM(addr & 0xffff);
}
-#endif
+#endif /* SLJIT_CONFIG_PPC_64 */
SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler)
{
@@ -398,6 +452,7 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
reverse_buf(compiler);
#if (defined SLJIT_INDIRECT_CALL && SLJIT_INDIRECT_CALL)
+ /* Add extra instruction space to compiler->size to hold the trampoline and padding. */
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
compiler->size += (compiler->size & 0x1) + (sizeof(struct sljit_function_context) / sizeof(sljit_ins));
#else
@@ -433,7 +488,7 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
if (label && label->size == word_count) {
/* Just recording the address. */
label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
- label->size = code_ptr - code;
+ label->size = (sljit_uw)(code_ptr - code);
label = label->next;
}
if (jump && jump->addr == word_count) {
@@ -492,8 +547,8 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
}
next_addr = compute_next_addr(label, jump, const_, put_label);
}
- code_ptr ++;
- word_count ++;
+ code_ptr++;
+ word_count++;
} while (buf_ptr < buf_end);
buf = buf->next;
@@ -501,7 +556,7 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
if (label && label->size == word_count) {
label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
- label->size = code_ptr - code;
+ label->size = (sljit_uw)(code_ptr - code);
label = label->next;
}
@@ -511,7 +566,7 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
SLJIT_ASSERT(!put_label);
#if (defined SLJIT_INDIRECT_CALL && SLJIT_INDIRECT_CALL)
- SLJIT_ASSERT(code_ptr - code <= (sljit_sw)compiler->size - (sizeof(struct sljit_function_context) / sizeof(sljit_ins)));
+ SLJIT_ASSERT(code_ptr - code <= (sljit_sw)(compiler->size - (sizeof(struct sljit_function_context) / sizeof(sljit_ins))));
#else
SLJIT_ASSERT(code_ptr - code <= (sljit_sw)compiler->size);
#endif
@@ -527,22 +582,22 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
if (!(jump->flags & PATCH_ABS_B)) {
addr -= (sljit_uw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset);
SLJIT_ASSERT((sljit_sw)addr <= 0x7fff && (sljit_sw)addr >= -0x8000);
- *buf_ptr = BCx | (addr & 0xfffc) | ((*buf_ptr) & 0x03ff0001);
+ *buf_ptr = BCx | ((sljit_ins)addr & 0xfffc) | ((*buf_ptr) & 0x03ff0001);
}
else {
SLJIT_ASSERT(addr <= 0xffff);
- *buf_ptr = BCx | (addr & 0xfffc) | 0x2 | ((*buf_ptr) & 0x03ff0001);
+ *buf_ptr = BCx | ((sljit_ins)addr & 0xfffc) | 0x2 | ((*buf_ptr) & 0x03ff0001);
}
}
else {
if (!(jump->flags & PATCH_ABS_B)) {
addr -= (sljit_uw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset);
SLJIT_ASSERT((sljit_sw)addr <= 0x01ffffff && (sljit_sw)addr >= -0x02000000);
- *buf_ptr = Bx | (addr & 0x03fffffc) | ((*buf_ptr) & 0x1);
+ *buf_ptr = Bx | ((sljit_ins)addr & 0x03fffffc) | ((*buf_ptr) & 0x1);
}
else {
SLJIT_ASSERT(addr <= 0x03ffffff);
- *buf_ptr = Bx | (addr & 0x03fffffc) | 0x2 | ((*buf_ptr) & 0x1);
+ *buf_ptr = Bx | ((sljit_ins)addr & 0x03fffffc) | 0x2 | ((*buf_ptr) & 0x1);
}
}
break;
@@ -550,26 +605,32 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
/* Set the fields of immediate loads. */
#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32)
- buf_ptr[0] = (buf_ptr[0] & 0xffff0000) | ((addr >> 16) & 0xffff);
- buf_ptr[1] = (buf_ptr[1] & 0xffff0000) | (addr & 0xffff);
+ SLJIT_ASSERT(((buf_ptr[0] | buf_ptr[1]) & 0xffff) == 0);
+ buf_ptr[0] |= (sljit_ins)(addr >> 16) & 0xffff;
+ buf_ptr[1] |= (sljit_ins)addr & 0xffff;
#else
if (jump->flags & PATCH_ABS32) {
SLJIT_ASSERT(addr <= 0x7fffffff);
- buf_ptr[0] = (buf_ptr[0] & 0xffff0000) | ((addr >> 16) & 0xffff);
- buf_ptr[1] = (buf_ptr[1] & 0xffff0000) | (addr & 0xffff);
+ SLJIT_ASSERT(((buf_ptr[0] | buf_ptr[1]) & 0xffff) == 0);
+ buf_ptr[0] |= (sljit_ins)(addr >> 16) & 0xffff;
+ buf_ptr[1] |= (sljit_ins)addr & 0xffff;
break;
}
+
if (jump->flags & PATCH_ABS48) {
SLJIT_ASSERT(addr <= 0x7fffffffffff);
- buf_ptr[0] = (buf_ptr[0] & 0xffff0000) | ((addr >> 32) & 0xffff);
- buf_ptr[1] = (buf_ptr[1] & 0xffff0000) | ((addr >> 16) & 0xffff);
- buf_ptr[3] = (buf_ptr[3] & 0xffff0000) | (addr & 0xffff);
+ SLJIT_ASSERT(((buf_ptr[0] | buf_ptr[1] | buf_ptr[3]) & 0xffff) == 0);
+ buf_ptr[0] |= (sljit_ins)(addr >> 32) & 0xffff;
+ buf_ptr[1] |= (sljit_ins)(addr >> 16) & 0xffff;
+ buf_ptr[3] |= (sljit_ins)addr & 0xffff;
break;
}
- buf_ptr[0] = (buf_ptr[0] & 0xffff0000) | ((addr >> 48) & 0xffff);
- buf_ptr[1] = (buf_ptr[1] & 0xffff0000) | ((addr >> 32) & 0xffff);
- buf_ptr[3] = (buf_ptr[3] & 0xffff0000) | ((addr >> 16) & 0xffff);
- buf_ptr[4] = (buf_ptr[4] & 0xffff0000) | (addr & 0xffff);
+
+ SLJIT_ASSERT(((buf_ptr[0] | buf_ptr[1] | buf_ptr[3] | buf_ptr[4]) & 0xffff) == 0);
+ buf_ptr[0] |= (sljit_ins)(addr >> 48) & 0xffff;
+ buf_ptr[1] |= (sljit_ins)(addr >> 32) & 0xffff;
+ buf_ptr[3] |= (sljit_ins)(addr >> 16) & 0xffff;
+ buf_ptr[4] |= (sljit_ins)addr & 0xffff;
#endif
} while (0);
jump = jump->next;
@@ -592,7 +653,6 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
compiler->error = SLJIT_ERR_COMPILED;
compiler->executable_offset = executable_offset;
- compiler->executable_size = (code_ptr - code) * sizeof(sljit_ins);
code = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(code, executable_offset);
@@ -601,7 +661,7 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
if (((sljit_sw)code_ptr) & 0x4)
code_ptr++;
#endif
- sljit_set_function_context(NULL, (struct sljit_function_context*)code_ptr, (sljit_sw)code, (void*)sljit_generate_code);
+ sljit_set_function_context(NULL, (struct sljit_function_context*)code_ptr, (sljit_uw)code, (void*)sljit_generate_code);
#endif
code_ptr = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
@@ -610,8 +670,12 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
SLJIT_UPDATE_WX_FLAGS(code, code_ptr, 1);
#if (defined SLJIT_INDIRECT_CALL && SLJIT_INDIRECT_CALL)
+ compiler->executable_size = (sljit_uw)(code_ptr - code) * sizeof(sljit_ins) + sizeof(struct sljit_function_context);
+
return code_ptr;
#else
+ compiler->executable_size = (sljit_uw)(code_ptr - code) * sizeof(sljit_ins);
+
return code;
#endif
}
@@ -621,23 +685,47 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
switch (feature_type) {
case SLJIT_HAS_FPU:
#ifdef SLJIT_IS_FPU_AVAILABLE
- return SLJIT_IS_FPU_AVAILABLE;
+ return (SLJIT_IS_FPU_AVAILABLE) != 0;
#else
/* Available by default. */
return 1;
#endif
-
+ case SLJIT_HAS_REV:
+#if defined(_ARCH_PWR10) && _ARCH_PWR10
+ return 1;
+#else /* !POWER10 */
+ return 2;
+#endif /* POWER10 */
/* A saved register is set to a zero value. */
case SLJIT_HAS_ZERO_REGISTER:
case SLJIT_HAS_CLZ:
+ case SLJIT_HAS_ROT:
case SLJIT_HAS_PREFETCH:
return 1;
+ case SLJIT_HAS_CTZ:
+ return 2;
+
default:
return 0;
}
}
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_cmp_info(sljit_s32 type)
+{
+ switch (type) {
+ case SLJIT_UNORDERED_OR_EQUAL:
+ case SLJIT_ORDERED_NOT_EQUAL:
+ case SLJIT_UNORDERED_OR_LESS:
+ case SLJIT_ORDERED_GREATER_EQUAL:
+ case SLJIT_UNORDERED_OR_GREATER:
+ case SLJIT_ORDERED_LESS_EQUAL:
+ return 1;
+ }
+
+ return 0;
+}
+
/* --------------------------------------------------------------------- */
/* Entry, exit */
/* --------------------------------------------------------------------- */
@@ -659,6 +747,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
#define MEM_MASK 0x7f
+#define FLOAT_DATA(op) (DOUBLE_DATA | ((op & SLJIT_32) >> 6))
+
/* Other inp_flags. */
/* Integer operation and set flags -> requires exts on 64 bit systems. */
@@ -682,6 +772,9 @@ ALT_FORM1 0x001000
...
ALT_FORM5 0x010000 */
+static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 inp_flags, sljit_s32 reg,
+ sljit_s32 arg, sljit_sw argw, sljit_s32 tmp_reg);
+
#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32)
#include "sljitNativePPC_32.c"
#else
@@ -696,69 +789,127 @@ ALT_FORM5 0x010000 */
#define STACK_LOAD LD
#endif
+#if (defined SLJIT_PPC_STACK_FRAME_V2 && SLJIT_PPC_STACK_FRAME_V2)
+#define LR_SAVE_OFFSET (2 * SSIZE_OF(sw))
+#else
+#define LR_SAVE_OFFSET SSIZE_OF(sw)
+#endif
+
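+/* Largest frame size for which both -local_size and local_size + LR_SAVE_OFFSET
+   still fit in the signed 16-bit displacement of the D-form loads/stores used below
+   (descriptive comment, not in the upstream source). */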
+#define STACK_MAX_DISTANCE (0x8000 - SSIZE_OF(sw) - LR_SAVE_OFFSET)
+
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler,
sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
- sljit_s32 args, i, tmp, offs;
+ sljit_s32 i, tmp, base, offset;
+ sljit_s32 word_arg_count = 0;
+ sljit_s32 saved_arg_count = SLJIT_KEPT_SAVEDS_COUNT(options);
+#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
+ sljit_s32 arg_count = 0;
+#endif
CHECK_ERROR();
CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
- FAIL_IF(push_inst(compiler, MFLR | D(0)));
- offs = -(sljit_s32)(sizeof(sljit_sw));
- FAIL_IF(push_inst(compiler, STACK_STORE | S(TMP_ZERO) | A(SLJIT_SP) | IMM(offs)));
+ local_size += GET_SAVED_REGISTERS_SIZE(scratches, saveds - saved_arg_count, 0)
+ + GET_SAVED_FLOAT_REGISTERS_SIZE(fscratches, fsaveds, f64);
- tmp = saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - saveds) : SLJIT_FIRST_SAVED_REG;
- for (i = SLJIT_S0; i >= tmp; i--) {
- offs -= (sljit_s32)(sizeof(sljit_sw));
- FAIL_IF(push_inst(compiler, STACK_STORE | S(i) | A(SLJIT_SP) | IMM(offs)));
- }
+ if (!(options & SLJIT_ENTER_REG_ARG))
+ local_size += SSIZE_OF(sw);
- for (i = scratches; i >= SLJIT_FIRST_SAVED_REG; i--) {
- offs -= (sljit_s32)(sizeof(sljit_sw));
- FAIL_IF(push_inst(compiler, STACK_STORE | S(i) | A(SLJIT_SP) | IMM(offs)));
- }
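+ /* Align the frame to 16 bytes, as the PowerPC ABIs require (descriptive comment, not in the upstream source). */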
+ local_size = (local_size + SLJIT_LOCALS_OFFSET + 15) & ~0xf;
+ compiler->local_size = local_size;
- SLJIT_ASSERT(offs == -(sljit_s32)GET_SAVED_REGISTERS_SIZE(compiler->scratches, compiler->saveds, 1));
+ FAIL_IF(push_inst(compiler, MFLR | D(0)));
-#if (defined SLJIT_PPC_STACK_FRAME_V2 && SLJIT_PPC_STACK_FRAME_V2)
- FAIL_IF(push_inst(compiler, STACK_STORE | S(0) | A(SLJIT_SP) | IMM(2 * sizeof(sljit_sw))));
+ base = SLJIT_SP;
+ offset = local_size;
+
+ if (local_size <= STACK_MAX_DISTANCE) {
+#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32)
+ FAIL_IF(push_inst(compiler, STWU | S(SLJIT_SP) | A(SLJIT_SP) | IMM(-local_size)));
+#else
+ FAIL_IF(push_inst(compiler, STDU | S(SLJIT_SP) | A(SLJIT_SP) | IMM(-local_size)));
+#endif
+ } else {
+ base = TMP_REG1;
+ FAIL_IF(push_inst(compiler, OR | S(SLJIT_SP) | A(TMP_REG1) | B(SLJIT_SP)));
+ FAIL_IF(load_immediate(compiler, TMP_REG2, -local_size));
+#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32)
+ FAIL_IF(push_inst(compiler, STWUX | S(SLJIT_SP) | A(SLJIT_SP) | B(TMP_REG2)));
#else
- FAIL_IF(push_inst(compiler, STACK_STORE | S(0) | A(SLJIT_SP) | IMM(sizeof(sljit_sw))));
+ FAIL_IF(push_inst(compiler, STDUX | S(SLJIT_SP) | A(SLJIT_SP) | B(TMP_REG2)));
#endif
+ local_size = 0;
+ offset = 0;
+ }
- FAIL_IF(push_inst(compiler, ADDI | D(TMP_ZERO) | A(0) | 0));
+ tmp = SLJIT_FS0 - fsaveds;
+ for (i = SLJIT_FS0; i > tmp; i--) {
+ offset -= SSIZE_OF(f64);
+ FAIL_IF(push_inst(compiler, STFD | FS(i) | A(base) | IMM(offset)));
+ }
- args = get_arg_count(arg_types);
+ for (i = fscratches; i >= SLJIT_FIRST_SAVED_FLOAT_REG; i--) {
+ offset -= SSIZE_OF(f64);
+ FAIL_IF(push_inst(compiler, STFD | FS(i) | A(base) | IMM(offset)));
+ }
- if (args >= 1)
- FAIL_IF(push_inst(compiler, OR | S(SLJIT_R0) | A(SLJIT_S0) | B(SLJIT_R0)));
- if (args >= 2)
- FAIL_IF(push_inst(compiler, OR | S(SLJIT_R1) | A(SLJIT_S1) | B(SLJIT_R1)));
- if (args >= 3)
- FAIL_IF(push_inst(compiler, OR | S(SLJIT_R2) | A(SLJIT_S2) | B(SLJIT_R2)));
+ if (!(options & SLJIT_ENTER_REG_ARG)) {
+ offset -= SSIZE_OF(sw);
+ FAIL_IF(push_inst(compiler, STACK_STORE | S(TMP_ZERO) | A(base) | IMM(offset)));
+ }
- local_size += GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1) + SLJIT_LOCALS_OFFSET;
- local_size = (local_size + 15) & ~0xf;
- compiler->local_size = local_size;
+ tmp = SLJIT_S0 - saveds;
+ for (i = SLJIT_S0 - saved_arg_count; i > tmp; i--) {
+ offset -= SSIZE_OF(sw);
+ FAIL_IF(push_inst(compiler, STACK_STORE | S(i) | A(base) | IMM(offset)));
+ }
-#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32)
- if (local_size <= SIMM_MAX)
- FAIL_IF(push_inst(compiler, STWU | S(SLJIT_SP) | A(SLJIT_SP) | IMM(-local_size)));
- else {
- FAIL_IF(load_immediate(compiler, 0, -local_size));
- FAIL_IF(push_inst(compiler, STWUX | S(SLJIT_SP) | A(SLJIT_SP) | B(0)));
+ for (i = scratches; i >= SLJIT_FIRST_SAVED_REG; i--) {
+ offset -= SSIZE_OF(sw);
+ FAIL_IF(push_inst(compiler, STACK_STORE | S(i) | A(base) | IMM(offset)));
}
+
+ FAIL_IF(push_inst(compiler, STACK_STORE | S(0) | A(base) | IMM(local_size + LR_SAVE_OFFSET)));
+
+ if (options & SLJIT_ENTER_REG_ARG)
+ return SLJIT_SUCCESS;
+
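+ /* addi TMP_ZERO, 0, 0: reinitialize the dedicated zero register (descriptive comment, not in the upstream source). */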
+ FAIL_IF(push_inst(compiler, ADDI | D(TMP_ZERO) | A(0) | 0));
+
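+ /* Copy word arguments into their saved registers; on 64 bit, float arguments also
+    consume integer argument slots, so scratch word arguments may need compacting from
+    r(arg_count) to r(word_arg_count) (descriptive comment, not in the upstream source). */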
+ arg_types >>= SLJIT_ARG_SHIFT;
+ saved_arg_count = 0;
+
+ while (arg_types > 0) {
+ if ((arg_types & SLJIT_ARG_MASK) < SLJIT_ARG_TYPE_F64) {
+#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
+ do {
+ if (!(arg_types & SLJIT_ARG_TYPE_SCRATCH_REG)) {
+ tmp = SLJIT_S0 - saved_arg_count;
+ saved_arg_count++;
+ } else if (arg_count != word_arg_count)
+ tmp = SLJIT_R0 + word_arg_count;
+ else
+ break;
+
+ FAIL_IF(push_inst(compiler, OR | S(SLJIT_R0 + arg_count) | A(tmp) | B(SLJIT_R0 + arg_count)));
+ } while (0);
#else
- if (local_size <= SIMM_MAX)
- FAIL_IF(push_inst(compiler, STDU | S(SLJIT_SP) | A(SLJIT_SP) | IMM(-local_size)));
- else {
- FAIL_IF(load_immediate(compiler, 0, -local_size));
- FAIL_IF(push_inst(compiler, STDUX | S(SLJIT_SP) | A(SLJIT_SP) | B(0)));
- }
+ if (!(arg_types & SLJIT_ARG_TYPE_SCRATCH_REG)) {
+ FAIL_IF(push_inst(compiler, OR | S(SLJIT_R0 + word_arg_count) | A(SLJIT_S0 - saved_arg_count) | B(SLJIT_R0 + word_arg_count)));
+ saved_arg_count++;
+ }
+#endif
+ word_arg_count++;
+ }
+
+#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
+ arg_count++;
#endif
+ arg_types >>= SLJIT_ARG_SHIFT;
+ }
return SLJIT_SUCCESS;
}
@@ -771,59 +922,109 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *comp
CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
- local_size += GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1) + SLJIT_LOCALS_OFFSET;
- compiler->local_size = (local_size + 15) & ~0xf;
+ local_size += GET_SAVED_REGISTERS_SIZE(scratches, saveds - SLJIT_KEPT_SAVEDS_COUNT(options), 0)
+ + GET_SAVED_FLOAT_REGISTERS_SIZE(fscratches, fsaveds, f64);
+
+ if (!(options & SLJIT_ENTER_REG_ARG))
+ local_size += SSIZE_OF(sw);
+
+ compiler->local_size = (local_size + SLJIT_LOCALS_OFFSET + 15) & ~0xf;
return SLJIT_SUCCESS;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw)
+static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler, sljit_s32 is_return_to)
{
- sljit_s32 i, tmp, offs;
-
- CHECK_ERROR();
- CHECK(check_sljit_emit_return(compiler, op, src, srcw));
+ sljit_s32 i, tmp, base, offset;
+ sljit_s32 local_size = compiler->local_size;
+
+ base = SLJIT_SP;
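+ /* For frames beyond the 16-bit displacement range, either reload the previous SP
+    from the back chain at offset 0, or step partway up with ADDI so the remaining
+    offsets fit (descriptive comment, not in the upstream source). */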
+ if (local_size > STACK_MAX_DISTANCE) {
+ base = TMP_REG1;
+ if (local_size > 2 * STACK_MAX_DISTANCE + LR_SAVE_OFFSET) {
+ FAIL_IF(push_inst(compiler, STACK_LOAD | D(base) | A(SLJIT_SP) | IMM(0)));
+ local_size = 0;
+ } else {
+ FAIL_IF(push_inst(compiler, ADDI | D(TMP_REG1) | A(SLJIT_SP) | IMM(local_size - STACK_MAX_DISTANCE)));
+ local_size = STACK_MAX_DISTANCE;
+ }
+ }
- FAIL_IF(emit_mov_before_return(compiler, op, src, srcw));
+ offset = local_size;
+ if (!is_return_to)
+ FAIL_IF(push_inst(compiler, STACK_LOAD | S(0) | A(base) | IMM(offset + LR_SAVE_OFFSET)));
- if (compiler->local_size <= SIMM_MAX)
- FAIL_IF(push_inst(compiler, ADDI | D(SLJIT_SP) | A(SLJIT_SP) | IMM(compiler->local_size)));
- else {
- FAIL_IF(load_immediate(compiler, 0, compiler->local_size));
- FAIL_IF(push_inst(compiler, ADD | D(SLJIT_SP) | A(SLJIT_SP) | B(0)));
+ tmp = SLJIT_FS0 - compiler->fsaveds;
+ for (i = SLJIT_FS0; i > tmp; i--) {
+ offset -= SSIZE_OF(f64);
+ FAIL_IF(push_inst(compiler, LFD | FS(i) | A(base) | IMM(offset)));
}
-#if (defined SLJIT_PPC_STACK_FRAME_V2 && SLJIT_PPC_STACK_FRAME_V2)
- FAIL_IF(push_inst(compiler, STACK_LOAD | D(0) | A(SLJIT_SP) | IMM(2 * sizeof(sljit_sw))));
-#else
- FAIL_IF(push_inst(compiler, STACK_LOAD | D(0) | A(SLJIT_SP) | IMM(sizeof(sljit_sw))));
-#endif
+ for (i = compiler->fscratches; i >= SLJIT_FIRST_SAVED_FLOAT_REG; i--) {
+ offset -= SSIZE_OF(f64);
+ FAIL_IF(push_inst(compiler, LFD | FS(i) | A(base) | IMM(offset)));
+ }
- offs = -(sljit_s32)GET_SAVED_REGISTERS_SIZE(compiler->scratches, compiler->saveds, 1);
+ if (!(compiler->options & SLJIT_ENTER_REG_ARG)) {
+ offset -= SSIZE_OF(sw);
+ FAIL_IF(push_inst(compiler, STACK_LOAD | S(TMP_ZERO) | A(base) | IMM(offset)));
+ }
- tmp = compiler->scratches;
- for (i = SLJIT_FIRST_SAVED_REG; i <= tmp; i++) {
- FAIL_IF(push_inst(compiler, STACK_LOAD | D(i) | A(SLJIT_SP) | IMM(offs)));
- offs += (sljit_s32)(sizeof(sljit_sw));
+ tmp = SLJIT_S0 - compiler->saveds;
+ for (i = SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options); i > tmp; i--) {
+ offset -= SSIZE_OF(sw);
+ FAIL_IF(push_inst(compiler, STACK_LOAD | S(i) | A(base) | IMM(offset)));
}
- tmp = compiler->saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - compiler->saveds) : SLJIT_FIRST_SAVED_REG;
- for (i = tmp; i <= SLJIT_S0; i++) {
- FAIL_IF(push_inst(compiler, STACK_LOAD | D(i) | A(SLJIT_SP) | IMM(offs)));
- offs += (sljit_s32)(sizeof(sljit_sw));
+ for (i = compiler->scratches; i >= SLJIT_FIRST_SAVED_REG; i--) {
+ offset -= SSIZE_OF(sw);
+ FAIL_IF(push_inst(compiler, STACK_LOAD | S(i) | A(base) | IMM(offset)));
}
- FAIL_IF(push_inst(compiler, STACK_LOAD | D(TMP_ZERO) | A(SLJIT_SP) | IMM(offs)));
- SLJIT_ASSERT(offs == -(sljit_sw)(sizeof(sljit_sw)));
+ if (!is_return_to)
+ push_inst(compiler, MTLR | S(0));
- FAIL_IF(push_inst(compiler, MTLR | S(0)));
- FAIL_IF(push_inst(compiler, BLR));
+ if (local_size > 0)
+ return push_inst(compiler, ADDI | D(SLJIT_SP) | A(base) | IMM(local_size));
- return SLJIT_SUCCESS;
+ SLJIT_ASSERT(base == TMP_REG1);
+ return push_inst(compiler, OR | S(base) | A(SLJIT_SP) | B(base));
}
#undef STACK_STORE
#undef STACK_LOAD
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler *compiler)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_return_void(compiler));
+
+ FAIL_IF(emit_stack_frame_release(compiler, 0));
+ return push_inst(compiler, BLR);
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_to(struct sljit_compiler *compiler,
+ sljit_s32 src, sljit_sw srcw)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_return_to(compiler, src, srcw));
+
+ if (src & SLJIT_MEM) {
+ ADJUST_LOCAL_OFFSET(src, srcw);
+ FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_CALL_REG, src, srcw, TMP_CALL_REG));
+ src = TMP_CALL_REG;
+ srcw = 0;
+ } else if (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) {
+ FAIL_IF(push_inst(compiler, OR | S(src) | A(TMP_CALL_REG) | B(src)));
+ src = TMP_CALL_REG;
+ srcw = 0;
+ }
+
+ FAIL_IF(emit_stack_frame_release(compiler, 1));
+
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_ijump(compiler, SLJIT_JUMP, src, srcw);
+}
+
/* --------------------------------------------------------------------- */
/* Operators */
/* --------------------------------------------------------------------- */
@@ -843,11 +1044,11 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *comp
#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32)
#define ARCH_32_64(a, b) a
#define INST_CODE_AND_DST(inst, flags, reg) \
- ((inst) | (((flags) & MEM_MASK) <= GPR_REG ? D(reg) : FD(reg)))
+ ((sljit_ins)(inst) | (sljit_ins)(((flags) & MEM_MASK) <= GPR_REG ? D(reg) : FD(reg)))
#else
#define ARCH_32_64(a, b) b
#define INST_CODE_AND_DST(inst, flags, reg) \
- (((inst) & ~INT_ALIGNED) | (((flags) & MEM_MASK) <= GPR_REG ? D(reg) : FD(reg)))
+ (((sljit_ins)(inst) & ~(sljit_ins)INT_ALIGNED) | (sljit_ins)(((flags) & MEM_MASK) <= GPR_REG ? D(reg) : FD(reg)))
#endif
static const sljit_ins data_transfer_insts[64 + 16] = {
@@ -988,7 +1189,6 @@ static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 inp_flag
{
sljit_ins inst;
sljit_s32 offs_reg;
- sljit_sw high_short;
/* Should work when (arg & REG_MASK) == 0. */
SLJIT_ASSERT(A(0) == 0);
@@ -999,11 +1199,7 @@ static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 inp_flag
offs_reg = OFFS_REG(arg);
if (argw != 0) {
-#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32)
- FAIL_IF(push_inst(compiler, RLWINM | S(OFFS_REG(arg)) | A(tmp_reg) | (argw << 11) | ((31 - argw) << 1)));
-#else
- FAIL_IF(push_inst(compiler, RLDI(tmp_reg, OFFS_REG(arg), argw, 63 - argw, 1)));
-#endif
+ FAIL_IF(push_inst(compiler, SLWI_W(argw) | S(OFFS_REG(arg)) | A(tmp_reg)));
offs_reg = tmp_reg;
}
@@ -1011,7 +1207,7 @@ static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 inp_flag
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
SLJIT_ASSERT(!(inst & INT_ALIGNED));
-#endif
+#endif /* SLJIT_CONFIG_PPC_64 */
return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg & REG_MASK) | B(offs_reg));
}
@@ -1026,36 +1222,24 @@ static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 inp_flag
inst = data_transfer_insts[(inp_flags | INDEXED) & MEM_MASK];
return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg) | B(tmp_reg));
}
-#endif
+#endif /* SLJIT_CONFIG_PPC_64 */
if (argw <= SIMM_MAX && argw >= SIMM_MIN)
return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg) | IMM(argw));
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
if (argw <= 0x7fff7fffl && argw >= -0x80000000l) {
-#endif
-
- high_short = (sljit_s32)(argw + ((argw & 0x8000) << 1)) & ~0xffff;
-
-#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
- SLJIT_ASSERT(high_short && high_short <= 0x7fffffffl && high_short >= -0x80000000l);
-#else
- SLJIT_ASSERT(high_short);
-#endif
-
- FAIL_IF(push_inst(compiler, ADDIS | D(tmp_reg) | A(arg) | IMM(high_short >> 16)));
+#endif /* SLJIT_CONFIG_PPC_64 */
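+ /* The instruction sign-extends the low 16 bits, so bias the high half with +0x8000 to compensate (descriptive comment, not in the upstream source). */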
+ FAIL_IF(push_inst(compiler, ADDIS | D(tmp_reg) | A(arg) | IMM((argw + 0x8000) >> 16)));
return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(tmp_reg) | IMM(argw));
-
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
}
- /* The rest is PPC-64 only. */
-
FAIL_IF(load_immediate(compiler, tmp_reg, argw));
inst = data_transfer_insts[(inp_flags | INDEXED) & MEM_MASK];
return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg) | B(tmp_reg));
-#endif
+#endif /* SLJIT_CONFIG_PPC_64 */
}
static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 input_flags,
@@ -1073,8 +1257,10 @@ static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s3
sljit_s32 flags = input_flags & (ALT_FORM1 | ALT_FORM2 | ALT_FORM3 | ALT_FORM4 | ALT_FORM5 | ALT_SIGN_EXT | ALT_SET_FLAGS);
/* Destination check. */
- if (SLOW_IS_REG(dst)) {
+ if (FAST_IS_REG(dst)) {
dst_r = dst;
+ /* The REG_DEST flag is only used by SLJIT_MOV operations, although
+ * it is also set for op2 operations with an unset destination. */
flags |= REG_DEST;
if (op >= SLJIT_MOV && op <= SLJIT_MOV_P)
@@ -1086,9 +1272,12 @@ static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s3
src1_r = src1;
flags |= REG1_SOURCE;
}
- else if (src1 & SLJIT_IMM) {
- FAIL_IF(load_immediate(compiler, TMP_REG1, src1w));
- src1_r = TMP_REG1;
+ else if (src1 == SLJIT_IMM) {
+ src1_r = TMP_ZERO;
+ if (src1w != 0) {
+ FAIL_IF(load_immediate(compiler, TMP_REG1, src1w));
+ src1_r = TMP_REG1;
+ }
}
else {
FAIL_IF(emit_op_mem(compiler, input_flags | LOAD_DATA, TMP_REG1, src1, src1w, TMP_REG1));
@@ -1103,9 +1292,12 @@ static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s3
if (!(flags & REG_DEST) && op >= SLJIT_MOV && op <= SLJIT_MOV_P)
dst_r = src2_r;
}
- else if (src2 & SLJIT_IMM) {
- FAIL_IF(load_immediate(compiler, sugg_src2_r, src2w));
- src2_r = sugg_src2_r;
+ else if (src2 == SLJIT_IMM) {
+ src2_r = TMP_ZERO;
+ if (src2w != 0) {
+ FAIL_IF(load_immediate(compiler, sugg_src2_r, src2w));
+ src2_r = sugg_src2_r;
+ }
}
else {
FAIL_IF(emit_op_mem(compiler, input_flags | LOAD_DATA, sugg_src2_r, src2, src2w, TMP_REG2));
@@ -1123,7 +1315,7 @@ static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s3
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op)
{
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
- sljit_s32 int_op = op & SLJIT_I32_OP;
+ sljit_s32 int_op = op & SLJIT_32;
#endif
CHECK_ERROR();
@@ -1170,33 +1362,161 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
return SLJIT_SUCCESS;
}
-static sljit_s32 emit_prefetch(struct sljit_compiler *compiler,
- sljit_s32 src, sljit_sw srcw)
+static sljit_s32 emit_rev(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 src, sljit_sw srcw)
{
- if (!(src & OFFS_REG_MASK)) {
- if (srcw == 0 && (src & REG_MASK) != SLJIT_UNUSED)
- return push_inst(compiler, DCBT | A(0) | B(src & REG_MASK));
+ sljit_s32 mem, offs_reg, inp_flags;
+ sljit_sw memw;
+#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
+ sljit_s32 is_32 = op & SLJIT_32;
- FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
- /* Works with SLJIT_MEM0() case as well. */
- return push_inst(compiler, DCBT | A(src & REG_MASK) | B(TMP_REG1));
+ op = GET_OPCODE(op);
+#endif /* SLJIT_CONFIG_PPC_64 */
+
+ if (!((dst | src) & SLJIT_MEM)) {
+ /* Both are registers. */
+ if (op == SLJIT_REV_U16 || op == SLJIT_REV_S16) {
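+ /* Swap the two low bytes with rlwinm/rlwimi pairs; the in-place (src == dst) and
+    register-to-register forms need different rotations (descriptive comment, not in the upstream source). */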
+ if (src == dst) {
+ FAIL_IF(push_inst(compiler, RLWIMI | S(dst) | A(dst) | RLWI_SH(16) | RLWI_MBE(8, 15)));
+ FAIL_IF(push_inst(compiler, RLWINM | S(dst) | A(dst) | RLWI_SH(24) | RLWI_MBE(16, 31)));
+ } else {
+ FAIL_IF(push_inst(compiler, RLWINM | S(src) | A(dst) | RLWI_SH(8) | RLWI_MBE(16, 23)));
+ FAIL_IF(push_inst(compiler, RLWIMI | S(src) | A(dst) | RLWI_SH(24) | RLWI_MBE(24, 31)));
+ }
+
+ if (op == SLJIT_REV_U16)
+ return SLJIT_SUCCESS;
+ return push_inst(compiler, EXTSH | S(dst) | A(dst));
+ }
+
+#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
+ if (!is_32) {
+#if defined(_ARCH_PWR10) && _ARCH_PWR10
+ return push_inst(compiler, BRD | S(src) | A(dst));
+#else /* !POWER10 */
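+ /* No brd before POWER10: byte-swap each 32-bit half with stwbrx into the stack
+    scratch slot, storing the halves in swapped order, then reload the full 64-bit
+    result with a single ld (descriptive comment, not in the upstream source). */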
+ FAIL_IF(push_inst(compiler, ADDI | D(TMP_REG2) | A(0) | IMM(TMP_MEM_OFFSET_HI)));
+ FAIL_IF(push_inst(compiler, RLDICL | S(src) | A(TMP_REG1) | RLDI_SH(32) | RLDI_MB(32)));
+ FAIL_IF(push_inst(compiler, STWBRX | S(src) | A(SLJIT_SP) | B(TMP_REG2)));
+ FAIL_IF(push_inst(compiler, ADDI | D(TMP_REG2) | A(0) | IMM(TMP_MEM_OFFSET_LO)));
+ FAIL_IF(push_inst(compiler, STWBRX | S(TMP_REG1) | A(SLJIT_SP) | B(TMP_REG2)));
+ return push_inst(compiler, LD | D(dst) | A(SLJIT_SP) | TMP_MEM_OFFSET);
+#endif /* POWER10 */
+ }
+#endif /* SLJIT_CONFIG_PPC_64 */
+
+ FAIL_IF(push_inst(compiler, ADDI | D(TMP_REG2) | A(0) | IMM(TMP_MEM_OFFSET)));
+ FAIL_IF(push_inst(compiler, STWBRX | S(src) | A(SLJIT_SP) | B(TMP_REG2)));
+ FAIL_IF(push_inst(compiler, LWZ | D(dst) | A(SLJIT_SP) | TMP_MEM_OFFSET));
+
+#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
+ if (op == SLJIT_REV_S32)
+ return push_inst(compiler, EXTSW | S(dst) | A(dst));
+#endif /* SLJIT_CONFIG_PPC_64 */
+ return SLJIT_SUCCESS;
}
- srcw &= 0x3;
+ mem = src;
+ memw = srcw;
- if (srcw == 0)
- return push_inst(compiler, DCBT | A(src & REG_MASK) | B(OFFS_REG(src)));
+ if (dst & SLJIT_MEM) {
+ mem = dst;
+ memw = dstw;
-#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32)
- FAIL_IF(push_inst(compiler, RLWINM | S(OFFS_REG(src)) | A(TMP_REG1) | (srcw << 11) | ((31 - srcw) << 1)));
-#else
- FAIL_IF(push_inst(compiler, RLDI(TMP_REG1, OFFS_REG(src), srcw, 63 - srcw, 1)));
-#endif
- return push_inst(compiler, DCBT | A(src & REG_MASK) | B(TMP_REG1));
+ if (src & SLJIT_MEM) {
+ inp_flags = HALF_DATA | LOAD_DATA;
+
+ if (op != SLJIT_REV_U16 && op != SLJIT_REV_S16) {
+#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
+ inp_flags = (is_32 ? INT_DATA : WORD_DATA) | LOAD_DATA;
+#else /* !SLJIT_CONFIG_PPC_64 */
+ inp_flags = WORD_DATA | LOAD_DATA;
+#endif /* SLJIT_CONFIG_PPC_64 */
+ }
+
+ FAIL_IF(emit_op_mem(compiler, inp_flags, TMP_REG1, src, srcw, TMP_REG2));
+ src = TMP_REG1;
+ }
+ }
+
+ if (SLJIT_UNLIKELY(mem & OFFS_REG_MASK)) {
+ offs_reg = OFFS_REG(mem);
+ mem &= REG_MASK;
+ memw &= 0x3;
+
+ if (memw != 0) {
+ FAIL_IF(push_inst(compiler, SLWI_W(memw) | S(offs_reg) | A(TMP_REG2)));
+ offs_reg = TMP_REG2;
+ }
+#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
+ } else if (memw > 0x7fff7fffl || memw < -0x80000000l) {
+ FAIL_IF(load_immediate(compiler, TMP_REG2, memw));
+ offs_reg = TMP_REG2;
+ mem &= REG_MASK;
+#endif /* SLJIT_CONFIG_PPC_64 */
+ } else {
+ FAIL_IF(push_inst(compiler, ADDI | D(TMP_REG2) | A(mem & REG_MASK) | IMM(memw)));
+ if (memw > SIMM_MAX || memw < SIMM_MIN)
+ FAIL_IF(push_inst(compiler, ADDIS | D(TMP_REG2) | A(TMP_REG2) | IMM((memw + 0x8000) >> 16)));
+
+ mem = 0;
+ offs_reg = TMP_REG2;
+ }
+
+ if (op == SLJIT_REV_U16 || op == SLJIT_REV_S16) {
+ if (dst & SLJIT_MEM)
+ return push_inst(compiler, STHBRX | S(src) | A(mem) | B(offs_reg));
+
+ FAIL_IF(push_inst(compiler, LHBRX | S(dst) | A(mem) | B(offs_reg)));
+
+ if (op == SLJIT_REV_U16)
+ return SLJIT_SUCCESS;
+ return push_inst(compiler, EXTSH | S(dst) | A(dst));
+ }
+
+#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
+ if (!is_32) {
+ if (dst & SLJIT_MEM) {
+#if defined(_ARCH_PWR7) && _ARCH_PWR7
+ return push_inst(compiler, STDBRX | S(src) | A(mem) | B(offs_reg));
+#else /* !POWER7 */
+#if defined(SLJIT_LITTLE_ENDIAN) && SLJIT_LITTLE_ENDIAN
+ FAIL_IF(push_inst(compiler, RLDICL | S(src) | A(TMP_REG1) | RLDI_SH(32) | RLDI_MB(32)));
+ FAIL_IF(push_inst(compiler, STWBRX | S(TMP_REG1) | A(mem) | B(offs_reg)));
+ FAIL_IF(push_inst(compiler, ADDI | D(TMP_REG2) | A(offs_reg) | IMM(SSIZE_OF(s32))));
+ return push_inst(compiler, STWBRX | S(src) | A(mem) | B(TMP_REG2));
+#else /* !SLJIT_LITTLE_ENDIAN */
+ FAIL_IF(push_inst(compiler, STWBRX | S(src) | A(mem) | B(offs_reg)));
+ FAIL_IF(push_inst(compiler, RLDICL | S(src) | A(TMP_REG1) | RLDI_SH(32) | RLDI_MB(32)));
+ FAIL_IF(push_inst(compiler, ADDI | D(TMP_REG2) | A(offs_reg) | IMM(SSIZE_OF(s32))));
+ return push_inst(compiler, STWBRX | S(TMP_REG1) | A(mem) | B(TMP_REG2));
+#endif /* SLJIT_LITTLE_ENDIAN */
+#endif /* POWER7 */
+ }
+#if defined(_ARCH_PWR7) && _ARCH_PWR7
+ return push_inst(compiler, LDBRX | S(dst) | A(mem) | B(offs_reg));
+#else /* !POWER7 */
+ FAIL_IF(push_inst(compiler, LWBRX | LWBRX_FIRST_REG | A(mem) | B(offs_reg)));
+ FAIL_IF(push_inst(compiler, ADDI | D(TMP_REG2) | A(offs_reg) | IMM(SSIZE_OF(s32))));
+ FAIL_IF(push_inst(compiler, LWBRX | LWBRX_SECOND_REG | A(mem) | B(TMP_REG2)));
+ return push_inst(compiler, RLDIMI | S(TMP_REG1) | A(dst) | RLDI_SH(32) | RLDI_MB(0));
+#endif /* POWER7 */
+ }
+#endif /* SLJIT_CONFIG_PPC_64 */
+
+ if (dst & SLJIT_MEM)
+ return push_inst(compiler, STWBRX | S(src) | A(mem) | B(offs_reg));
+
+ FAIL_IF(push_inst(compiler, LWBRX | S(dst) | A(mem) | B(offs_reg)));
+#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
+ if (op == SLJIT_REV_S32)
+ return push_inst(compiler, EXTSW | S(dst) | A(dst));
+#endif /* SLJIT_CONFIG_PPC_64 */
+ return SLJIT_SUCCESS;
}
#define EMIT_MOV(type, type_flags, type_cast) \
- emit_op(compiler, (src & SLJIT_IMM) ? SLJIT_MOV : type, flags | (type_flags), dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? type_cast srcw : srcw)
+ emit_op(compiler, (src == SLJIT_IMM) ? SLJIT_MOV : type, flags | (type_flags), dst, dstw, TMP_REG1, 0, src, (src == SLJIT_IMM) ? type_cast srcw : srcw)
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
@@ -1211,25 +1531,23 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
ADJUST_LOCAL_OFFSET(src, srcw);
op = GET_OPCODE(op);
- if ((src & SLJIT_IMM) && srcw == 0)
- src = TMP_ZERO;
if (GET_FLAG_TYPE(op_flags) == SLJIT_OVERFLOW)
FAIL_IF(push_inst(compiler, MTXER | S(TMP_ZERO)));
- if (op < SLJIT_NOT && FAST_IS_REG(src) && src == dst) {
+ if (op <= SLJIT_MOV_P && FAST_IS_REG(src) && src == dst) {
if (!TYPE_CAST_NEEDED(op))
return SLJIT_SUCCESS;
}
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
- if (op_flags & SLJIT_I32_OP) {
- if (op < SLJIT_NOT) {
+ if (op_flags & SLJIT_32) {
+ if (op <= SLJIT_MOV_P) {
if (src & SLJIT_MEM) {
if (op == SLJIT_MOV_S32)
op = SLJIT_MOV_U32;
}
- else if (src & SLJIT_IMM) {
+ else if (src == SLJIT_IMM) {
if (op == SLJIT_MOV_U32)
op = SLJIT_MOV_S32;
}
@@ -1245,11 +1563,12 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
switch (op) {
case SLJIT_MOV:
- case SLJIT_MOV_P:
#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32)
case SLJIT_MOV_U32:
case SLJIT_MOV_S32:
+ case SLJIT_MOV32:
#endif
+ case SLJIT_MOV_P:
return emit_op(compiler, SLJIT_MOV, flags | WORD_DATA, dst, dstw, TMP_REG1, 0, src, srcw);
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
@@ -1257,6 +1576,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
return EMIT_MOV(SLJIT_MOV_U32, INT_DATA, (sljit_u32));
case SLJIT_MOV_S32:
+ case SLJIT_MOV32:
return EMIT_MOV(SLJIT_MOV_S32, INT_DATA | SIGNED_DATA, (sljit_s32));
#endif
@@ -1272,18 +1592,26 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
case SLJIT_MOV_S16:
return EMIT_MOV(SLJIT_MOV_S16, HALF_DATA | SIGNED_DATA, (sljit_s16));
- case SLJIT_NOT:
- return emit_op(compiler, SLJIT_NOT, flags, dst, dstw, TMP_REG1, 0, src, srcw);
-
- case SLJIT_NEG:
- return emit_op(compiler, SLJIT_NEG, flags | (GET_FLAG_TYPE(op_flags) ? ALT_FORM1 : 0), dst, dstw, TMP_REG1, 0, src, srcw);
-
case SLJIT_CLZ:
+ case SLJIT_CTZ:
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
- return emit_op(compiler, SLJIT_CLZ, flags | (!(op_flags & SLJIT_I32_OP) ? 0 : ALT_FORM1), dst, dstw, TMP_REG1, 0, src, srcw);
-#else
- return emit_op(compiler, SLJIT_CLZ, flags, dst, dstw, TMP_REG1, 0, src, srcw);
-#endif
+ if (op_flags & SLJIT_32)
+ flags |= ALT_FORM1;
+#endif /* SLJIT_CONFIG_PPC_64 */
+ return emit_op(compiler, op, flags, dst, dstw, TMP_REG1, 0, src, srcw);
+ case SLJIT_REV_U32:
+ case SLJIT_REV_S32:
+#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
+ op |= SLJIT_32;
+#endif /* SLJIT_CONFIG_PPC_64 */
+ /* fallthrough */
+ case SLJIT_REV:
+ case SLJIT_REV_U16:
+ case SLJIT_REV_S16:
+#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
+ op |= (op_flags & SLJIT_32);
+#endif /* SLJIT_CONFIG_PPC_64 */
+ return emit_rev(compiler, op, dst, dstw, src, srcw);
}
return SLJIT_SUCCESS;
@@ -1291,57 +1619,47 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
#undef EMIT_MOV
+/* Macros for checking different operand types / values. */
#define TEST_SL_IMM(src, srcw) \
- (((src) & SLJIT_IMM) && (srcw) <= SIMM_MAX && (srcw) >= SIMM_MIN)
-
+ ((src) == SLJIT_IMM && (srcw) <= SIMM_MAX && (srcw) >= SIMM_MIN)
#define TEST_UL_IMM(src, srcw) \
- (((src) & SLJIT_IMM) && !((srcw) & ~0xffff))
-
-#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
-#define TEST_SH_IMM(src, srcw) \
- (((src) & SLJIT_IMM) && !((srcw) & 0xffff) && (srcw) <= 0x7fffffffl && (srcw) >= -0x80000000l)
-#else
-#define TEST_SH_IMM(src, srcw) \
- (((src) & SLJIT_IMM) && !((srcw) & 0xffff))
-#endif
-
+ ((src) == SLJIT_IMM && !((srcw) & ~0xffff))
#define TEST_UH_IMM(src, srcw) \
- (((src) & SLJIT_IMM) && !((srcw) & ~0xffff0000))
+ ((src) == SLJIT_IMM && !((srcw) & ~(sljit_sw)0xffff0000))
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
+#define TEST_SH_IMM(src, srcw) \
+ ((src) == SLJIT_IMM && !((srcw) & 0xffff) && (srcw) <= 0x7fffffffl && (srcw) >= -0x80000000l)
#define TEST_ADD_IMM(src, srcw) \
- (((src) & SLJIT_IMM) && (srcw) <= 0x7fff7fffl && (srcw) >= -0x80000000l)
-#else
-#define TEST_ADD_IMM(src, srcw) \
- ((src) & SLJIT_IMM)
-#endif
-
-#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
-#define TEST_UI_IMM(src, srcw) \
- (((src) & SLJIT_IMM) && !((srcw) & ~0xffffffff))
-#else
+ ((src) == SLJIT_IMM && (srcw) <= 0x7fff7fffl && (srcw) >= -0x80000000l)
#define TEST_UI_IMM(src, srcw) \
- ((src) & SLJIT_IMM)
-#endif
+ ((src) == SLJIT_IMM && !((srcw) & ~0xffffffff))
-#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
#define TEST_ADD_FORM1(op) \
(GET_FLAG_TYPE(op) == SLJIT_OVERFLOW \
- || (op & (SLJIT_I32_OP | SLJIT_SET_Z | VARIABLE_FLAG_MASK)) == (SLJIT_I32_OP | SLJIT_SET_Z | SLJIT_SET_CARRY))
+ || (op & (SLJIT_32 | SLJIT_SET_Z | VARIABLE_FLAG_MASK)) == (SLJIT_32 | SLJIT_SET_Z | SLJIT_SET_CARRY))
#define TEST_SUB_FORM2(op) \
((GET_FLAG_TYPE(op) >= SLJIT_SIG_LESS && GET_FLAG_TYPE(op) <= SLJIT_SIG_LESS_EQUAL) \
- || (op & (SLJIT_I32_OP | SLJIT_SET_Z | VARIABLE_FLAG_MASK)) == (SLJIT_I32_OP | SLJIT_SET_Z))
+ || (op & (SLJIT_32 | SLJIT_SET_Z | VARIABLE_FLAG_MASK)) == (SLJIT_32 | SLJIT_SET_Z))
#define TEST_SUB_FORM3(op) \
(GET_FLAG_TYPE(op) == SLJIT_OVERFLOW \
- || (op & (SLJIT_I32_OP | SLJIT_SET_Z)) == (SLJIT_I32_OP | SLJIT_SET_Z))
-#else
+ || (op & (SLJIT_32 | SLJIT_SET_Z)) == (SLJIT_32 | SLJIT_SET_Z))
+
+#else /* !SLJIT_CONFIG_PPC_64 */
+#define TEST_SH_IMM(src, srcw) \
+ ((src) == SLJIT_IMM && !((srcw) & 0xffff))
+#define TEST_ADD_IMM(src, srcw) \
+ ((src) == SLJIT_IMM)
+#define TEST_UI_IMM(src, srcw) \
+ ((src) == SLJIT_IMM)
+
#define TEST_ADD_FORM1(op) \
(GET_FLAG_TYPE(op) == SLJIT_OVERFLOW)
#define TEST_SUB_FORM2(op) \
(GET_FLAG_TYPE(op) >= SLJIT_SIG_LESS && GET_FLAG_TYPE(op) <= SLJIT_SIG_LESS_EQUAL)
#define TEST_SUB_FORM3(op) \
(GET_FLAG_TYPE(op) == SLJIT_OVERFLOW)
-#endif
+#endif /* SLJIT_CONFIG_PPC_64 */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
@@ -1351,26 +1669,18 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
sljit_s32 flags = HAS_FLAGS(op) ? ALT_SET_FLAGS : 0;
CHECK_ERROR();
- CHECK(check_sljit_emit_op2(compiler, op, dst, dstw, src1, src1w, src2, src2w));
+ CHECK(check_sljit_emit_op2(compiler, op, 0, dst, dstw, src1, src1w, src2, src2w));
ADJUST_LOCAL_OFFSET(dst, dstw);
ADJUST_LOCAL_OFFSET(src1, src1w);
ADJUST_LOCAL_OFFSET(src2, src2w);
- if (dst == SLJIT_UNUSED && !HAS_FLAGS(op))
- return SLJIT_SUCCESS;
-
- if ((src1 & SLJIT_IMM) && src1w == 0)
- src1 = TMP_ZERO;
- if ((src2 & SLJIT_IMM) && src2w == 0)
- src2 = TMP_ZERO;
-
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
- if (op & SLJIT_I32_OP) {
+ if (op & SLJIT_32) {
/* Most operations expect sign extended arguments. */
flags |= INT_DATA | SIGNED_DATA;
- if (src1 & SLJIT_IMM)
+ if (src1 == SLJIT_IMM)
src1w = (sljit_s32)(src1w);
- if (src2 & SLJIT_IMM)
+ if (src2 == SLJIT_IMM)
src2w = (sljit_s32)(src2w);
if (HAS_FLAGS(op))
flags |= ALT_SIGN_EXT;
@@ -1381,45 +1691,47 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
switch (GET_OPCODE(op)) {
case SLJIT_ADD:
+ compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD;
+
if (TEST_ADD_FORM1(op))
return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM1, dst, dstw, src1, src1w, src2, src2w);
- if (!HAS_FLAGS(op) && ((src1 | src2) & SLJIT_IMM)) {
+ if (!HAS_FLAGS(op) && (src1 == SLJIT_IMM || src2 == SLJIT_IMM)) {
if (TEST_SL_IMM(src2, src2w)) {
- compiler->imm = src2w & 0xffff;
+ compiler->imm = (sljit_ins)src2w & 0xffff;
return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM2, dst, dstw, src1, src1w, TMP_REG2, 0);
}
if (TEST_SL_IMM(src1, src1w)) {
- compiler->imm = src1w & 0xffff;
+ compiler->imm = (sljit_ins)src1w & 0xffff;
return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM2, dst, dstw, src2, src2w, TMP_REG2, 0);
}
if (TEST_SH_IMM(src2, src2w)) {
- compiler->imm = (src2w >> 16) & 0xffff;
+ compiler->imm = (sljit_ins)(src2w >> 16) & 0xffff;
return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM2 | ALT_FORM3, dst, dstw, src1, src1w, TMP_REG2, 0);
}
if (TEST_SH_IMM(src1, src1w)) {
- compiler->imm = (src1w >> 16) & 0xffff;
+ compiler->imm = (sljit_ins)(src1w >> 16) & 0xffff;
return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM2 | ALT_FORM3, dst, dstw, src2, src2w, TMP_REG2, 0);
}
/* Range between -1 and -32768 is covered above. */
if (TEST_ADD_IMM(src2, src2w)) {
- compiler->imm = src2w & 0xffffffff;
+ compiler->imm = (sljit_ins)src2w & 0xffffffff;
return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM2 | ALT_FORM4, dst, dstw, src1, src1w, TMP_REG2, 0);
}
if (TEST_ADD_IMM(src1, src1w)) {
- compiler->imm = src1w & 0xffffffff;
+ compiler->imm = (sljit_ins)src1w & 0xffffffff;
return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM2 | ALT_FORM4, dst, dstw, src2, src2w, TMP_REG2, 0);
}
}
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
- if ((op & (SLJIT_I32_OP | SLJIT_SET_Z)) == (SLJIT_I32_OP | SLJIT_SET_Z)) {
+ if ((op & (SLJIT_32 | SLJIT_SET_Z)) == (SLJIT_32 | SLJIT_SET_Z)) {
if (TEST_SL_IMM(src2, src2w)) {
- compiler->imm = src2w & 0xffff;
+ compiler->imm = (sljit_ins)src2w & 0xffff;
return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM4 | ALT_FORM5, dst, dstw, src1, src1w, TMP_REG2, 0);
}
if (TEST_SL_IMM(src1, src1w)) {
- compiler->imm = src1w & 0xffff;
+ compiler->imm = (sljit_ins)src1w & 0xffff;
return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM4 | ALT_FORM5, dst, dstw, src2, src2w, TMP_REG2, 0);
}
return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM4, dst, dstw, src1, src1w, src2, src2w);
@@ -1427,47 +1739,50 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
#endif
if (HAS_FLAGS(op)) {
if (TEST_SL_IMM(src2, src2w)) {
- compiler->imm = src2w & 0xffff;
+ compiler->imm = (sljit_ins)src2w & 0xffff;
return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM3, dst, dstw, src1, src1w, TMP_REG2, 0);
}
if (TEST_SL_IMM(src1, src1w)) {
- compiler->imm = src1w & 0xffff;
+ compiler->imm = (sljit_ins)src1w & 0xffff;
return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM3, dst, dstw, src2, src2w, TMP_REG2, 0);
}
}
- return emit_op(compiler, SLJIT_ADD, flags | ((GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY)) ? ALT_FORM5 : 0), dst, dstw, src1, src1w, src2, src2w);
+ return emit_op(compiler, SLJIT_ADD, flags | ((GET_FLAG_TYPE(op) == SLJIT_CARRY) ? ALT_FORM5 : 0), dst, dstw, src1, src1w, src2, src2w);
case SLJIT_ADDC:
+ compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD;
return emit_op(compiler, SLJIT_ADDC, flags, dst, dstw, src1, src1w, src2, src2w);
case SLJIT_SUB:
+ compiler->status_flags_state = SLJIT_CURRENT_FLAGS_SUB;
+
if (GET_FLAG_TYPE(op) >= SLJIT_LESS && GET_FLAG_TYPE(op) <= SLJIT_LESS_EQUAL) {
- if (dst == SLJIT_UNUSED) {
+ if (dst == TMP_REG2) {
if (TEST_UL_IMM(src2, src2w)) {
- compiler->imm = src2w & 0xffff;
+ compiler->imm = (sljit_ins)src2w & 0xffff;
return emit_op(compiler, SLJIT_SUB, flags | ALT_FORM1 | ALT_FORM2, dst, dstw, src1, src1w, TMP_REG2, 0);
}
return emit_op(compiler, SLJIT_SUB, flags | ALT_FORM1, dst, dstw, src1, src1w, src2, src2w);
}
- if ((src2 & SLJIT_IMM) && src2w >= 0 && src2w <= (SIMM_MAX + 1)) {
- compiler->imm = src2w;
+ if (src2 == SLJIT_IMM && src2w >= 0 && src2w <= (SIMM_MAX + 1)) {
+ compiler->imm = (sljit_ins)src2w;
return emit_op(compiler, SLJIT_SUB, flags | ALT_FORM1 | ALT_FORM2 | ALT_FORM3, dst, dstw, src1, src1w, TMP_REG2, 0);
}
return emit_op(compiler, SLJIT_SUB, flags | ALT_FORM1 | ALT_FORM3, dst, dstw, src1, src1w, src2, src2w);
}
- if (dst == SLJIT_UNUSED && GET_FLAG_TYPE(op) <= SLJIT_SIG_LESS_EQUAL) {
+ if (dst == TMP_REG2 && GET_FLAG_TYPE(op) <= SLJIT_SIG_LESS_EQUAL) {
if (TEST_SL_IMM(src2, src2w)) {
- compiler->imm = src2w & 0xffff;
+ compiler->imm = (sljit_ins)src2w & 0xffff;
return emit_op(compiler, SLJIT_SUB, flags | ALT_FORM2 | ALT_FORM3, dst, dstw, src1, src1w, TMP_REG2, 0);
}
return emit_op(compiler, SLJIT_SUB, flags | ALT_FORM2, dst, dstw, src1, src1w, src2, src2w);
}
if (TEST_SUB_FORM2(op)) {
- if ((src2 & SLJIT_IMM) && src2w >= -SIMM_MAX && src2w <= SIMM_MAX) {
- compiler->imm = src2w & 0xffff;
+ if (src2 == SLJIT_IMM && src2w >= -SIMM_MAX && src2w <= SIMM_MAX) {
+ compiler->imm = (sljit_ins)src2w & 0xffff;
return emit_op(compiler, SLJIT_SUB, flags | ALT_FORM2 | ALT_FORM3 | ALT_FORM4, dst, dstw, src1, src1w, TMP_REG2, 0);
}
return emit_op(compiler, SLJIT_SUB, flags | ALT_FORM2 | ALT_FORM4, dst, dstw, src1, src1w, src2, src2w);
@@ -1477,45 +1792,46 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
return emit_op(compiler, SLJIT_SUB, flags | ALT_FORM3, dst, dstw, src1, src1w, src2, src2w);
if (TEST_SL_IMM(src2, -src2w)) {
- compiler->imm = (-src2w) & 0xffff;
+ compiler->imm = (sljit_ins)(-src2w) & 0xffff;
return emit_op(compiler, SLJIT_ADD, flags | (!HAS_FLAGS(op) ? ALT_FORM2 : ALT_FORM3), dst, dstw, src1, src1w, TMP_REG2, 0);
}
if (TEST_SL_IMM(src1, src1w) && !(op & SLJIT_SET_Z)) {
- compiler->imm = src1w & 0xffff;
+ compiler->imm = (sljit_ins)src1w & 0xffff;
return emit_op(compiler, SLJIT_SUB, flags | ALT_FORM4, dst, dstw, src2, src2w, TMP_REG2, 0);
}
if (!HAS_FLAGS(op)) {
if (TEST_SH_IMM(src2, -src2w)) {
- compiler->imm = ((-src2w) >> 16) & 0xffff;
+ compiler->imm = (sljit_ins)((-src2w) >> 16) & 0xffff;
return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM2 | ALT_FORM3, dst, dstw, src1, src1w, TMP_REG2, 0);
}
/* Range between -1 and -32768 is covered above. */
if (TEST_ADD_IMM(src2, -src2w)) {
- compiler->imm = -src2w & 0xffffffff;
+ compiler->imm = (sljit_ins)-src2w;
return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM2 | ALT_FORM4, dst, dstw, src1, src1w, TMP_REG2, 0);
}
}
- /* We know ALT_SIGN_EXT is set if it is an SLJIT_I32_OP on 64 bit systems. */
- return emit_op(compiler, SLJIT_SUB, flags | ((GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY)) ? ALT_FORM5 : 0), dst, dstw, src1, src1w, src2, src2w);
+ /* We know ALT_SIGN_EXT is set if it is an SLJIT_32 on 64 bit systems. */
+ return emit_op(compiler, SLJIT_SUB, flags | ((GET_FLAG_TYPE(op) == SLJIT_CARRY) ? ALT_FORM5 : 0), dst, dstw, src1, src1w, src2, src2w);
case SLJIT_SUBC:
+ compiler->status_flags_state = SLJIT_CURRENT_FLAGS_SUB;
return emit_op(compiler, SLJIT_SUBC, flags, dst, dstw, src1, src1w, src2, src2w);
case SLJIT_MUL:
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
- if (op & SLJIT_I32_OP)
+ if (op & SLJIT_32)
flags |= ALT_FORM2;
#endif
if (!HAS_FLAGS(op)) {
if (TEST_SL_IMM(src2, src2w)) {
- compiler->imm = src2w & 0xffff;
+ compiler->imm = (sljit_ins)src2w & 0xffff;
return emit_op(compiler, SLJIT_MUL, flags | ALT_FORM1, dst, dstw, src1, src1w, TMP_REG2, 0);
}
if (TEST_SL_IMM(src1, src1w)) {
- compiler->imm = src1w & 0xffff;
+ compiler->imm = (sljit_ins)src1w & 0xffff;
return emit_op(compiler, SLJIT_MUL, flags | ALT_FORM1, dst, dstw, src2, src2w, TMP_REG2, 0);
}
}
@@ -1523,50 +1839,62 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
FAIL_IF(push_inst(compiler, MTXER | S(TMP_ZERO)));
return emit_op(compiler, SLJIT_MUL, flags, dst, dstw, src1, src1w, src2, src2w);
+ case SLJIT_XOR:
+ if (src2 == SLJIT_IMM && src2w == -1) {
+ return emit_op(compiler, GET_OPCODE(op), flags | ALT_FORM4, dst, dstw, TMP_REG1, 0, src1, src1w);
+ }
+ if (src1 == SLJIT_IMM && src1w == -1) {
+ return emit_op(compiler, GET_OPCODE(op), flags | ALT_FORM4, dst, dstw, TMP_REG1, 0, src2, src2w);
+ }
+ /* fallthrough */
case SLJIT_AND:
case SLJIT_OR:
- case SLJIT_XOR:
/* Commutative unsigned operations. */
if (!HAS_FLAGS(op) || GET_OPCODE(op) == SLJIT_AND) {
if (TEST_UL_IMM(src2, src2w)) {
- compiler->imm = src2w;
+ compiler->imm = (sljit_ins)src2w;
return emit_op(compiler, GET_OPCODE(op), flags | ALT_FORM1, dst, dstw, src1, src1w, TMP_REG2, 0);
}
if (TEST_UL_IMM(src1, src1w)) {
- compiler->imm = src1w;
+ compiler->imm = (sljit_ins)src1w;
return emit_op(compiler, GET_OPCODE(op), flags | ALT_FORM1, dst, dstw, src2, src2w, TMP_REG2, 0);
}
if (TEST_UH_IMM(src2, src2w)) {
- compiler->imm = (src2w >> 16) & 0xffff;
+ compiler->imm = (sljit_ins)(src2w >> 16) & 0xffff;
return emit_op(compiler, GET_OPCODE(op), flags | ALT_FORM2, dst, dstw, src1, src1w, TMP_REG2, 0);
}
if (TEST_UH_IMM(src1, src1w)) {
- compiler->imm = (src1w >> 16) & 0xffff;
+ compiler->imm = (sljit_ins)(src1w >> 16) & 0xffff;
return emit_op(compiler, GET_OPCODE(op), flags | ALT_FORM2, dst, dstw, src2, src2w, TMP_REG2, 0);
}
}
- if (GET_OPCODE(op) != SLJIT_AND && GET_OPCODE(op) != SLJIT_AND) {
- /* Unlike or and xor, and resets unwanted bits as well. */
+ if (!HAS_FLAGS(op) && GET_OPCODE(op) != SLJIT_AND) {
+ /* Unlike OR and XOR, AND also resets the unwanted bits. */
if (TEST_UI_IMM(src2, src2w)) {
- compiler->imm = src2w;
+ compiler->imm = (sljit_ins)src2w;
return emit_op(compiler, GET_OPCODE(op), flags | ALT_FORM3, dst, dstw, src1, src1w, TMP_REG2, 0);
}
if (TEST_UI_IMM(src1, src1w)) {
- compiler->imm = src1w;
+ compiler->imm = (sljit_ins)src1w;
return emit_op(compiler, GET_OPCODE(op), flags | ALT_FORM3, dst, dstw, src2, src2w, TMP_REG2, 0);
}
}
return emit_op(compiler, GET_OPCODE(op), flags, dst, dstw, src1, src1w, src2, src2w);
case SLJIT_SHL:
+ case SLJIT_MSHL:
case SLJIT_LSHR:
+ case SLJIT_MLSHR:
case SLJIT_ASHR:
+ case SLJIT_MASHR:
+ case SLJIT_ROTL:
+ case SLJIT_ROTR:
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
- if (op & SLJIT_I32_OP)
+ if (op & SLJIT_32)
flags |= ALT_FORM2;
#endif
- if (src2 & SLJIT_IMM) {
- compiler->imm = src2w;
+ if (src2 == SLJIT_IMM) {
+ compiler->imm = (sljit_ins)src2w;
return emit_op(compiler, GET_OPCODE(op), flags | ALT_FORM1, dst, dstw, src1, src1w, TMP_REG2, 0);
}
return emit_op(compiler, GET_OPCODE(op), flags, dst, dstw, src1, src1w, src2, src2w);
@@ -1575,10 +1903,130 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
return SLJIT_SUCCESS;
}
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2u(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2, sljit_sw src2w)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_op2(compiler, op, 1, 0, 0, src1, src1w, src2, src2w));
+
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_op2(compiler, op, TMP_REG2, 0, src1, src1w, src2, src2w);
+}
+
#undef TEST_ADD_FORM1
#undef TEST_SUB_FORM2
#undef TEST_SUB_FORM3
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_shift_into(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst_reg,
+ sljit_s32 src1_reg,
+ sljit_s32 src2_reg,
+ sljit_s32 src3, sljit_sw src3w)
+{
+ sljit_s32 is_right;
+#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
+ sljit_s32 inp_flags = ((op & SLJIT_32) ? INT_DATA : WORD_DATA) | LOAD_DATA;
+ sljit_sw bit_length = (op & SLJIT_32) ? 32 : 64;
+#else /* !SLJIT_CONFIG_PPC_64 */
+ sljit_s32 inp_flags = WORD_DATA | LOAD_DATA;
+ sljit_sw bit_length = 32;
+#endif /* SLJIT_CONFIG_PPC_64 */
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_shift_into(compiler, op, dst_reg, src1_reg, src2_reg, src3, src3w));
+
+ is_right = (GET_OPCODE(op) == SLJIT_LSHR || GET_OPCODE(op) == SLJIT_MLSHR);
+
+ if (src1_reg == src2_reg) {
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_op2(compiler, (is_right ? SLJIT_ROTR : SLJIT_ROTL) | (op & SLJIT_32), dst_reg, 0, src1_reg, 0, src3, src3w);
+ }
+
+ ADJUST_LOCAL_OFFSET(src3, src3w);
+
+ if (src3 == SLJIT_IMM) {
+ src3w &= bit_length - 1;
+
+ if (src3w == 0)
+ return SLJIT_SUCCESS;
+
+#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
+ if (!(op & SLJIT_32)) {
+ if (is_right) {
+ FAIL_IF(push_inst(compiler, SRDI(src3w) | S(src1_reg) | A(dst_reg)));
+ return push_inst(compiler, RLDIMI | S(src2_reg) | A(dst_reg) | RLDI_SH(64 - src3w) | RLDI_MB(0));
+ }
+
+ FAIL_IF(push_inst(compiler, SLDI(src3w) | S(src1_reg) | A(dst_reg)));
+ /* Computes SRDI(64 - src3w). */
+ FAIL_IF(push_inst(compiler, RLDICL | S(src2_reg) | A(TMP_REG1) | RLDI_SH(src3w) | RLDI_MB(64 - src3w)));
+ return push_inst(compiler, OR | S(dst_reg) | A(dst_reg) | B(TMP_REG1));
+ }
+#endif /* SLJIT_CONFIG_PPC_64 */
+
+ if (is_right) {
+ FAIL_IF(push_inst(compiler, SRWI(src3w) | S(src1_reg) | A(dst_reg)));
+ return push_inst(compiler, RLWIMI | S(src2_reg) | A(dst_reg) | RLWI_SH(32 - src3w) | RLWI_MBE(0, src3w - 1));
+ }
+
+ FAIL_IF(push_inst(compiler, SLWI(src3w) | S(src1_reg) | A(dst_reg)));
+ return push_inst(compiler, RLWIMI | S(src2_reg) | A(dst_reg) | RLWI_SH(src3w) | RLWI_MBE(32 - src3w, 31));
+ }
+
+ if (src3 & SLJIT_MEM) {
+ FAIL_IF(emit_op_mem(compiler, inp_flags, TMP_REG2, src3, src3w, TMP_REG2));
+ src3 = TMP_REG2;
+ }
+
+#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
+ if (!(op & SLJIT_32)) {
+ if (GET_OPCODE(op) == SLJIT_MSHL || GET_OPCODE(op) == SLJIT_MLSHR || dst_reg == src3) {
+ FAIL_IF(push_inst(compiler, ANDI | S(src3) | A(TMP_REG2) | 0x3f));
+ src3 = TMP_REG2;
+ }
+
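+ /* dst = (src1 shifted by n) | (src2 shifted the other way by 64 - n); pre-shifting
+    src2 by one and XOR-ing n with 63 yields the 64 - n shift with an xori instead of
+    a subtraction (descriptive comment, not in the upstream source). */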
+ FAIL_IF(push_inst(compiler, (is_right ? SRD : SLD) | S(src1_reg) | A(dst_reg) | B(src3)));
+ FAIL_IF(push_inst(compiler, (is_right ? SLDI(1) : SRDI(1)) | S(src2_reg) | A(TMP_REG1)));
+ FAIL_IF(push_inst(compiler, XORI | S(src3) | A(TMP_REG2) | 0x3f));
+ FAIL_IF(push_inst(compiler, (is_right ? SLD : SRD) | S(TMP_REG1) | A(TMP_REG1) | B(TMP_REG2)));
+ return push_inst(compiler, OR | S(dst_reg) | A(dst_reg) | B(TMP_REG1));
+ }
+#endif /* SLJIT_CONFIG_PPC_64 */
+
+ if (GET_OPCODE(op) == SLJIT_MSHL || GET_OPCODE(op) == SLJIT_MLSHR || dst_reg == src3) {
+ FAIL_IF(push_inst(compiler, ANDI | S(src3) | A(TMP_REG2) | 0x1f));
+ src3 = TMP_REG2;
+ }
+
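+ /* Same funnel-shift composition as the 64-bit path above, using 32-bit shifts and an XOR with 31 (descriptive comment, not in the upstream source). */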
+ FAIL_IF(push_inst(compiler, (is_right ? SRW : SLW) | S(src1_reg) | A(dst_reg) | B(src3)));
+ FAIL_IF(push_inst(compiler, (is_right ? SLWI(1) : SRWI(1)) | S(src2_reg) | A(TMP_REG1)));
+ FAIL_IF(push_inst(compiler, XORI | S(src3) | A(TMP_REG2) | 0x1f));
+ FAIL_IF(push_inst(compiler, (is_right ? SLW : SRW) | S(TMP_REG1) | A(TMP_REG1) | B(TMP_REG2)));
+ return push_inst(compiler, OR | S(dst_reg) | A(dst_reg) | B(TMP_REG1));
+}
+
+static sljit_s32 emit_prefetch(struct sljit_compiler *compiler,
+ sljit_s32 src, sljit_sw srcw)
+{
+ if (!(src & OFFS_REG_MASK)) {
+ if (srcw == 0 && (src & REG_MASK))
+ return push_inst(compiler, DCBT | A(0) | B(src & REG_MASK));
+
+ FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
+ /* Works with SLJIT_MEM0() case as well. */
+ return push_inst(compiler, DCBT | A(src & REG_MASK) | B(TMP_REG1));
+ }
+
+ srcw &= 0x3;
+
+ if (srcw == 0)
+ return push_inst(compiler, DCBT | A(src & REG_MASK) | B(OFFS_REG(src)));
+
+ FAIL_IF(push_inst(compiler, SLWI_W(srcw) | S(OFFS_REG(src)) | A(TMP_REG1)));
+ return push_inst(compiler, DCBT | A(src & REG_MASK) | B(TMP_REG1));
+}
+
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 src, sljit_sw srcw)
{
@@ -1591,7 +2039,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *comp
if (FAST_IS_REG(src))
FAIL_IF(push_inst(compiler, MTLR | S(src)));
else {
- FAIL_IF(emit_op(compiler, SLJIT_MOV, WORD_DATA, TMP_REG2, 0, TMP_REG1, 0, src, srcw));
+ FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_REG2, src, srcw, TMP_REG2));
FAIL_IF(push_inst(compiler, MTLR | S(TMP_REG2)));
}
@@ -1608,21 +2056,52 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *comp
return SLJIT_SUCCESS;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_dst(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw)
{
- CHECK_REG_INDEX(check_sljit_get_register_index(reg));
- return reg_map[reg];
+ sljit_s32 dst_r;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_op_dst(compiler, op, dst, dstw));
+ ADJUST_LOCAL_OFFSET(dst, dstw);
+
+ switch (op) {
+ case SLJIT_FAST_ENTER:
+ if (FAST_IS_REG(dst))
+ return push_inst(compiler, MFLR | D(dst));
+
+ FAIL_IF(push_inst(compiler, MFLR | D(TMP_REG1)));
+ break;
+ case SLJIT_GET_RETURN_ADDRESS:
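+ /* The entry code saved LR at local_size + LR_SAVE_OFFSET, in the LR save slot above the frame (descriptive comment, not in the upstream source). */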
+ dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
+ FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, dst_r, SLJIT_MEM1(SLJIT_SP), compiler->local_size + LR_SAVE_OFFSET, TMP_REG2));
+ break;
+ }
+
+ if (dst & SLJIT_MEM)
+ return emit_op_mem(compiler, WORD_DATA, TMP_REG1, dst, dstw, TMP_REG2);
+
+ return SLJIT_SUCCESS;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 type, sljit_s32 reg)
{
- CHECK_REG_INDEX(check_sljit_get_float_register_index(reg));
+ CHECK_REG_INDEX(check_sljit_get_register_index(type, reg));
+
+ if (type == SLJIT_GP_REGISTER)
+ return reg_map[reg];
+
+ if (type != SLJIT_FLOAT_REGISTER)
+ return -1;
+
return freg_map[reg];
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler,
- void *instruction, sljit_s32 size)
+ void *instruction, sljit_u32 size)
{
+ SLJIT_UNUSED_ARG(size);
+
CHECK_ERROR();
CHECK(check_sljit_emit_op_custom(compiler, instruction, size));
@@ -1633,23 +2112,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *c
/* Floating point operators */
/* --------------------------------------------------------------------- */
-#define FLOAT_DATA(op) (DOUBLE_DATA | ((op & SLJIT_F32_OP) >> 6))
-#define SELECT_FOP(op, single, double) ((op & SLJIT_F32_OP) ? single : double)
-
-#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
-#define FLOAT_TMP_MEM_OFFSET (6 * sizeof(sljit_sw))
-#else
-#define FLOAT_TMP_MEM_OFFSET (2 * sizeof(sljit_sw))
-
-#if (defined SLJIT_LITTLE_ENDIAN && SLJIT_LITTLE_ENDIAN)
-#define FLOAT_TMP_MEM_OFFSET_LOW (2 * sizeof(sljit_sw))
-#define FLOAT_TMP_MEM_OFFSET_HI (3 * sizeof(sljit_sw))
-#else
-#define FLOAT_TMP_MEM_OFFSET_LOW (3 * sizeof(sljit_sw))
-#define FLOAT_TMP_MEM_OFFSET_HI (2 * sizeof(sljit_sw))
-#endif
-
-#endif /* SLJIT_CONFIG_PPC_64 */
+#define SELECT_FOP(op, single, double) ((sljit_ins)((op & SLJIT_32) ? single : double))
static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
@@ -1667,19 +2130,19 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_comp
if (op == SLJIT_CONV_SW_FROM_F64) {
if (FAST_IS_REG(dst)) {
- FAIL_IF(emit_op_mem(compiler, DOUBLE_DATA, TMP_FREG1, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, TMP_REG1));
- return emit_op_mem(compiler, WORD_DATA | LOAD_DATA, dst, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, TMP_REG1);
+ FAIL_IF(push_inst(compiler, STFD | FS(TMP_FREG1) | A(SLJIT_SP) | TMP_MEM_OFFSET));
+ return push_inst(compiler, LD | S(dst) | A(SLJIT_SP) | TMP_MEM_OFFSET);
}
return emit_op_mem(compiler, DOUBLE_DATA, TMP_FREG1, dst, dstw, TMP_REG1);
}
-#else
+#else /* !SLJIT_CONFIG_PPC_64 */
FAIL_IF(push_inst(compiler, FCTIWZ | FD(TMP_FREG1) | FB(src)));
-#endif
+#endif /* SLJIT_CONFIG_PPC_64 */
if (FAST_IS_REG(dst)) {
- FAIL_IF(load_immediate(compiler, TMP_REG1, FLOAT_TMP_MEM_OFFSET));
+ FAIL_IF(load_immediate(compiler, TMP_REG1, TMP_MEM_OFFSET));
FAIL_IF(push_inst(compiler, STFIWX | FS(TMP_FREG1) | A(SLJIT_SP) | B(TMP_REG1)));
- return emit_op_mem(compiler, INT_DATA | LOAD_DATA, dst, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, TMP_REG1);
+ return push_inst(compiler, LWZ | S(dst) | A(SLJIT_SP) | TMP_MEM_OFFSET);
}
SLJIT_ASSERT(dst & SLJIT_MEM);
@@ -1687,22 +2150,16 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_comp
if (dst & OFFS_REG_MASK) {
dstw &= 0x3;
if (dstw) {
-#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32)
- FAIL_IF(push_inst(compiler, RLWINM | S(OFFS_REG(dst)) | A(TMP_REG1) | (dstw << 11) | ((31 - dstw) << 1)));
-#else
- FAIL_IF(push_inst(compiler, RLDI(TMP_REG1, OFFS_REG(dst), dstw, 63 - dstw, 1)));
-#endif
+ FAIL_IF(push_inst(compiler, SLWI_W(dstw) | S(OFFS_REG(dst)) | A(TMP_REG1)));
dstw = TMP_REG1;
- }
- else
+ } else
dstw = OFFS_REG(dst);
}
else {
if ((dst & REG_MASK) && !dstw) {
dstw = dst & REG_MASK;
dst = 0;
- }
- else {
+ } else {
/* This works regardless of whether we have SLJIT_MEM1 or SLJIT_MEM0. */
FAIL_IF(load_immediate(compiler, TMP_REG1, dstw));
dstw = TMP_REG1;
@@ -1712,84 +2169,6 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_comp
return push_inst(compiler, STFIWX | FS(TMP_FREG1) | A(dst & REG_MASK) | B(dstw));
}
-static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
- sljit_s32 dst, sljit_sw dstw,
- sljit_s32 src, sljit_sw srcw)
-{
-#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
-
- sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
-
- if (src & SLJIT_IMM) {
- if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32)
- srcw = (sljit_s32)srcw;
- FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
- src = TMP_REG1;
- }
- else if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32) {
- if (FAST_IS_REG(src))
- FAIL_IF(push_inst(compiler, EXTSW | S(src) | A(TMP_REG1)));
- else
- FAIL_IF(emit_op_mem(compiler, INT_DATA | SIGNED_DATA | LOAD_DATA, TMP_REG1, src, srcw, TMP_REG1));
- src = TMP_REG1;
- }
-
- if (FAST_IS_REG(src)) {
- FAIL_IF(emit_op_mem(compiler, WORD_DATA, src, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, TMP_REG1));
- FAIL_IF(emit_op_mem(compiler, DOUBLE_DATA | LOAD_DATA, TMP_FREG1, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, TMP_REG1));
- }
- else
- FAIL_IF(emit_op_mem(compiler, DOUBLE_DATA | LOAD_DATA, TMP_FREG1, src, srcw, TMP_REG1));
-
- FAIL_IF(push_inst(compiler, FCFID | FD(dst_r) | FB(TMP_FREG1)));
-
- if (dst & SLJIT_MEM)
- return emit_op_mem(compiler, FLOAT_DATA(op), TMP_FREG1, dst, dstw, TMP_REG1);
- if (op & SLJIT_F32_OP)
- return push_inst(compiler, FRSP | FD(dst_r) | FB(dst_r));
- return SLJIT_SUCCESS;
-
-#else
-
- sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
- sljit_s32 invert_sign = 1;
-
- if (src & SLJIT_IMM) {
- FAIL_IF(load_immediate(compiler, TMP_REG1, srcw ^ 0x80000000));
- src = TMP_REG1;
- invert_sign = 0;
- }
- else if (!FAST_IS_REG(src)) {
- FAIL_IF(emit_op_mem(compiler, WORD_DATA | SIGNED_DATA | LOAD_DATA, TMP_REG1, src, srcw, TMP_REG1));
- src = TMP_REG1;
- }
-
- /* First, a special double floating point value is constructed: (2^52 + (input xor (2^31)))
- The double precision format has exactly 53 bit precision, so the lower 32 bit represents
- the lower 32 bit of such value. The result of xor 2^31 is the same as adding 0x80000000
- to the input, which shifts it into the 0 - 0xffffffff range. To get the converted floating
- point value, we need to subtract 2^52 + 2^31 from the constructed value. */
- FAIL_IF(push_inst(compiler, ADDIS | D(TMP_REG2) | A(0) | 0x4330));
- if (invert_sign)
- FAIL_IF(push_inst(compiler, XORIS | S(src) | A(TMP_REG1) | 0x8000));
- FAIL_IF(emit_op_mem(compiler, WORD_DATA, TMP_REG2, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET_HI, TMP_REG1));
- FAIL_IF(emit_op_mem(compiler, WORD_DATA, TMP_REG1, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET_LOW, TMP_REG2));
- FAIL_IF(push_inst(compiler, ADDIS | D(TMP_REG1) | A(0) | 0x8000));
- FAIL_IF(emit_op_mem(compiler, DOUBLE_DATA | LOAD_DATA, TMP_FREG1, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, TMP_REG1));
- FAIL_IF(emit_op_mem(compiler, WORD_DATA, TMP_REG1, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET_LOW, TMP_REG2));
- FAIL_IF(emit_op_mem(compiler, DOUBLE_DATA | LOAD_DATA, TMP_FREG2, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, TMP_REG1));
-
- FAIL_IF(push_inst(compiler, FSUB | FD(dst_r) | FA(TMP_FREG1) | FB(TMP_FREG2)));
-
- if (dst & SLJIT_MEM)
- return emit_op_mem(compiler, FLOAT_DATA(op), TMP_FREG1, dst, dstw, TMP_REG1);
- if (op & SLJIT_F32_OP)
- return push_inst(compiler, FRSP | FD(dst_r) | FB(dst_r));
- return SLJIT_SUCCESS;
-
-#endif
-}
-
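
The comment in the removed helper is worth a worked model. A minimal C sketch of the same bit trick, assuming IEEE-754 doubles (my illustration, not part of the patch):

    #include <stdint.h>
    #include <string.h>

    /* Build 2^52 + (x ^ 2^31) bitwise, then subtract the constant
       2^52 + 2^31; the difference is exactly (double)x. */
    static double int32_to_double(int32_t x)
    {
        uint64_t value_bits = ((uint64_t)0x43300000u << 32) | ((uint32_t)x ^ 0x80000000u);
        uint64_t magic_bits = ((uint64_t)0x43300000u << 32) | 0x80000000u;
        double value, magic;

        memcpy(&value, &value_bits, sizeof(value));
        memcpy(&magic, &magic_bits, sizeof(magic));
        return value - magic;
    }
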
static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w)
@@ -1804,7 +2183,18 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compile
src2 = TMP_FREG2;
}
- return push_inst(compiler, FCMPU | CRD(4) | FA(src1) | FB(src2));
+ FAIL_IF(push_inst(compiler, FCMPU | CRD(4) | FA(src1) | FB(src2)));
+
+ switch (GET_FLAG_TYPE(op)) {
+ case SLJIT_UNORDERED_OR_EQUAL:
+ return push_inst(compiler, CROR | ((4 + 2) << 21) | ((4 + 2) << 16) | ((4 + 3) << 11));
+ case SLJIT_UNORDERED_OR_LESS:
+ return push_inst(compiler, CROR | ((4 + 0) << 21) | ((4 + 0) << 16) | ((4 + 3) << 11));
+ case SLJIT_UNORDERED_OR_GREATER:
+ return push_inst(compiler, CROR | ((4 + 1) << 21) | ((4 + 1) << 16) | ((4 + 3) << 11));
+ }
+
+ return SLJIT_SUCCESS;
}
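
The added CROR cases fold the unordered bit into the bit the later branch will test: FCMPU above writes CR field 1, whose four bits (4..7) are LT, GT, EQ and UN. A small model of what one emitted CROR computes (illustration only):

    /* CROR crbD, crbA, crbB sets one condition-register bit to
       crb[A] | crb[B]. For SLJIT_UNORDERED_OR_EQUAL the UN bit is
       ORed into EQ, so a plain branch on CR1[EQ] afterwards tests
       "equal or unordered". */
    static void cror(unsigned char crb[32], int d, int a, int b)
    {
        crb[d] = (unsigned char)(crb[a] | crb[b]);
    }
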
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op,
@@ -1815,11 +2205,11 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
CHECK_ERROR();
- SLJIT_COMPILE_ASSERT((SLJIT_F32_OP == 0x100) && !(DOUBLE_DATA & 0x4), float_transfer_bit_error);
+ SLJIT_COMPILE_ASSERT((SLJIT_32 == 0x100) && !(DOUBLE_DATA & 0x4), float_transfer_bit_error);
SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw);
if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32)
- op ^= SLJIT_F32_OP;
+ op ^= SLJIT_32;
dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
@@ -1830,8 +2220,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
switch (GET_OPCODE(op)) {
case SLJIT_CONV_F64_FROM_F32:
- op ^= SLJIT_F32_OP;
- if (op & SLJIT_F32_OP) {
+ op ^= SLJIT_32;
+ if (op & SLJIT_32) {
FAIL_IF(push_inst(compiler, FRSP | FD(dst_r) | FB(src)));
break;
}
@@ -1886,18 +2276,30 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compil
case SLJIT_ADD_F64:
FAIL_IF(push_inst(compiler, SELECT_FOP(op, FADDS, FADD) | FD(dst_r) | FA(src1) | FB(src2)));
break;
-
case SLJIT_SUB_F64:
FAIL_IF(push_inst(compiler, SELECT_FOP(op, FSUBS, FSUB) | FD(dst_r) | FA(src1) | FB(src2)));
break;
-
case SLJIT_MUL_F64:
FAIL_IF(push_inst(compiler, SELECT_FOP(op, FMULS, FMUL) | FD(dst_r) | FA(src1) | FC(src2) /* FMUL uses FC as src2 */));
break;
-
case SLJIT_DIV_F64:
FAIL_IF(push_inst(compiler, SELECT_FOP(op, FDIVS, FDIV) | FD(dst_r) | FA(src1) | FB(src2)));
break;
+ case SLJIT_COPYSIGN_F64:
+ FAIL_IF(push_inst(compiler, ((op & SLJIT_32) ? STFS : STFD) | FS(src2) | A(SLJIT_SP) | TMP_MEM_OFFSET));
+#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32)
+ FAIL_IF(push_inst(compiler, LWZ | S(TMP_REG1) | A(SLJIT_SP) | ((op & SLJIT_32) ? TMP_MEM_OFFSET : TMP_MEM_OFFSET_HI)));
+#else /* !SLJIT_CONFIG_PPC_32 */
+ FAIL_IF(push_inst(compiler, ((op & SLJIT_32) ? LWZ : LD) | S(TMP_REG1) | A(SLJIT_SP) | TMP_MEM_OFFSET));
+#endif /* SLJIT_CONFIG_PPC_32 */
+ FAIL_IF(push_inst(compiler, FABS | FD(dst_r) | FB(src1)));
+#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32)
+ FAIL_IF(push_inst(compiler, CMPI | CRD(0) | A(TMP_REG1) | 0));
+#else /* !SLJIT_CONFIG_PPC_32 */
+ FAIL_IF(push_inst(compiler, CMPI | CRD(0 | ((op & SLJIT_32) ? 0 : 1)) | A(TMP_REG1) | 0));
+#endif /* SLJIT_CONFIG_PPC_32 */
+ FAIL_IF(push_inst(compiler, BCx | (4 << 21) | (0 << 16) | 8));
+ return push_inst(compiler, FNEG | FD(dst_r) | FB(dst_r));
}
if (dst & SLJIT_MEM)
@@ -1908,22 +2310,24 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compil
#undef SELECT_FOP
-/* --------------------------------------------------------------------- */
-/* Other instructions */
-/* --------------------------------------------------------------------- */
-
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fset32(struct sljit_compiler *compiler,
+ sljit_s32 freg, sljit_f32 value)
{
+ union {
+ sljit_s32 imm;
+ sljit_f32 value;
+ } u;
+
CHECK_ERROR();
- CHECK(check_sljit_emit_fast_enter(compiler, dst, dstw));
- ADJUST_LOCAL_OFFSET(dst, dstw);
+ CHECK(check_sljit_emit_fset32(compiler, freg, value));
- if (FAST_IS_REG(dst))
- return push_inst(compiler, MFLR | D(dst));
+ u.value = value;
- /* Memory. */
- FAIL_IF(push_inst(compiler, MFLR | D(TMP_REG2)));
- return emit_op(compiler, SLJIT_MOV, WORD_DATA, dst, dstw, TMP_REG1, 0, TMP_REG2, 0);
+ if (u.imm != 0)
+ FAIL_IF(load_immediate(compiler, TMP_REG1, u.imm));
+
+ FAIL_IF(push_inst(compiler, STW | S(u.imm != 0 ? TMP_REG1 : TMP_ZERO) | A(SLJIT_SP) | TMP_MEM_OFFSET));
+ return push_inst(compiler, LFS | FS(freg) | A(SLJIT_SP) | TMP_MEM_OFFSET);
}
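
The union here is the usual bit-image pun: the constant travels as its IEEE-754 single image through a GPR and a stack slot, then LFS reinterprets it. The portable-C equivalent of the STW + LFS pair, as a sketch:

    #include <stdint.h>
    #include <string.h>

    static float f32_from_bits(int32_t bits)
    {
        float value;
        memcpy(&value, &bits, sizeof(value)); /* store the word, reload as float */
        return value;
    }
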
/* --------------------------------------------------------------------- */
@@ -1946,12 +2350,22 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compi
return label;
}
-static sljit_ins get_bo_bi_flags(sljit_s32 type)
+static sljit_ins get_bo_bi_flags(struct sljit_compiler *compiler, sljit_s32 type)
{
switch (type) {
+ case SLJIT_NOT_CARRY:
+ if (compiler->status_flags_state & SLJIT_CURRENT_FLAGS_SUB)
+ return (4 << 21) | (2 << 16);
+ /* fallthrough */
+
case SLJIT_EQUAL:
return (12 << 21) | (2 << 16);
+ case SLJIT_CARRY:
+ if (compiler->status_flags_state & SLJIT_CURRENT_FLAGS_SUB)
+ return (12 << 21) | (2 << 16);
+ /* fallthrough */
+
case SLJIT_NOT_EQUAL:
return (4 << 21) | (2 << 16);
@@ -1971,38 +2385,50 @@ static sljit_ins get_bo_bi_flags(sljit_s32 type)
case SLJIT_SIG_LESS_EQUAL:
return (4 << 21) | (1 << 16);
- case SLJIT_LESS_F64:
+ case SLJIT_OVERFLOW:
+ return (12 << 21) | (3 << 16);
+
+ case SLJIT_NOT_OVERFLOW:
+ return (4 << 21) | (3 << 16);
+
+ case SLJIT_F_LESS:
+ case SLJIT_ORDERED_LESS:
+ case SLJIT_UNORDERED_OR_LESS:
return (12 << 21) | ((4 + 0) << 16);
- case SLJIT_GREATER_EQUAL_F64:
+ case SLJIT_F_GREATER_EQUAL:
+ case SLJIT_ORDERED_GREATER_EQUAL:
+ case SLJIT_UNORDERED_OR_GREATER_EQUAL:
return (4 << 21) | ((4 + 0) << 16);
- case SLJIT_GREATER_F64:
+ case SLJIT_F_GREATER:
+ case SLJIT_ORDERED_GREATER:
+ case SLJIT_UNORDERED_OR_GREATER:
return (12 << 21) | ((4 + 1) << 16);
- case SLJIT_LESS_EQUAL_F64:
+ case SLJIT_F_LESS_EQUAL:
+ case SLJIT_ORDERED_LESS_EQUAL:
+ case SLJIT_UNORDERED_OR_LESS_EQUAL:
return (4 << 21) | ((4 + 1) << 16);
- case SLJIT_OVERFLOW:
- return (12 << 21) | (3 << 16);
-
- case SLJIT_NOT_OVERFLOW:
- return (4 << 21) | (3 << 16);
-
- case SLJIT_EQUAL_F64:
+ case SLJIT_F_EQUAL:
+ case SLJIT_ORDERED_EQUAL:
+ case SLJIT_UNORDERED_OR_EQUAL:
return (12 << 21) | ((4 + 2) << 16);
- case SLJIT_NOT_EQUAL_F64:
+ case SLJIT_F_NOT_EQUAL:
+ case SLJIT_ORDERED_NOT_EQUAL:
+ case SLJIT_UNORDERED_OR_NOT_EQUAL:
return (4 << 21) | ((4 + 2) << 16);
- case SLJIT_UNORDERED_F64:
+ case SLJIT_UNORDERED:
return (12 << 21) | ((4 + 3) << 16);
- case SLJIT_ORDERED_F64:
+ case SLJIT_ORDERED:
return (4 << 21) | ((4 + 3) << 16);
default:
- SLJIT_ASSERT(type >= SLJIT_JUMP && type <= SLJIT_CALL_CDECL);
+ SLJIT_ASSERT(type >= SLJIT_JUMP && type <= SLJIT_CALL_REG_ARG);
return (20 << 21);
}
}
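
The returned word holds the BO (bits 25:21) and BI (bits 20:16) fields of the PowerPC bcx encoding: BO 12 branches when the selected CR bit is set, BO 4 when it is clear, and BO 20 branches unconditionally; BI picks the bit, with 0..3 being CR0[LT, GT, EQ, SO] and 4..7 the CR1 bits used by the float compares. Two decoded examples (illustration):

    sljit_ins beq_fields = (12 << 21) | (2 << 16); /* SLJIT_EQUAL: branch if CR0[EQ] set */
    sljit_ins bne_fields = (4 << 21) | (2 << 16);  /* SLJIT_NOT_EQUAL: branch if CR0[EQ] clear */
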
@@ -2015,15 +2441,18 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile
CHECK_ERROR_PTR();
CHECK_PTR(check_sljit_emit_jump(compiler, type));
- bo_bi_flags = get_bo_bi_flags(type & 0xff);
+ bo_bi_flags = get_bo_bi_flags(compiler, type & 0xff);
if (!bo_bi_flags)
return NULL;
jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
PTR_FAIL_IF(!jump);
- set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP);
+ set_jump(jump, compiler, (sljit_u32)type & SLJIT_REWRITABLE_JUMP);
type &= 0xff;
+ if ((type | 0x1) == SLJIT_NOT_CARRY)
+ PTR_FAIL_IF(push_inst(compiler, ADDE | RC(ALT_SET_FLAGS) | D(TMP_REG1) | A(TMP_ZERO) | B(TMP_ZERO)));
+
/* In PPC, we don't need to touch the arguments. */
if (type < SLJIT_JUMP)
jump->flags |= IS_COND;
@@ -2042,18 +2471,22 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile
SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type,
sljit_s32 arg_types)
{
+ SLJIT_UNUSED_ARG(arg_types);
+
CHECK_ERROR_PTR();
CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types));
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
- PTR_FAIL_IF(call_with_args(compiler, arg_types, NULL));
+ if ((type & 0xff) != SLJIT_CALL_REG_ARG)
+ PTR_FAIL_IF(call_with_args(compiler, arg_types, NULL));
#endif
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
- || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->skip_checks = 1;
-#endif
+ if (type & SLJIT_CALL_RETURN) {
+ PTR_FAIL_IF(emit_stack_frame_release(compiler, 0));
+ type = SLJIT_JUMP | (type & SLJIT_REWRITABLE_JUMP);
+ }
+ SLJIT_SKIP_CHECKS(compiler);
return sljit_emit_jump(compiler, type);
}
@@ -2064,34 +2497,35 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi
CHECK_ERROR();
CHECK(check_sljit_emit_ijump(compiler, type, src, srcw));
- ADJUST_LOCAL_OFFSET(src, srcw);
if (FAST_IS_REG(src)) {
#if (defined SLJIT_PASS_ENTRY_ADDR_TO_CALL && SLJIT_PASS_ENTRY_ADDR_TO_CALL)
- if (type >= SLJIT_CALL) {
+ if (type >= SLJIT_CALL && src != TMP_CALL_REG) {
FAIL_IF(push_inst(compiler, OR | S(src) | A(TMP_CALL_REG) | B(src)));
src_r = TMP_CALL_REG;
}
else
src_r = src;
-#else
+#else /* !SLJIT_PASS_ENTRY_ADDR_TO_CALL */
src_r = src;
-#endif
- } else if (src & SLJIT_IMM) {
+#endif /* SLJIT_PASS_ENTRY_ADDR_TO_CALL */
+ } else if (src == SLJIT_IMM) {
/* These jumps are converted to jump/call instructions when possible. */
jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
FAIL_IF(!jump);
set_jump(jump, compiler, JUMP_ADDR);
- jump->u.target = srcw;
+ jump->u.target = (sljit_uw)srcw;
+
#if (defined SLJIT_PASS_ENTRY_ADDR_TO_CALL && SLJIT_PASS_ENTRY_ADDR_TO_CALL)
if (type >= SLJIT_CALL)
jump->flags |= IS_CALL;
-#endif
+#endif /* SLJIT_PASS_ENTRY_ADDR_TO_CALL */
+
FAIL_IF(emit_const(compiler, TMP_CALL_REG, 0));
src_r = TMP_CALL_REG;
- }
- else {
- FAIL_IF(emit_op(compiler, SLJIT_MOV, WORD_DATA, TMP_CALL_REG, 0, TMP_REG1, 0, src, srcw));
+ } else {
+ ADJUST_LOCAL_OFFSET(src, srcw);
+ FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_CALL_REG, src, srcw, TMP_CALL_REG));
src_r = TMP_CALL_REG;
}
@@ -2105,24 +2539,33 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compi
sljit_s32 arg_types,
sljit_s32 src, sljit_sw srcw)
{
+ SLJIT_UNUSED_ARG(arg_types);
+
CHECK_ERROR();
CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw));
-#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
if (src & SLJIT_MEM) {
ADJUST_LOCAL_OFFSET(src, srcw);
- FAIL_IF(emit_op(compiler, SLJIT_MOV, WORD_DATA, TMP_CALL_REG, 0, TMP_REG1, 0, src, srcw));
+ FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_CALL_REG, src, srcw, TMP_CALL_REG));
src = TMP_CALL_REG;
}
- FAIL_IF(call_with_args(compiler, arg_types, &src));
-#endif
+ if (type & SLJIT_CALL_RETURN) {
+ if (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) {
+ FAIL_IF(push_inst(compiler, OR | S(src) | A(TMP_CALL_REG) | B(src)));
+ src = TMP_CALL_REG;
+ }
+
+ FAIL_IF(emit_stack_frame_release(compiler, 0));
+ type = SLJIT_JUMP;
+ }
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
- || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->skip_checks = 1;
+#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
+ if ((type & 0xff) != SLJIT_CALL_REG_ARG)
+ FAIL_IF(call_with_args(compiler, arg_types, &src));
#endif
+ SLJIT_SKIP_CHECKS(compiler);
return sljit_emit_ijump(compiler, type, src, srcw);
}
@@ -2130,20 +2573,20 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
sljit_s32 dst, sljit_sw dstw,
sljit_s32 type)
{
- sljit_s32 reg, input_flags, cr_bit, invert;
+ sljit_s32 reg, invert;
+ sljit_u32 bit, from_xer;
sljit_s32 saved_op = op;
sljit_sw saved_dstw = dstw;
+#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
+ sljit_s32 input_flags = ((op & SLJIT_32) || op == SLJIT_MOV32) ? INT_DATA : WORD_DATA;
+#else
+ sljit_s32 input_flags = WORD_DATA;
+#endif
CHECK_ERROR();
CHECK(check_sljit_emit_op_flags(compiler, op, dst, dstw, type));
ADJUST_LOCAL_OFFSET(dst, dstw);
-#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
- input_flags = (op & SLJIT_I32_OP) ? INT_DATA : WORD_DATA;
-#else
- input_flags = WORD_DATA;
-#endif
-
op = GET_OPCODE(op);
reg = (op < SLJIT_ADD && FAST_IS_REG(dst)) ? dst : TMP_REG2;
@@ -2151,9 +2594,10 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
FAIL_IF(emit_op_mem(compiler, input_flags | LOAD_DATA, TMP_REG1, dst, dstw, TMP_REG1));
invert = 0;
- cr_bit = 0;
+ bit = 0;
+ from_xer = 0;
- switch (type & 0xff) {
+ switch (type) {
case SLJIT_LESS:
case SLJIT_SIG_LESS:
break;
@@ -2165,66 +2609,92 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
case SLJIT_GREATER:
case SLJIT_SIG_GREATER:
- cr_bit = 1;
+ bit = 1;
break;
case SLJIT_LESS_EQUAL:
case SLJIT_SIG_LESS_EQUAL:
- cr_bit = 1;
+ bit = 1;
invert = 1;
break;
case SLJIT_EQUAL:
- cr_bit = 2;
+ bit = 2;
break;
case SLJIT_NOT_EQUAL:
- cr_bit = 2;
+ bit = 2;
invert = 1;
break;
case SLJIT_OVERFLOW:
- cr_bit = 3;
+ from_xer = 1;
+ bit = 1;
break;
case SLJIT_NOT_OVERFLOW:
- cr_bit = 3;
+ from_xer = 1;
+ bit = 1;
invert = 1;
break;
- case SLJIT_LESS_F64:
- cr_bit = 4 + 0;
+ case SLJIT_CARRY:
+ from_xer = 1;
+ bit = 2;
+ invert = (compiler->status_flags_state & SLJIT_CURRENT_FLAGS_SUB) != 0;
+ break;
+
+ case SLJIT_NOT_CARRY:
+ from_xer = 1;
+ bit = 2;
+ invert = (compiler->status_flags_state & SLJIT_CURRENT_FLAGS_ADD) != 0;
+ break;
+
+ case SLJIT_F_LESS:
+ case SLJIT_ORDERED_LESS:
+ case SLJIT_UNORDERED_OR_LESS:
+ bit = 4 + 0;
break;
- case SLJIT_GREATER_EQUAL_F64:
- cr_bit = 4 + 0;
+ case SLJIT_F_GREATER_EQUAL:
+ case SLJIT_ORDERED_GREATER_EQUAL:
+ case SLJIT_UNORDERED_OR_GREATER_EQUAL:
+ bit = 4 + 0;
invert = 1;
break;
- case SLJIT_GREATER_F64:
- cr_bit = 4 + 1;
+ case SLJIT_F_GREATER:
+ case SLJIT_ORDERED_GREATER:
+ case SLJIT_UNORDERED_OR_GREATER:
+ bit = 4 + 1;
break;
- case SLJIT_LESS_EQUAL_F64:
- cr_bit = 4 + 1;
+ case SLJIT_F_LESS_EQUAL:
+ case SLJIT_ORDERED_LESS_EQUAL:
+ case SLJIT_UNORDERED_OR_LESS_EQUAL:
+ bit = 4 + 1;
invert = 1;
break;
- case SLJIT_EQUAL_F64:
- cr_bit = 4 + 2;
+ case SLJIT_F_EQUAL:
+ case SLJIT_ORDERED_EQUAL:
+ case SLJIT_UNORDERED_OR_EQUAL:
+ bit = 4 + 2;
break;
- case SLJIT_NOT_EQUAL_F64:
- cr_bit = 4 + 2;
+ case SLJIT_F_NOT_EQUAL:
+ case SLJIT_ORDERED_NOT_EQUAL:
+ case SLJIT_UNORDERED_OR_NOT_EQUAL:
+ bit = 4 + 2;
invert = 1;
break;
- case SLJIT_UNORDERED_F64:
- cr_bit = 4 + 3;
+ case SLJIT_UNORDERED:
+ bit = 4 + 3;
break;
- case SLJIT_ORDERED_F64:
- cr_bit = 4 + 3;
+ case SLJIT_ORDERED:
+ bit = 4 + 3;
invert = 1;
break;
@@ -2233,8 +2703,9 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
break;
}
- FAIL_IF(push_inst(compiler, MFCR | D(reg)));
- FAIL_IF(push_inst(compiler, RLWINM | S(reg) | A(reg) | ((1 + (cr_bit)) << 11) | (31 << 6) | (31 << 1)));
+ FAIL_IF(push_inst(compiler, (from_xer ? MFXER : MFCR) | D(reg)));
+ /* Simplified mnemonics: extrwi. */
+ FAIL_IF(push_inst(compiler, RLWINM | S(reg) | A(reg) | RLWI_SH(1 + bit) | RLWI_MBE(31, 31)));
if (invert)
FAIL_IF(push_inst(compiler, XORI | S(reg) | A(reg) | 0x1));
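
After MFCR, condition-register bit `bit` (numbered from the most significant end, as PowerPC does) sits at position 31 - bit from the LSB, so rotating left by 1 + bit and keeping only the 31..31 range isolates it. A C model of the extract (illustration):

    #include <stdint.h>

    static uint32_t extract_cr_bit(uint32_t cr, unsigned bit) /* bit is 0..7 here */
    {
        uint32_t rotated = (cr << (1 + bit)) | (cr >> (31 - bit)); /* rotlwi */
        return rotated & 0x1;                                      /* extrwi mask 31..31 */
    }
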
@@ -2245,35 +2716,204 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
return emit_op_mem(compiler, input_flags, reg, dst, dstw, TMP_REG1);
}
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
- || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->skip_checks = 1;
-#endif
+ SLJIT_SKIP_CHECKS(compiler);
+
if (dst & SLJIT_MEM)
return sljit_emit_op2(compiler, saved_op, dst, saved_dstw, TMP_REG1, 0, TMP_REG2, 0);
return sljit_emit_op2(compiler, saved_op, dst, 0, dst, 0, TMP_REG2, 0);
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compiler, sljit_s32 type,
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_select(struct sljit_compiler *compiler, sljit_s32 type,
sljit_s32 dst_reg,
- sljit_s32 src, sljit_sw srcw)
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2_reg)
+{
+ sljit_ins *ptr;
+ sljit_uw size;
+#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
+ sljit_s32 inp_flags = ((type & SLJIT_32) ? INT_DATA : WORD_DATA) | LOAD_DATA;
+#else /* !SLJIT_CONFIG_PPC_64 */
+ sljit_s32 inp_flags = WORD_DATA | LOAD_DATA;
+#endif /* SLJIT_CONFIG_PPC_64 */
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_select(compiler, type, dst_reg, src1, src1w, src2_reg));
+
+ ADJUST_LOCAL_OFFSET(src1, src1w);
+
+ if (dst_reg != src2_reg) {
+ if (dst_reg == src1) {
+ src1 = src2_reg;
+ src1w = 0;
+ type ^= 0x1;
+ } else {
+ if (ADDRESSING_DEPENDS_ON(src1, dst_reg)) {
+ FAIL_IF(push_inst(compiler, OR | S(dst_reg) | A(TMP_REG2) | B(dst_reg)));
+
+ if ((src1 & REG_MASK) == dst_reg)
+ src1 = (src1 & ~REG_MASK) | TMP_REG2;
+
+ if (OFFS_REG(src1) == dst_reg)
+ src1 = (src1 & ~OFFS_REG_MASK) | TO_OFFS_REG(TMP_REG2);
+ }
+
+ FAIL_IF(push_inst(compiler, OR | S(src2_reg) | A(dst_reg) | B(src2_reg)));
+ }
+ }
+
+ if (((type & ~SLJIT_32) | 0x1) == SLJIT_NOT_CARRY)
+ FAIL_IF(push_inst(compiler, ADDE | RC(ALT_SET_FLAGS) | D(TMP_REG1) | A(TMP_ZERO) | B(TMP_ZERO)));
+
+ size = compiler->size;
+
+ ptr = (sljit_ins*)ensure_buf(compiler, sizeof(sljit_ins));
+ FAIL_IF(!ptr);
+ compiler->size++;
+
+ if (src1 & SLJIT_MEM) {
+ FAIL_IF(emit_op_mem(compiler, inp_flags, dst_reg, src1, src1w, TMP_REG1));
+ } else if (src1 == SLJIT_IMM) {
+#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
+ if (type & SLJIT_32)
+ src1w = (sljit_s32)src1w;
+#endif /* SLJIT_CONFIG_PPC_64 */
+ FAIL_IF(load_immediate(compiler, dst_reg, src1w));
+ } else
+ FAIL_IF(push_inst(compiler, OR | S(src1) | A(dst_reg) | B(src1)));
+
+ *ptr = BCx | get_bo_bi_flags(compiler, (type ^ 0x1) & ~SLJIT_32) | (sljit_ins)((compiler->size - size) << 2);
+ return SLJIT_SUCCESS;
+}
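
sljit_emit_select works by reserving one instruction slot, emitting the move of src1 into dst_reg, and then backpatching the slot with a conditional branch over that move: type ^ 0x1 inverts the condition, and (compiler->size - size) << 2 is the byte displacement past the emitted sequence. A C model of the resulting control flow (illustration):

    static sljit_sw select_model(int condition, sljit_sw src1, sljit_sw src2)
    {
        sljit_sw dst = src2;   /* dst_reg already holds src2_reg here */
        if (condition)         /* the patched branch tests the inverse and skips */
            dst = src1;        /* the move that the branch may jump over */
        return dst;
    }
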
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fselect(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 dst_freg,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2_freg)
{
+ sljit_ins *ptr;
+ sljit_uw size;
+
CHECK_ERROR();
- CHECK(check_sljit_emit_cmov(compiler, type, dst_reg, src, srcw));
+ CHECK(check_sljit_emit_fselect(compiler, type, dst_freg, src1, src1w, src2_freg));
+
+ ADJUST_LOCAL_OFFSET(src1, src1w);
+
+ if (dst_freg != src2_freg) {
+ if (dst_freg == src1) {
+ src1 = src2_freg;
+ src1w = 0;
+ type ^= 0x1;
+ } else
+ FAIL_IF(push_inst(compiler, FMR | FD(dst_freg) | FB(src2_freg)));
+ }
+
+ if (((type & ~SLJIT_32) | 0x1) == SLJIT_NOT_CARRY)
+ FAIL_IF(push_inst(compiler, ADDE | RC(ALT_SET_FLAGS) | D(TMP_REG1) | A(TMP_ZERO) | B(TMP_ZERO)));
+
+ size = compiler->size;
+
+ ptr = (sljit_ins*)ensure_buf(compiler, sizeof(sljit_ins));
+ FAIL_IF(!ptr);
+ compiler->size++;
+
+ if (src1 & SLJIT_MEM)
+ FAIL_IF(emit_op_mem(compiler, FLOAT_DATA(type) | LOAD_DATA, dst_freg, src1, src1w, TMP_REG1));
+ else
+ FAIL_IF(push_inst(compiler, FMR | FD(dst_freg) | FB(src1)));
- return sljit_emit_cmov_generic(compiler, type, dst_reg, src, srcw);;
+ *ptr = BCx | get_bo_bi_flags(compiler, (type ^ 0x1) & ~SLJIT_32) | (sljit_ins)((compiler->size - size) << 2);
+ return SLJIT_SUCCESS;
}
+#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32)
+
+#define EMIT_MEM_LOAD_IMM(inst, mem, memw) \
+ ((sljit_s16)(memw) > SIMM_MAX - SSIZE_OF(sw))
+
+#else /* !SLJIT_CONFIG_PPC_32 */
+
+#define EMIT_MEM_LOAD_IMM(inst, mem, memw) \
+ ((((inst) & INT_ALIGNED) && ((memw) & 0x3) != 0) \
+ || ((sljit_s16)(memw) > SIMM_MAX - SSIZE_OF(sw)) \
+ || ((memw) > 0x7fff7fffl || (memw) < -0x80000000l)) \
+
+#endif /* SLJIT_CONFIG_PPC_32 */
+
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type,
sljit_s32 reg,
sljit_s32 mem, sljit_sw memw)
{
- sljit_s32 mem_flags;
sljit_ins inst;
CHECK_ERROR();
CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw));
+ if (!(reg & REG_PAIR_MASK))
+ return sljit_emit_mem_unaligned(compiler, type, reg, mem, memw);
+
+ ADJUST_LOCAL_OFFSET(mem, memw);
+
+ inst = data_transfer_insts[WORD_DATA | ((type & SLJIT_MEM_STORE) ? 0 : LOAD_DATA)];
+
+ if (SLJIT_UNLIKELY(mem & OFFS_REG_MASK)) {
+ memw &= 0x3;
+
+ if (memw != 0) {
+ FAIL_IF(push_inst(compiler, SLWI_W(memw) | S(OFFS_REG(mem)) | A(TMP_REG1)));
+ FAIL_IF(push_inst(compiler, ADD | D(TMP_REG1) | A(TMP_REG1) | B(mem & REG_MASK)));
+ } else
+ FAIL_IF(push_inst(compiler, ADD | D(TMP_REG1) | A(mem & REG_MASK) | B(OFFS_REG(mem))));
+
+ mem = TMP_REG1;
+ memw = 0;
+ } else {
+ if (EMIT_MEM_LOAD_IMM(inst, mem, memw)) {
+ if ((mem & REG_MASK) != 0) {
+ SLJIT_SKIP_CHECKS(compiler);
+ FAIL_IF(sljit_emit_op2(compiler, SLJIT_ADD, TMP_REG1, 0, mem & REG_MASK, 0, SLJIT_IMM, memw));
+ } else
+ FAIL_IF(load_immediate(compiler, TMP_REG1, memw));
+
+ memw = 0;
+ mem = TMP_REG1;
+ } else if (memw > SIMM_MAX || memw < SIMM_MIN) {
+ FAIL_IF(push_inst(compiler, ADDIS | D(TMP_REG1) | A(mem & REG_MASK) | IMM((memw + 0x8000) >> 16)));
+
+ memw &= 0xffff;
+ mem = TMP_REG1;
+ } else {
+ memw &= 0xffff;
+ mem &= REG_MASK;
+ }
+ }
+
+ SLJIT_ASSERT((memw >= 0 && memw <= SIMM_MAX - SSIZE_OF(sw)) || (memw >= 0x8000 && memw <= 0xffff));
+
+#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
+ inst &= (sljit_ins)~INT_ALIGNED;
+#endif /* SLJIT_CONFIG_PPC_64 */
+
+ if (!(type & SLJIT_MEM_STORE) && mem == REG_PAIR_FIRST(reg)) {
+ FAIL_IF(push_inst(compiler, inst | D(REG_PAIR_SECOND(reg)) | A(mem) | IMM(memw + SSIZE_OF(sw))));
+ return push_inst(compiler, inst | D(REG_PAIR_FIRST(reg)) | A(mem) | IMM(memw));
+ }
+
+ FAIL_IF(push_inst(compiler, inst | D(REG_PAIR_FIRST(reg)) | A(mem) | IMM(memw)));
+ return push_inst(compiler, inst | D(REG_PAIR_SECOND(reg)) | A(mem) | IMM(memw + SSIZE_OF(sw)));
+}
+
+#undef EMIT_MEM_LOAD_IMM
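
A register pair is moved with two ordinary word transfers at memw and memw + sizeof(sljit_sw); the final special case flips the order because loading the first register of the pair would clobber the base before the second load. A hypothetical call that hits that case (sketch; assumes the SLJIT_REG_PAIR operand form):

    /* Load a pair through a base that is also the pair's first register:
       the second word is emitted first so the base survives. */
    sljit_emit_mem(compiler, SLJIT_MOV, /* no SLJIT_MEM_STORE, so a load */
        SLJIT_REG_PAIR(SLJIT_R0, SLJIT_R1), SLJIT_MEM1(SLJIT_R0), 8);
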
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem_update(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 reg,
+ sljit_s32 mem, sljit_sw memw)
+{
+ sljit_s32 mem_flags;
+ sljit_ins inst;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_mem_update(compiler, type, reg, mem, memw));
+
if (type & SLJIT_MEM_POST)
return SLJIT_ERR_UNSUPPORTED;
@@ -2283,19 +2923,21 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compile
#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32)
case SLJIT_MOV_U32:
case SLJIT_MOV_S32:
+ case SLJIT_MOV32:
#endif
mem_flags = WORD_DATA;
break;
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
case SLJIT_MOV_U32:
+ case SLJIT_MOV32:
mem_flags = INT_DATA;
break;
case SLJIT_MOV_S32:
mem_flags = INT_DATA;
- if (!(type & SLJIT_MEM_STORE) && !(type & SLJIT_I32_OP)) {
+ if (!(type & SLJIT_MEM_STORE) && !(type & SLJIT_32)) {
if (mem & OFFS_REG_MASK)
mem_flags |= SIGNED_DATA;
else
@@ -2358,7 +3000,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compile
return SLJIT_SUCCESS;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compiler, sljit_s32 type,
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem_update(struct sljit_compiler *compiler, sljit_s32 type,
sljit_s32 freg,
sljit_s32 mem, sljit_sw memw)
{
@@ -2366,7 +3008,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compil
sljit_ins inst;
CHECK_ERROR();
- CHECK(check_sljit_emit_fmem(compiler, type, freg, mem, memw));
+ CHECK(check_sljit_emit_fmem_update(compiler, type, freg, mem, memw));
if (type & SLJIT_MEM_POST)
return SLJIT_ERR_UNSUPPORTED;
@@ -2414,7 +3056,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compi
PTR_FAIL_IF(emit_const(compiler, dst_r, init_value));
if (dst & SLJIT_MEM)
- PTR_FAIL_IF(emit_op(compiler, SLJIT_MOV, WORD_DATA, dst, dstw, TMP_REG1, 0, TMP_REG2, 0));
+ PTR_FAIL_IF(emit_op_mem(compiler, WORD_DATA, dst_r, dst, dstw, TMP_REG1));
return const_;
}
@@ -2436,7 +3078,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_put_label* sljit_emit_put_label(struct slj
#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32)
PTR_FAIL_IF(emit_const(compiler, dst_r, 0));
#else
- PTR_FAIL_IF(push_inst(compiler, dst_r));
+ PTR_FAIL_IF(push_inst(compiler, (sljit_ins)dst_r));
compiler->size += 4;
#endif
@@ -2445,3 +3087,8 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_put_label* sljit_emit_put_label(struct slj
return put_label;
}
+
+SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset)
+{
+ sljit_set_jump_addr(addr, (sljit_uw)new_constant, executable_offset);
+}
diff --git a/src/3rdparty/pcre2/src/sljit/sljitNativeRISCV_32.c b/src/3rdparty/pcre2/src/sljit/sljitNativeRISCV_32.c
new file mode 100644
index 0000000000..396c956c19
--- /dev/null
+++ b/src/3rdparty/pcre2/src/sljit/sljitNativeRISCV_32.c
@@ -0,0 +1,142 @@
+/*
+ * Stack-less Just-In-Time compiler
+ *
+ * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification, are
+ * permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this list of
+ * conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice, this list
+ * of conditions and the following disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 dst_r, sljit_sw imm, sljit_s32 tmp_r)
+{
+ SLJIT_UNUSED_ARG(tmp_r);
+
+ if (imm <= SIMM_MAX && imm >= SIMM_MIN)
+ return push_inst(compiler, ADDI | RD(dst_r) | RS1(TMP_ZERO) | IMM_I(imm));
+
+ if (imm & 0x800)
+ imm += 0x1000;
+
+ FAIL_IF(push_inst(compiler, LUI | RD(dst_r) | (sljit_ins)(imm & ~0xfff)));
+
+ if ((imm & 0xfff) == 0)
+ return SLJIT_SUCCESS;
+
+ return push_inst(compiler, ADDI | RD(dst_r) | RS1(dst_r) | IMM_I(imm));
+}
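
ADDI sign-extends its 12-bit immediate, so when bit 11 of the constant is set the LUI half is pre-incremented by 0x1000 to cancel the negative low half. A standalone check of the split (illustration):

    #include <stdint.h>

    /* Recombine the LUI/ADDI halves the way the hardware does. */
    static int32_t lui_addi(int32_t imm)
    {
        uint32_t adjusted = (imm & 0x800) ? (uint32_t)imm + 0x1000u : (uint32_t)imm;
        uint32_t hi = adjusted & ~0xfffu;                    /* LUI payload */
        int32_t lo = (int32_t)(((imm & 0xfff) ^ 0x800) - 0x800); /* sign-extended ADDI field */
        return (int32_t)(hi + (uint32_t)lo);                 /* == imm */
    }
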
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fset64(struct sljit_compiler *compiler,
+ sljit_s32 freg, sljit_f64 value)
+{
+ union {
+ sljit_s32 imm[2];
+ sljit_f64 value;
+ } u;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fset64(compiler, freg, value));
+
+ u.value = value;
+
+ if (u.imm[0] != 0)
+ FAIL_IF(load_immediate(compiler, TMP_REG1, u.imm[0], TMP_REG3));
+ if (u.imm[1] != 0)
+ FAIL_IF(load_immediate(compiler, TMP_REG2, u.imm[1], TMP_REG3));
+
+ FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RS1(SLJIT_SP) | IMM_I(-16)));
+ FAIL_IF(push_inst(compiler, SW | RS1(SLJIT_SP) | RS2(u.imm[0] != 0 ? TMP_REG1 : TMP_ZERO) | (8 << 7)));
+ FAIL_IF(push_inst(compiler, SW | RS1(SLJIT_SP) | RS2(u.imm[1] != 0 ? TMP_REG2 : TMP_ZERO) | (12 << 7)));
+ FAIL_IF(push_inst(compiler, FLD | FRD(freg) | RS1(SLJIT_SP) | IMM_I(8)));
+ return push_inst(compiler, ADDI | RD(SLJIT_SP) | RS1(SLJIT_SP) | IMM_I(16));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fcopy(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 freg, sljit_s32 reg)
+{
+ sljit_ins inst;
+ sljit_s32 reg2 = 0;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fcopy(compiler, op, freg, reg));
+
+ if (op & SLJIT_32) {
+ if (op == SLJIT_COPY32_TO_F32)
+ inst = FMV_W_X | RS1(reg) | FRD(freg);
+ else
+ inst = FMV_X_W | FRS1(freg) | RD(reg);
+
+ return push_inst(compiler, inst);
+ }
+
+ FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RS1(SLJIT_SP) | IMM_I(-16)));
+
+ if (reg & REG_PAIR_MASK) {
+ reg2 = REG_PAIR_SECOND(reg);
+ reg = REG_PAIR_FIRST(reg);
+ }
+
+ if (op == SLJIT_COPY_TO_F64) {
+ if (reg2 != 0)
+ FAIL_IF(push_inst(compiler, SW | RS1(SLJIT_SP) | RS2(reg2) | (8 << 7)));
+ else
+ FAIL_IF(push_inst(compiler, FSW | RS1(SLJIT_SP) | FRS2(freg) | (8 << 7)));
+
+ FAIL_IF(push_inst(compiler, SW | RS1(SLJIT_SP) | RS2(reg) | (12 << 7)));
+ FAIL_IF(push_inst(compiler, FLD | FRD(freg) | RS1(SLJIT_SP) | IMM_I(8)));
+ } else {
+ FAIL_IF(push_inst(compiler, FSD | RS1(SLJIT_SP) | FRS2(freg) | (8 << 7)));
+
+ if (reg2 != 0)
+ FAIL_IF(push_inst(compiler, FMV_X_W | FRS1(freg) | RD(reg2)));
+
+ FAIL_IF(push_inst(compiler, LW | RD(reg) | RS1(SLJIT_SP) | IMM_I(12)));
+ }
+
+ return push_inst(compiler, ADDI | RD(SLJIT_SP) | RS1(SLJIT_SP) | IMM_I(16));
+}
+
+static SLJIT_INLINE sljit_s32 emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw init_value, sljit_ins last_ins)
+{
+ if ((init_value & 0x800) != 0)
+ init_value += 0x1000;
+
+ FAIL_IF(push_inst(compiler, LUI | RD(dst) | (sljit_ins)(init_value & ~0xfff)));
+ return push_inst(compiler, last_ins | RS1(dst) | IMM_I(init_value));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_target, sljit_sw executable_offset)
+{
+ sljit_ins *inst = (sljit_ins*)addr;
+ SLJIT_UNUSED_ARG(executable_offset);
+
+ if ((new_target & 0x800) != 0)
+ new_target += 0x1000;
+
+ SLJIT_UPDATE_WX_FLAGS(inst, inst + 5, 0);
+
+ SLJIT_ASSERT((inst[0] & 0x7f) == LUI);
+ inst[0] = (inst[0] & 0xfff) | (sljit_ins)((sljit_sw)new_target & ~0xfff);
+ SLJIT_ASSERT((inst[1] & 0x707f) == ADDI || (inst[1] & 0x707f) == JALR);
+ inst[1] = (inst[1] & 0xfffff) | IMM_I(new_target);
+
+ SLJIT_UPDATE_WX_FLAGS(inst, inst + 5, 1);
+ inst = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
+ SLJIT_CACHE_FLUSH(inst, inst + 5);
+}
diff --git a/src/3rdparty/pcre2/src/sljit/sljitNativeRISCV_64.c b/src/3rdparty/pcre2/src/sljit/sljitNativeRISCV_64.c
new file mode 100644
index 0000000000..7fcf2c5273
--- /dev/null
+++ b/src/3rdparty/pcre2/src/sljit/sljitNativeRISCV_64.c
@@ -0,0 +1,222 @@
+/*
+ * Stack-less Just-In-Time compiler
+ *
+ * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification, are
+ * permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this list of
+ * conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice, this list
+ * of conditions and the following disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 dst_r, sljit_sw imm, sljit_s32 tmp_r)
+{
+ sljit_sw high;
+
+ if (imm <= SIMM_MAX && imm >= SIMM_MIN)
+ return push_inst(compiler, ADDI | RD(dst_r) | RS1(TMP_ZERO) | IMM_I(imm));
+
+ if (imm <= 0x7fffffffl && imm >= S32_MIN) {
+ if (imm > S32_MAX) {
+ SLJIT_ASSERT((imm & 0x800) != 0);
+ FAIL_IF(push_inst(compiler, LUI | RD(dst_r) | (sljit_ins)0x80000000u));
+ return push_inst(compiler, XORI | RD(dst_r) | RS1(dst_r) | IMM_I(imm));
+ }
+
+ if ((imm & 0x800) != 0)
+ imm += 0x1000;
+
+ FAIL_IF(push_inst(compiler, LUI | RD(dst_r) | (sljit_ins)(imm & ~0xfff)));
+
+ if ((imm & 0xfff) == 0)
+ return SLJIT_SUCCESS;
+
+ return push_inst(compiler, ADDI | RD(dst_r) | RS1(dst_r) | IMM_I(imm));
+ }
+
+ /* Trailing zeroes could be used to produce shifted immediates. */
+
+ if (imm <= 0x7ffffffffffl && imm >= -0x80000000000l) {
+ high = imm >> 12;
+
+ if (imm & 0x800)
+ high = ~high;
+
+ if (high > S32_MAX) {
+ SLJIT_ASSERT((high & 0x800) != 0);
+ FAIL_IF(push_inst(compiler, LUI | RD(dst_r) | (sljit_ins)0x80000000u));
+ FAIL_IF(push_inst(compiler, XORI | RD(dst_r) | RS1(dst_r) | IMM_I(high)));
+ } else {
+ if ((high & 0x800) != 0)
+ high += 0x1000;
+
+ FAIL_IF(push_inst(compiler, LUI | RD(dst_r) | (sljit_ins)(high & ~0xfff)));
+
+ if ((high & 0xfff) != 0)
+ FAIL_IF(push_inst(compiler, ADDI | RD(dst_r) | RS1(dst_r) | IMM_I(high)));
+ }
+
+ FAIL_IF(push_inst(compiler, SLLI | RD(dst_r) | RS1(dst_r) | IMM_I(12)));
+
+ if ((imm & 0xfff) != 0)
+ return push_inst(compiler, XORI | RD(dst_r) | RS1(dst_r) | IMM_I(imm));
+
+ return SLJIT_SUCCESS;
+ }
+
+ SLJIT_ASSERT(dst_r != tmp_r);
+
+ high = imm >> 32;
+ imm = (sljit_s32)imm;
+
+ if ((imm & 0x80000000l) != 0)
+ high = ~high;
+
+ if (high <= 0x7ffff && high >= -0x80000) {
+ FAIL_IF(push_inst(compiler, LUI | RD(tmp_r) | (sljit_ins)(high << 12)));
+ high = 0x1000;
+ } else {
+ if ((high & 0x800) != 0)
+ high += 0x1000;
+
+ FAIL_IF(push_inst(compiler, LUI | RD(tmp_r) | (sljit_ins)(high & ~0xfff)));
+ high &= 0xfff;
+ }
+
+ if (imm <= SIMM_MAX && imm >= SIMM_MIN) {
+ FAIL_IF(push_inst(compiler, ADDI | RD(dst_r) | RS1(TMP_ZERO) | IMM_I(imm)));
+ imm = 0;
+ } else if (imm > S32_MAX) {
+ SLJIT_ASSERT((imm & 0x800) != 0);
+
+ FAIL_IF(push_inst(compiler, LUI | RD(dst_r) | (sljit_ins)0x80000000u));
+ imm = 0x1000 | (imm & 0xfff);
+ } else {
+ if ((imm & 0x800) != 0)
+ imm += 0x1000;
+
+ FAIL_IF(push_inst(compiler, LUI | RD(dst_r) | (sljit_ins)(imm & ~0xfff)));
+ imm &= 0xfff;
+ }
+
+ if ((high & 0xfff) != 0)
+ FAIL_IF(push_inst(compiler, ADDI | RD(tmp_r) | RS1(tmp_r) | IMM_I(high)));
+
+ if (imm & 0x1000)
+ FAIL_IF(push_inst(compiler, XORI | RD(dst_r) | RS1(dst_r) | IMM_I(imm)));
+ else if (imm != 0)
+ FAIL_IF(push_inst(compiler, ADDI | RD(dst_r) | RS1(dst_r) | IMM_I(imm)));
+
+ FAIL_IF(push_inst(compiler, SLLI | RD(tmp_r) | RS1(tmp_r) | IMM_I((high & 0x1000) ? 20 : 32)));
+ return push_inst(compiler, XOR | RD(dst_r) | RS1(dst_r) | RS2(tmp_r));
+}
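
The 44-bit branch above materializes imm >> 12 in 32 bits, shifts it left by 12, then XORs in the sign-extended low 12 bits; complementing `high` when bit 11 is set cancels that sign extension. A model of the recombination step (illustration; the LUI/ADDI mechanics of building `high` itself are elided):

    #include <stdint.h>

    static int64_t model_imm44(int64_t imm)
    {
        int64_t high = imm >> 12;
        int64_t low = ((imm & 0xfff) ^ 0x800) - 0x800;       /* sext12, what XORI adds */
        if (imm & 0x800)
            high = ~high;
        return (int64_t)((uint64_t)high << 12) ^ low;        /* == imm */
    }
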
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fset64(struct sljit_compiler *compiler,
+ sljit_s32 freg, sljit_f64 value)
+{
+ union {
+ sljit_sw imm;
+ sljit_f64 value;
+ } u;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fset64(compiler, freg, value));
+
+ u.value = value;
+
+ if (u.imm == 0)
+ return push_inst(compiler, FMV_W_X | (1 << 25) | RS1(TMP_ZERO) | FRD(freg));
+
+ FAIL_IF(load_immediate(compiler, TMP_REG1, u.imm, TMP_REG3));
+ return push_inst(compiler, FMV_W_X | (1 << 25) | RS1(TMP_REG1) | FRD(freg));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fcopy(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 freg, sljit_s32 reg)
+{
+ sljit_ins inst;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fcopy(compiler, op, freg, reg));
+
+ if (GET_OPCODE(op) == SLJIT_COPY_TO_F64)
+ inst = FMV_W_X | RS1(reg) | FRD(freg);
+ else
+ inst = FMV_X_W | FRS1(freg) | RD(reg);
+
+ if (!(op & SLJIT_32))
+ inst |= (sljit_ins)1 << 25;
+
+ return push_inst(compiler, inst);
+}
+
+static SLJIT_INLINE sljit_s32 emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw init_value, sljit_ins last_ins)
+{
+ sljit_sw high;
+
+ if ((init_value & 0x800) != 0)
+ init_value += 0x1000;
+
+ high = init_value >> 32;
+
+ if ((init_value & 0x80000000l) != 0)
+ high = ~high;
+
+ if ((high & 0x800) != 0)
+ high += 0x1000;
+
+ FAIL_IF(push_inst(compiler, LUI | RD(TMP_REG3) | (sljit_ins)(high & ~0xfff)));
+ FAIL_IF(push_inst(compiler, ADDI | RD(TMP_REG3) | RS1(TMP_REG3) | IMM_I(high)));
+ FAIL_IF(push_inst(compiler, LUI | RD(dst) | (sljit_ins)(init_value & ~0xfff)));
+ FAIL_IF(push_inst(compiler, SLLI | RD(TMP_REG3) | RS1(TMP_REG3) | IMM_I(32)));
+ FAIL_IF(push_inst(compiler, XOR | RD(dst) | RS1(dst) | RS2(TMP_REG3)));
+ return push_inst(compiler, last_ins | RS1(dst) | IMM_I(init_value));
+}
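
In this sequence LUI+ADDI leave the sign-extended low word in dst, the second LUI+ADDI pair builds the upper word, and SLLI+XOR merge the halves; `high` is complemented when bit 31 of the low word is set, since the sign extension flips every upper bit of dst. A model of the merge (illustration, abstracting the per-half materialization):

    #include <stdint.h>

    static int64_t model_const64(int64_t v)
    {
        int64_t lo = (int32_t)v;                        /* low word, sign-extended */
        int64_t high = v >> 32;
        if (v & INT64_C(0x80000000))
            high = ~high;                               /* XOR then undoes the sign bits */
        return lo ^ (int64_t)((uint64_t)high << 32);    /* SLLI + XOR, == v */
    }
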
+
+SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_target, sljit_sw executable_offset)
+{
+ sljit_ins *inst = (sljit_ins*)addr;
+ sljit_sw high;
+ SLJIT_UNUSED_ARG(executable_offset);
+
+ if ((new_target & 0x800) != 0)
+ new_target += 0x1000;
+
+ high = (sljit_sw)new_target >> 32;
+
+ if ((new_target & 0x80000000l) != 0)
+ high = ~high;
+
+ if ((high & 0x800) != 0)
+ high += 0x1000;
+
+ SLJIT_UPDATE_WX_FLAGS(inst, inst + 5, 0);
+
+ SLJIT_ASSERT((inst[0] & 0x7f) == LUI);
+ inst[0] = (inst[0] & 0xfff) | (sljit_ins)(high & ~0xfff);
+ SLJIT_ASSERT((inst[1] & 0x707f) == ADDI);
+ inst[1] = (inst[1] & 0xfffff) | IMM_I(high);
+ SLJIT_ASSERT((inst[2] & 0x7f) == LUI);
+ inst[2] = (inst[2] & 0xfff) | (sljit_ins)((sljit_sw)new_target & ~0xfff);
+ SLJIT_ASSERT((inst[5] & 0x707f) == ADDI || (inst[5] & 0x707f) == JALR);
+ inst[5] = (inst[5] & 0xfffff) | IMM_I(new_target);
+ SLJIT_UPDATE_WX_FLAGS(inst, inst + 5, 1);
+
+ inst = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
+ SLJIT_CACHE_FLUSH(inst, inst + 5);
+}
diff --git a/src/3rdparty/pcre2/src/sljit/sljitNativeRISCV_common.c b/src/3rdparty/pcre2/src/sljit/sljitNativeRISCV_common.c
new file mode 100644
index 0000000000..64bd411d9d
--- /dev/null
+++ b/src/3rdparty/pcre2/src/sljit/sljitNativeRISCV_common.c
@@ -0,0 +1,3013 @@
+/*
+ * Stack-less Just-In-Time compiler
+ *
+ * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification, are
+ * permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this list of
+ * conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice, this list
+ * of conditions and the following disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void)
+{
+#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32)
+ return "RISC-V-32" SLJIT_CPUINFO;
+#else /* !SLJIT_CONFIG_RISCV_32 */
+ return "RISC-V-64" SLJIT_CPUINFO;
+#endif /* SLJIT_CONFIG_RISCV_32 */
+}
+
+/* Length of an instruction word; the same for both riscv-32 and riscv-64. */
+typedef sljit_u32 sljit_ins;
+
+#define TMP_REG1 (SLJIT_NUMBER_OF_REGISTERS + 2)
+#define TMP_REG2 (SLJIT_NUMBER_OF_REGISTERS + 3)
+#define TMP_REG3 (SLJIT_NUMBER_OF_REGISTERS + 4)
+#define TMP_ZERO 0
+
+/* Flags are kept in volatile registers. */
+#define EQUAL_FLAG (SLJIT_NUMBER_OF_REGISTERS + 5)
+#define RETURN_ADDR_REG TMP_REG2
+#define OTHER_FLAG (SLJIT_NUMBER_OF_REGISTERS + 6)
+
+#define TMP_FREG1 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1)
+#define TMP_FREG2 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 2)
+
+static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 7] = {
+ 0, 10, 11, 12, 13, 14, 15, 16, 17, 29, 30, 31, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 9, 8, 2, 6, 1, 7, 5, 28
+};
+
+static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
+ 0, 10, 11, 12, 13, 14, 15, 16, 17, 2, 3, 4, 5, 6, 7, 28, 29, 30, 31, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 9, 8, 0, 1,
+};
+
+/* --------------------------------------------------------------------- */
+/* Instruction forms */
+/* --------------------------------------------------------------------- */
+
+#define RD(rd) ((sljit_ins)reg_map[rd] << 7)
+#define RS1(rs1) ((sljit_ins)reg_map[rs1] << 15)
+#define RS2(rs2) ((sljit_ins)reg_map[rs2] << 20)
+#define FRD(rd) ((sljit_ins)freg_map[rd] << 7)
+#define FRS1(rs1) ((sljit_ins)freg_map[rs1] << 15)
+#define FRS2(rs2) ((sljit_ins)freg_map[rs2] << 20)
+#define IMM_I(imm) ((sljit_ins)(imm) << 20)
+#define IMM_S(imm) ((((sljit_ins)(imm) & 0xfe0) << 20) | (((sljit_ins)(imm) & 0x1f) << 7))
+
+/* Represents funct(i) parts of the instructions. */
+#define OPC(o) ((sljit_ins)(o))
+#define F3(f) ((sljit_ins)(f) << 12)
+#define F12(f) ((sljit_ins)(f) << 20)
+#define F7(f) ((sljit_ins)(f) << 25)
+
+#define ADD (F7(0x0) | F3(0x0) | OPC(0x33))
+#define ADDI (F3(0x0) | OPC(0x13))
+#define AND (F7(0x0) | F3(0x7) | OPC(0x33))
+#define ANDI (F3(0x7) | OPC(0x13))
+#define AUIPC (OPC(0x17))
+#define BEQ (F3(0x0) | OPC(0x63))
+#define BNE (F3(0x1) | OPC(0x63))
+#define BLT (F3(0x4) | OPC(0x63))
+#define BGE (F3(0x5) | OPC(0x63))
+#define BLTU (F3(0x6) | OPC(0x63))
+#define BGEU (F3(0x7) | OPC(0x63))
+#define DIV (F7(0x1) | F3(0x4) | OPC(0x33))
+#define DIVU (F7(0x1) | F3(0x5) | OPC(0x33))
+#define EBREAK (F12(0x1) | F3(0x0) | OPC(0x73))
+#define FADD_S (F7(0x0) | F3(0x7) | OPC(0x53))
+#define FDIV_S (F7(0xc) | F3(0x7) | OPC(0x53))
+#define FEQ_S (F7(0x50) | F3(0x2) | OPC(0x53))
+#define FLD (F3(0x3) | OPC(0x7))
+#define FLE_S (F7(0x50) | F3(0x0) | OPC(0x53))
+#define FLT_S (F7(0x50) | F3(0x1) | OPC(0x53))
+/* These conversion opcodes are only partly defined here; the missing fields are added at the use sites. */
+#define FCVT_S_D (F7(0x20) | OPC(0x53))
+#define FCVT_S_W (F7(0x68) | OPC(0x53))
+#define FCVT_S_WU (F7(0x68) | F12(0x1) | OPC(0x53))
+#define FCVT_W_S (F7(0x60) | F3(0x1) | OPC(0x53))
+#define FMUL_S (F7(0x8) | F3(0x7) | OPC(0x53))
+#define FMV_X_W (F7(0x70) | F3(0x0) | OPC(0x53))
+#define FMV_W_X (F7(0x78) | F3(0x0) | OPC(0x53))
+#define FSD (F3(0x3) | OPC(0x27))
+#define FSGNJ_S (F7(0x10) | F3(0x0) | OPC(0x53))
+#define FSGNJN_S (F7(0x10) | F3(0x1) | OPC(0x53))
+#define FSGNJX_S (F7(0x10) | F3(0x2) | OPC(0x53))
+#define FSUB_S (F7(0x4) | F3(0x7) | OPC(0x53))
+#define FSW (F3(0x2) | OPC(0x27))
+#define JAL (OPC(0x6f))
+#define JALR (F3(0x0) | OPC(0x67))
+#define LD (F3(0x3) | OPC(0x3))
+#define LUI (OPC(0x37))
+#define LW (F3(0x2) | OPC(0x3))
+#define MUL (F7(0x1) | F3(0x0) | OPC(0x33))
+#define MULH (F7(0x1) | F3(0x1) | OPC(0x33))
+#define MULHU (F7(0x1) | F3(0x3) | OPC(0x33))
+#define OR (F7(0x0) | F3(0x6) | OPC(0x33))
+#define ORI (F3(0x6) | OPC(0x13))
+#define REM (F7(0x1) | F3(0x6) | OPC(0x33))
+#define REMU (F7(0x1) | F3(0x7) | OPC(0x33))
+#define SD (F3(0x3) | OPC(0x23))
+#define SLL (F7(0x0) | F3(0x1) | OPC(0x33))
+#define SLLI (IMM_I(0x0) | F3(0x1) | OPC(0x13))
+#define SLT (F7(0x0) | F3(0x2) | OPC(0x33))
+#define SLTI (F3(0x2) | OPC(0x13))
+#define SLTU (F7(0x0) | F3(0x3) | OPC(0x33))
+#define SLTUI (F3(0x3) | OPC(0x13))
+#define SRL (F7(0x0) | F3(0x5) | OPC(0x33))
+#define SRLI (IMM_I(0x0) | F3(0x5) | OPC(0x13))
+#define SRA (F7(0x20) | F3(0x5) | OPC(0x33))
+#define SRAI (IMM_I(0x400) | F3(0x5) | OPC(0x13))
+#define SUB (F7(0x20) | F3(0x0) | OPC(0x33))
+#define SW (F3(0x2) | OPC(0x23))
+#define XOR (F7(0x0) | F3(0x4) | OPC(0x33))
+#define XORI (F3(0x4) | OPC(0x13))
+
+#define SIMM_MAX (0x7ff)
+#define SIMM_MIN (-0x800)
+#define BRANCH_MAX (0xfff)
+#define BRANCH_MIN (-0x1000)
+#define JUMP_MAX (0xfffff)
+#define JUMP_MIN (-0x100000)
+
+#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64)
+#define S32_MAX (0x7ffff7ffl)
+#define S32_MIN (-0x80000000l)
+#define S44_MAX (0x7fffffff7ffl)
+#define S52_MAX (0x7ffffffffffffl)
+#endif
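
These field macros compose whole instruction words; for example "addi a0, zero, 42" is ADDI with rd = x10 and the immediate in bits 31:20 (illustration; raw register numbers are used here instead of the reg_map-based RD/RS1 helpers). Note also that S32_MAX is 0x7ffff7ff rather than 0x7fffffff: values above it have bit 11 set, so the +0x1000 rounding applied before LUI would overflow, and load_immediate routes them through the LUI 0x80000000 / XORI pair instead.

    /* addi a0, zero, 42 == 0x02a00513 (a0 is x10, zero is x0) */
    sljit_ins li_a0_42 = ADDI | ((sljit_ins)10 << 7) | ((sljit_ins)0 << 15) | IMM_I(42);
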
+
+static sljit_s32 push_inst(struct sljit_compiler *compiler, sljit_ins ins)
+{
+ sljit_ins *ptr = (sljit_ins*)ensure_buf(compiler, sizeof(sljit_ins));
+ FAIL_IF(!ptr);
+ *ptr = ins;
+ compiler->size++;
+ return SLJIT_SUCCESS;
+}
+
+static sljit_s32 push_imm_s_inst(struct sljit_compiler *compiler, sljit_ins ins, sljit_sw imm)
+{
+ return push_inst(compiler, ins | IMM_S(imm));
+}
+
+static SLJIT_INLINE sljit_ins* detect_jump_type(struct sljit_jump *jump, sljit_ins *code, sljit_sw executable_offset)
+{
+ sljit_sw diff;
+ sljit_uw target_addr;
+ sljit_ins *inst;
+
+ inst = (sljit_ins *)jump->addr;
+
+ if (jump->flags & SLJIT_REWRITABLE_JUMP)
+ goto exit;
+
+ if (jump->flags & JUMP_ADDR)
+ target_addr = jump->u.target;
+ else {
+ SLJIT_ASSERT(jump->flags & JUMP_LABEL);
+ target_addr = (sljit_uw)(code + jump->u.label->size) + (sljit_uw)executable_offset;
+ }
+
+ diff = (sljit_sw)target_addr - (sljit_sw)inst - executable_offset;
+
+ if (jump->flags & IS_COND) {
+ inst--;
+ diff += SSIZE_OF(ins);
+
+ if (diff >= BRANCH_MIN && diff <= BRANCH_MAX) {
+ jump->flags |= PATCH_B;
+ inst[0] = (inst[0] & 0x1fff07f) ^ 0x1000;
+ jump->addr = (sljit_uw)inst;
+ return inst;
+ }
+
+ inst++;
+ diff -= SSIZE_OF(ins);
+ }
+
+ if (diff >= JUMP_MIN && diff <= JUMP_MAX) {
+ if (jump->flags & IS_COND) {
+#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32)
+ inst[-1] -= (sljit_ins)(1 * sizeof(sljit_ins)) << 7;
+#else
+ inst[-1] -= (sljit_ins)(5 * sizeof(sljit_ins)) << 7;
+#endif
+ }
+
+ jump->flags |= PATCH_J;
+ return inst;
+ }
+
+#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64)
+ if (diff >= S32_MIN && diff <= S32_MAX) {
+ if (jump->flags & IS_COND)
+ inst[-1] -= (sljit_ins)(4 * sizeof(sljit_ins)) << 7;
+
+ jump->flags |= PATCH_REL32;
+ inst[1] = inst[0];
+ return inst + 1;
+ }
+
+ if (target_addr <= (sljit_uw)S32_MAX) {
+ if (jump->flags & IS_COND)
+ inst[-1] -= (sljit_ins)(4 * sizeof(sljit_ins)) << 7;
+
+ jump->flags |= PATCH_ABS32;
+ inst[1] = inst[0];
+ return inst + 1;
+ }
+
+ if (target_addr <= S44_MAX) {
+ if (jump->flags & IS_COND)
+ inst[-1] -= (sljit_ins)(2 * sizeof(sljit_ins)) << 7;
+
+ jump->flags |= PATCH_ABS44;
+ inst[3] = inst[0];
+ return inst + 3;
+ }
+
+ if (target_addr <= S52_MAX) {
+ if (jump->flags & IS_COND)
+ inst[-1] -= (sljit_ins)(1 * sizeof(sljit_ins)) << 7;
+
+ jump->flags |= PATCH_ABS52;
+ inst[4] = inst[0];
+ return inst + 4;
+ }
+#endif
+
+exit:
+#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32)
+ inst[1] = inst[0];
+ return inst + 1;
+#else
+ inst[5] = inst[0];
+ return inst + 5;
+#endif
+}
+
+#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64)
+
+static SLJIT_INLINE sljit_sw put_label_get_length(struct sljit_put_label *put_label, sljit_uw max_label)
+{
+ if (max_label <= (sljit_uw)S32_MAX) {
+ put_label->flags = PATCH_ABS32;
+ return 1;
+ }
+
+ if (max_label <= S44_MAX) {
+ put_label->flags = PATCH_ABS44;
+ return 3;
+ }
+
+ if (max_label <= S52_MAX) {
+ put_label->flags = PATCH_ABS52;
+ return 4;
+ }
+
+ put_label->flags = 0;
+ return 5;
+}
+
+#endif /* SLJIT_CONFIG_RISCV_64 */
+
+static SLJIT_INLINE void load_addr_to_reg(void *dst, sljit_u32 reg)
+{
+ struct sljit_jump *jump = NULL;
+ struct sljit_put_label *put_label;
+ sljit_uw flags;
+ sljit_ins *inst;
+#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64)
+ sljit_sw high;
+#endif
+ sljit_uw addr;
+
+ if (reg != 0) {
+ jump = (struct sljit_jump*)dst;
+ flags = jump->flags;
+ inst = (sljit_ins*)jump->addr;
+ addr = (flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target;
+ } else {
+ put_label = (struct sljit_put_label*)dst;
+#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64)
+ flags = put_label->flags;
+#endif
+ inst = (sljit_ins*)put_label->addr;
+ addr = put_label->label->addr;
+ reg = *inst;
+ }
+
+ if ((addr & 0x800) != 0)
+ addr += 0x1000;
+
+#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32)
+ inst[0] = LUI | RD(reg) | (sljit_ins)((sljit_sw)addr & ~0xfff);
+#else /* !SLJIT_CONFIG_RISCV_32 */
+
+ if (flags & PATCH_ABS32) {
+ SLJIT_ASSERT(addr <= S32_MAX);
+ inst[0] = LUI | RD(reg) | (sljit_ins)((sljit_sw)addr & ~0xfff);
+ } else if (flags & PATCH_ABS44) {
+ high = (sljit_sw)addr >> 12;
+ SLJIT_ASSERT((sljit_uw)high <= 0x7fffffff);
+
+ if (high > S32_MAX) {
+ SLJIT_ASSERT((high & 0x800) != 0);
+ inst[0] = LUI | RD(reg) | (sljit_ins)0x80000000u;
+ inst[1] = XORI | RD(reg) | RS1(reg) | IMM_I(high);
+ } else {
+ if ((high & 0x800) != 0)
+ high += 0x1000;
+
+ inst[0] = LUI | RD(reg) | (sljit_ins)(high & ~0xfff);
+ inst[1] = ADDI | RD(reg) | RS1(reg) | IMM_I(high);
+ }
+
+ inst[2] = SLLI | RD(reg) | RS1(reg) | IMM_I(12);
+ inst += 2;
+ } else {
+ high = (sljit_sw)addr >> 32;
+
+ if ((addr & 0x80000000l) != 0)
+ high = ~high;
+
+ if (flags & PATCH_ABS52) {
+ SLJIT_ASSERT(addr <= S52_MAX);
+ inst[0] = LUI | RD(TMP_REG3) | (sljit_ins)(high << 12);
+ } else {
+ if ((high & 0x800) != 0)
+ high += 0x1000;
+ inst[0] = LUI | RD(TMP_REG3) | (sljit_ins)(high & ~0xfff);
+ inst[1] = ADDI | RD(TMP_REG3) | RS1(TMP_REG3) | IMM_I(high);
+ inst++;
+ }
+
+ inst[1] = LUI | RD(reg) | (sljit_ins)((sljit_sw)addr & ~0xfff);
+ inst[2] = SLLI | RD(TMP_REG3) | RS1(TMP_REG3) | IMM_I((flags & PATCH_ABS52) ? 20 : 32);
+ inst[3] = XOR | RD(reg) | RS1(reg) | RS2(TMP_REG3);
+ inst += 3;
+ }
+#endif /* !SLJIT_CONFIG_RISCV_32 */
+
+ if (jump != NULL) {
+ SLJIT_ASSERT((inst[1] & 0x707f) == JALR);
+ inst[1] = (inst[1] & 0xfffff) | IMM_I(addr);
+ } else
+ inst[1] = ADDI | RD(reg) | RS1(reg) | IMM_I(addr);
+}
+
+SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler)
+{
+ struct sljit_memory_fragment *buf;
+ sljit_ins *code;
+ sljit_ins *code_ptr;
+ sljit_ins *buf_ptr;
+ sljit_ins *buf_end;
+ sljit_uw word_count;
+ sljit_uw next_addr;
+ sljit_sw executable_offset;
+ sljit_uw addr;
+
+ struct sljit_label *label;
+ struct sljit_jump *jump;
+ struct sljit_const *const_;
+ struct sljit_put_label *put_label;
+
+ CHECK_ERROR_PTR();
+ CHECK_PTR(check_sljit_generate_code(compiler));
+ reverse_buf(compiler);
+
+ code = (sljit_ins*)SLJIT_MALLOC_EXEC(compiler->size * sizeof(sljit_ins), compiler->exec_allocator_data);
+ PTR_FAIL_WITH_EXEC_IF(code);
+ buf = compiler->buf;
+
+ code_ptr = code;
+ word_count = 0;
+ next_addr = 0;
+ executable_offset = SLJIT_EXEC_OFFSET(code);
+
+ label = compiler->labels;
+ jump = compiler->jumps;
+ const_ = compiler->consts;
+ put_label = compiler->put_labels;
+
+ do {
+ buf_ptr = (sljit_ins*)buf->memory;
+ buf_end = buf_ptr + (buf->used_size >> 2);
+ do {
+ *code_ptr = *buf_ptr++;
+ if (next_addr == word_count) {
+ SLJIT_ASSERT(!label || label->size >= word_count);
+ SLJIT_ASSERT(!jump || jump->addr >= word_count);
+ SLJIT_ASSERT(!const_ || const_->addr >= word_count);
+ SLJIT_ASSERT(!put_label || put_label->addr >= word_count);
+
+ /* These structures are ordered by their address. */
+ if (label && label->size == word_count) {
+ label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
+ label->size = (sljit_uw)(code_ptr - code);
+ label = label->next;
+ }
+ if (jump && jump->addr == word_count) {
+#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32)
+ word_count += 1;
+#else
+ word_count += 5;
+#endif
+ jump->addr = (sljit_uw)code_ptr;
+ code_ptr = detect_jump_type(jump, code, executable_offset);
+ jump = jump->next;
+ }
+ if (const_ && const_->addr == word_count) {
+ const_->addr = (sljit_uw)code_ptr;
+ const_ = const_->next;
+ }
+ if (put_label && put_label->addr == word_count) {
+ SLJIT_ASSERT(put_label->label);
+ put_label->addr = (sljit_uw)code_ptr;
+#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32)
+ code_ptr += 1;
+ word_count += 1;
+#else
+ code_ptr += put_label_get_length(put_label, (sljit_uw)(SLJIT_ADD_EXEC_OFFSET(code, executable_offset) + put_label->label->size));
+ word_count += 5;
+#endif
+ put_label = put_label->next;
+ }
+ next_addr = compute_next_addr(label, jump, const_, put_label);
+ }
+ code_ptr++;
+ word_count++;
+ } while (buf_ptr < buf_end);
+
+ buf = buf->next;
+ } while (buf);
+
+ if (label && label->size == word_count) {
+ label->addr = (sljit_uw)code_ptr;
+ label->size = (sljit_uw)(code_ptr - code);
+ label = label->next;
+ }
+
+ SLJIT_ASSERT(!label);
+ SLJIT_ASSERT(!jump);
+ SLJIT_ASSERT(!const_);
+ SLJIT_ASSERT(!put_label);
+ SLJIT_ASSERT(code_ptr - code <= (sljit_sw)compiler->size);
+
+ jump = compiler->jumps;
+ while (jump) {
+ do {
+ if (!(jump->flags & (PATCH_B | PATCH_J | PATCH_REL32))) {
+ load_addr_to_reg(jump, TMP_REG1);
+ break;
+ }
+
+ addr = (jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target;
+ buf_ptr = (sljit_ins *)jump->addr;
+ addr -= (sljit_uw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset);
+
+ if (jump->flags & PATCH_B) {
+ SLJIT_ASSERT((sljit_sw)addr >= BRANCH_MIN && (sljit_sw)addr <= BRANCH_MAX);
+ addr = ((addr & 0x800) >> 4) | ((addr & 0x1e) << 7) | ((addr & 0x7e0) << 20) | ((addr & 0x1000) << 19);
+ buf_ptr[0] |= (sljit_ins)addr;
+ break;
+ }
+
+#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64)
+ if (jump->flags & PATCH_REL32) {
+ SLJIT_ASSERT((sljit_sw)addr >= S32_MIN && (sljit_sw)addr <= S32_MAX);
+
+ if ((addr & 0x800) != 0)
+ addr += 0x1000;
+
+ buf_ptr[0] = AUIPC | RD(TMP_REG1) | (sljit_ins)((sljit_sw)addr & ~0xfff);
+ SLJIT_ASSERT((buf_ptr[1] & 0x707f) == JALR);
+ buf_ptr[1] |= IMM_I(addr);
+ break;
+ }
+#endif
+
+ SLJIT_ASSERT((sljit_sw)addr >= JUMP_MIN && (sljit_sw)addr <= JUMP_MAX);
+ addr = (addr & 0xff000) | ((addr & 0x800) << 9) | ((addr & 0x7fe) << 20) | ((addr & 0x100000) << 11);
+ buf_ptr[0] = JAL | RD((jump->flags & IS_CALL) ? RETURN_ADDR_REG : TMP_ZERO) | (sljit_ins)addr;
+ } while (0);
+ jump = jump->next;
+ }
+
+ put_label = compiler->put_labels;
+ while (put_label) {
+ load_addr_to_reg(put_label, 0);
+ put_label = put_label->next;
+ }
+
+ compiler->error = SLJIT_ERR_COMPILED;
+ compiler->executable_offset = executable_offset;
+ compiler->executable_size = (sljit_uw)(code_ptr - code) * sizeof(sljit_ins);
+
+ code = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(code, executable_offset);
+ code_ptr = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
+
+ SLJIT_CACHE_FLUSH(code, code_ptr);
+ SLJIT_UPDATE_WX_FLAGS(code, code_ptr, 1);
+ return code;
+}
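+
+/* Note on sizing (illustrative): on 64-bit targets, word_count above is
+   advanced by five words per jump and per put_label, which appears to reserve
+   room for the worst-case multi-instruction address-materialization sequence
+   patched in by load_addr_to_reg; on 32-bit targets one word suffices. */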
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
+{
+ switch (feature_type) {
+ case SLJIT_HAS_FPU:
+#ifdef SLJIT_IS_FPU_AVAILABLE
+ return (SLJIT_IS_FPU_AVAILABLE) != 0;
+#elif defined(__riscv_float_abi_soft)
+ return 0;
+#else
+ return 1;
+#endif /* SLJIT_IS_FPU_AVAILABLE */
+ case SLJIT_HAS_ZERO_REGISTER:
+ case SLJIT_HAS_COPY_F32:
+#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64)
+ case SLJIT_HAS_COPY_F64:
+#endif /* SLJIT_CONFIG_RISCV_64 */
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_cmp_info(sljit_s32 type)
+{
+ switch (type) {
+ case SLJIT_UNORDERED_OR_EQUAL:
+ case SLJIT_ORDERED_NOT_EQUAL:
+ return 2;
+
+ case SLJIT_UNORDERED:
+ case SLJIT_ORDERED:
+ return 1;
+ }
+
+ return 0;
+}
+
+/* --------------------------------------------------------------------- */
+/* Entry, exit */
+/* --------------------------------------------------------------------- */
+
+/* Creates an index in data_transfer_insts array. */
+#define LOAD_DATA 0x01
+#define WORD_DATA 0x00
+#define BYTE_DATA 0x02
+#define HALF_DATA 0x04
+#define INT_DATA 0x06
+#define SIGNED_DATA 0x08
+/* Separates integer and floating point registers */
+#define GPR_REG 0x0f
+#define DOUBLE_DATA 0x10
+#define SINGLE_DATA 0x12
+
+#define MEM_MASK 0x1f
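+
+/* Worked example (illustrative): these flags combine into an index into
+   data_transfer_insts below: INT_DATA | SIGNED_DATA | LOAD_DATA == 0x0f picks
+   the "s i l" entry (lw), and DOUBLE_DATA | LOAD_DATA == 0x11 picks "d l" (fld). */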
+
+#define ARG_TEST 0x00020
+#define ALT_KEEP_CACHE 0x00040
+#define CUMULATIVE_OP 0x00080
+#define IMM_OP 0x00100
+#define MOVE_OP 0x00200
+#define SRC2_IMM 0x00400
+
+#define UNUSED_DEST 0x00800
+#define REG_DEST 0x01000
+#define REG1_SOURCE 0x02000
+#define REG2_SOURCE 0x04000
+#define SLOW_SRC1 0x08000
+#define SLOW_SRC2 0x10000
+#define SLOW_DEST 0x20000
+
+#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32)
+#define STACK_STORE SW
+#define STACK_LOAD LW
+#else
+#define STACK_STORE SD
+#define STACK_LOAD LD
+#endif
+
+#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32)
+#include "sljitNativeRISCV_32.c"
+#else
+#include "sljitNativeRISCV_64.c"
+#endif
+
+#define STACK_MAX_DISTANCE (-SIMM_MIN)
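+/* With the usual RISC-V 12-bit signed immediate range (SIMM_MIN == -0x800,
+   an assumption here), STACK_MAX_DISTANCE evaluates to 2048 bytes. */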
+
+static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw);
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler,
+ sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
+ sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
+{
+ sljit_s32 i, tmp, offset;
+ sljit_s32 saved_arg_count = SLJIT_KEPT_SAVEDS_COUNT(options);
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
+ set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
+
+ local_size += GET_SAVED_REGISTERS_SIZE(scratches, saveds - saved_arg_count, 1);
+#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32)
+ if (fsaveds > 0 || fscratches >= SLJIT_FIRST_SAVED_FLOAT_REG) {
+ if ((local_size & SSIZE_OF(sw)) != 0)
+ local_size += SSIZE_OF(sw);
+ local_size += GET_SAVED_FLOAT_REGISTERS_SIZE(fscratches, fsaveds, f64);
+ }
+#else
+ local_size += GET_SAVED_FLOAT_REGISTERS_SIZE(fscratches, fsaveds, f64);
+#endif
+ local_size = (local_size + SLJIT_LOCALS_OFFSET + 15) & ~0xf;
+ compiler->local_size = local_size;
+
+ if (local_size <= STACK_MAX_DISTANCE) {
+ /* Frequent case. */
+ FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RS1(SLJIT_SP) | IMM_I(-local_size)));
+ offset = local_size - SSIZE_OF(sw);
+ local_size = 0;
+ } else {
+ FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RS1(SLJIT_SP) | IMM_I(STACK_MAX_DISTANCE)));
+ local_size -= STACK_MAX_DISTANCE;
+
+ if (local_size > STACK_MAX_DISTANCE)
+ FAIL_IF(load_immediate(compiler, TMP_REG1, local_size, TMP_REG3));
+ offset = STACK_MAX_DISTANCE - SSIZE_OF(sw);
+ }
+
+ FAIL_IF(push_imm_s_inst(compiler, STACK_STORE | RS1(SLJIT_SP) | RS2(RETURN_ADDR_REG), offset));
+
+ tmp = SLJIT_S0 - saveds;
+ for (i = SLJIT_S0 - saved_arg_count; i > tmp; i--) {
+ offset -= SSIZE_OF(sw);
+ FAIL_IF(push_imm_s_inst(compiler, STACK_STORE | RS1(SLJIT_SP) | RS2(i), offset));
+ }
+
+ for (i = scratches; i >= SLJIT_FIRST_SAVED_REG; i--) {
+ offset -= SSIZE_OF(sw);
+ FAIL_IF(push_imm_s_inst(compiler, STACK_STORE | RS1(SLJIT_SP) | RS2(i), offset));
+ }
+
+#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32)
+ /* This alignment is valid because offset is not used after storing FPU regs. */
+ if ((offset & SSIZE_OF(sw)) != 0)
+ offset -= SSIZE_OF(sw);
+#endif
+
+ tmp = SLJIT_FS0 - fsaveds;
+ for (i = SLJIT_FS0; i > tmp; i--) {
+ offset -= SSIZE_OF(f64);
+ FAIL_IF(push_imm_s_inst(compiler, FSD | RS1(SLJIT_SP) | FRS2(i), offset));
+ }
+
+ for (i = fscratches; i >= SLJIT_FIRST_SAVED_FLOAT_REG; i--) {
+ offset -= SSIZE_OF(f64);
+ FAIL_IF(push_imm_s_inst(compiler, FSD | RS1(SLJIT_SP) | FRS2(i), offset));
+ }
+
+ if (local_size > STACK_MAX_DISTANCE)
+ FAIL_IF(push_inst(compiler, SUB | RD(SLJIT_SP) | RS1(SLJIT_SP) | RS2(TMP_REG1)));
+ else if (local_size > 0)
+ FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RS1(SLJIT_SP) | IMM_I(-local_size)));
+
+ if (options & SLJIT_ENTER_REG_ARG)
+ return SLJIT_SUCCESS;
+
+ arg_types >>= SLJIT_ARG_SHIFT;
+ saved_arg_count = 0;
+ tmp = SLJIT_R0;
+
+ while (arg_types > 0) {
+ if ((arg_types & SLJIT_ARG_MASK) < SLJIT_ARG_TYPE_F64) {
+ if (!(arg_types & SLJIT_ARG_TYPE_SCRATCH_REG)) {
+ FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_S0 - saved_arg_count) | RS1(tmp) | IMM_I(0)));
+ saved_arg_count++;
+ }
+ tmp++;
+ }
+
+ arg_types >>= SLJIT_ARG_SHIFT;
+ }
+
+ return SLJIT_SUCCESS;
+}
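+
+/* Resulting frame layout (a sketch, derived from the stores above; offsets
+   grow downwards from the top of the reserved area):
+     [local_size - sizeof(sw)]  return address (RETURN_ADDR_REG)
+     [...]                      saved registers, then saved scratch registers
+     [...]                      saved float registers (f64 slots, aligned)
+     [0 ..]                     local variables */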
+
+#undef STACK_MAX_DISTANCE
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler,
+ sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
+ sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
+ set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
+
+ local_size += GET_SAVED_REGISTERS_SIZE(scratches, saveds - SLJIT_KEPT_SAVEDS_COUNT(options), 1);
+#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32)
+ if (fsaveds > 0 || fscratches >= SLJIT_FIRST_SAVED_FLOAT_REG) {
+ if ((local_size & SSIZE_OF(sw)) != 0)
+ local_size += SSIZE_OF(sw);
+ local_size += GET_SAVED_FLOAT_REGISTERS_SIZE(fscratches, fsaveds, f64);
+ }
+#else
+ local_size += GET_SAVED_FLOAT_REGISTERS_SIZE(fscratches, fsaveds, f64);
+#endif
+ compiler->local_size = (local_size + SLJIT_LOCALS_OFFSET + 15) & ~0xf;
+
+ return SLJIT_SUCCESS;
+}
+
+#define STACK_MAX_DISTANCE (-SIMM_MIN - 16)
+
+static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler, sljit_s32 is_return_to)
+{
+ sljit_s32 i, tmp, offset;
+ sljit_s32 local_size = compiler->local_size;
+
+ if (local_size > STACK_MAX_DISTANCE) {
+ local_size -= STACK_MAX_DISTANCE;
+
+ if (local_size > STACK_MAX_DISTANCE) {
+ FAIL_IF(load_immediate(compiler, TMP_REG2, local_size, TMP_REG3));
+ FAIL_IF(push_inst(compiler, ADD | RD(SLJIT_SP) | RS1(SLJIT_SP) | RS2(TMP_REG2)));
+ } else
+ FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RS1(SLJIT_SP) | IMM_I(local_size)));
+
+ local_size = STACK_MAX_DISTANCE;
+ }
+
+ SLJIT_ASSERT(local_size > 0);
+
+ offset = local_size - SSIZE_OF(sw);
+ if (!is_return_to)
+ FAIL_IF(push_inst(compiler, STACK_LOAD | RD(RETURN_ADDR_REG) | RS1(SLJIT_SP) | IMM_I(offset)));
+
+ tmp = SLJIT_S0 - compiler->saveds;
+ for (i = SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options); i > tmp; i--) {
+ offset -= SSIZE_OF(sw);
+ FAIL_IF(push_inst(compiler, STACK_LOAD | RD(i) | RS1(SLJIT_SP) | IMM_I(offset)));
+ }
+
+ for (i = compiler->scratches; i >= SLJIT_FIRST_SAVED_REG; i--) {
+ offset -= SSIZE_OF(sw);
+ FAIL_IF(push_inst(compiler, STACK_LOAD | RD(i) | RS1(SLJIT_SP) | IMM_I(offset)));
+ }
+
+#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32)
+	/* This alignment is valid because offset is not used after loading the FPU regs. */
+ if ((offset & SSIZE_OF(sw)) != 0)
+ offset -= SSIZE_OF(sw);
+#endif
+
+ tmp = SLJIT_FS0 - compiler->fsaveds;
+ for (i = SLJIT_FS0; i > tmp; i--) {
+ offset -= SSIZE_OF(f64);
+ FAIL_IF(push_inst(compiler, FLD | FRD(i) | RS1(SLJIT_SP) | IMM_I(offset)));
+ }
+
+ for (i = compiler->fscratches; i >= SLJIT_FIRST_SAVED_FLOAT_REG; i--) {
+ offset -= SSIZE_OF(f64);
+ FAIL_IF(push_inst(compiler, FLD | FRD(i) | RS1(SLJIT_SP) | IMM_I(offset)));
+ }
+
+ return push_inst(compiler, ADDI | RD(SLJIT_SP) | RS1(SLJIT_SP) | IMM_I(local_size));
+}
+
+#undef STACK_MAX_DISTANCE
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler *compiler)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_return_void(compiler));
+
+ FAIL_IF(emit_stack_frame_release(compiler, 0));
+ return push_inst(compiler, JALR | RD(TMP_ZERO) | RS1(RETURN_ADDR_REG) | IMM_I(0));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_to(struct sljit_compiler *compiler,
+ sljit_s32 src, sljit_sw srcw)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_return_to(compiler, src, srcw));
+
+ if (src & SLJIT_MEM) {
+ ADJUST_LOCAL_OFFSET(src, srcw);
+ FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_REG1, src, srcw));
+ src = TMP_REG1;
+ srcw = 0;
+ } else if (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) {
+ FAIL_IF(push_inst(compiler, ADDI | RD(TMP_REG1) | RS1(src) | IMM_I(0)));
+ src = TMP_REG1;
+ srcw = 0;
+ }
+
+ FAIL_IF(emit_stack_frame_release(compiler, 1));
+
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_ijump(compiler, SLJIT_JUMP, src, srcw);
+}
+
+/* --------------------------------------------------------------------- */
+/* Operators */
+/* --------------------------------------------------------------------- */
+
+#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32)
+#define ARCH_32_64(a, b) a
+#else
+#define ARCH_32_64(a, b) b
+#endif
+
+static const sljit_ins data_transfer_insts[16 + 4] = {
+/* u w s */ ARCH_32_64(F3(0x2) | OPC(0x23) /* sw */, F3(0x3) | OPC(0x23) /* sd */),
+/* u w l */ ARCH_32_64(F3(0x2) | OPC(0x3) /* lw */, F3(0x3) | OPC(0x3) /* ld */),
+/* u b s */ F3(0x0) | OPC(0x23) /* sb */,
+/* u b l */ F3(0x4) | OPC(0x3) /* lbu */,
+/* u h s */ F3(0x1) | OPC(0x23) /* sh */,
+/* u h l */ F3(0x5) | OPC(0x3) /* lhu */,
+/* u i s */ F3(0x2) | OPC(0x23) /* sw */,
+/* u i l */ ARCH_32_64(F3(0x2) | OPC(0x3) /* lw */, F3(0x6) | OPC(0x3) /* lwu */),
+
+/* s w s */ ARCH_32_64(F3(0x2) | OPC(0x23) /* sw */, F3(0x3) | OPC(0x23) /* sd */),
+/* s w l */ ARCH_32_64(F3(0x2) | OPC(0x3) /* lw */, F3(0x3) | OPC(0x3) /* ld */),
+/* s b s */ F3(0x0) | OPC(0x23) /* sb */,
+/* s b l */ F3(0x0) | OPC(0x3) /* lb */,
+/* s h s */ F3(0x1) | OPC(0x23) /* sh */,
+/* s h l */ F3(0x1) | OPC(0x3) /* lh */,
+/* s i s */ F3(0x2) | OPC(0x23) /* sw */,
+/* s i l */ F3(0x2) | OPC(0x3) /* lw */,
+
+/* d s */ F3(0x3) | OPC(0x27) /* fsd */,
+/* d l */ F3(0x3) | OPC(0x7) /* fld */,
+/* s s */ F3(0x2) | OPC(0x27) /* fsw */,
+/* s l */ F3(0x2) | OPC(0x7) /* flw */,
+};
+
+#undef ARCH_32_64
+
+static sljit_s32 push_mem_inst(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 base, sljit_sw offset)
+{
+ sljit_ins ins;
+
+ SLJIT_ASSERT(FAST_IS_REG(base) && offset <= 0xfff && offset >= SIMM_MIN);
+
+ ins = data_transfer_insts[flags & MEM_MASK] | RS1(base);
+ if (flags & LOAD_DATA)
+ ins |= ((flags & MEM_MASK) <= GPR_REG ? RD(reg) : FRD(reg)) | IMM_I(offset);
+ else
+ ins |= ((flags & MEM_MASK) <= GPR_REG ? RS2(reg) : FRS2(reg)) | IMM_S(offset);
+
+ return push_inst(compiler, ins);
+}
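+
+/* Loads are I-type instructions (12-bit immediate in bits 20-31), while stores
+   are S-type (the same immediate split across bits 7-11 and 25-31), which is
+   why the load and store paths above use IMM_I and IMM_S respectively. */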
+
+/* Performs the memory operation when it can be done with at most one instruction. */
+static sljit_s32 getput_arg_fast(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw)
+{
+ SLJIT_ASSERT(arg & SLJIT_MEM);
+
+ if (!(arg & OFFS_REG_MASK) && argw <= SIMM_MAX && argw >= SIMM_MIN) {
+		/* Works for both absolute and relative addresses. */
+ if (SLJIT_UNLIKELY(flags & ARG_TEST))
+ return 1;
+
+ FAIL_IF(push_mem_inst(compiler, flags, reg, arg & REG_MASK, argw));
+ return -1;
+ }
+ return 0;
+}
+
+#define TO_ARGW_HI(argw) (((argw) & ~0xfff) + (((argw) & 0x800) ? 0x1000 : 0))
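+/* Worked example (illustrative): TO_ARGW_HI(0x12345) == 0x12000, since the low
+   12 bits (0x345) fit a positive I-type immediate; TO_ARGW_HI(0x12945) == 0x13000,
+   because 0x945 sign-extends to a negative value, so the upper part is rounded
+   up (0x13000 + sign_extend(0x945) == 0x12945). */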
+
+/* See getput_arg below.
+ Note: can_cache is called only for binary operators. */
+static sljit_s32 can_cache(sljit_s32 arg, sljit_sw argw, sljit_s32 next_arg, sljit_sw next_argw)
+{
+ SLJIT_ASSERT((arg & SLJIT_MEM) && (next_arg & SLJIT_MEM));
+
+ /* Simple operation except for updates. */
+ if (arg & OFFS_REG_MASK) {
+ argw &= 0x3;
+ next_argw &= 0x3;
+ if (argw && argw == next_argw && (arg == next_arg || (arg & OFFS_REG_MASK) == (next_arg & OFFS_REG_MASK)))
+ return 1;
+ return 0;
+ }
+
+ if (arg == next_arg) {
+ if (((next_argw - argw) <= SIMM_MAX && (next_argw - argw) >= SIMM_MIN)
+ || TO_ARGW_HI(argw) == TO_ARGW_HI(next_argw))
+ return 1;
+ return 0;
+ }
+
+ return 0;
+}
+
+/* Emit the necessary instructions. See can_cache above. */
+static sljit_s32 getput_arg(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw, sljit_s32 next_arg, sljit_sw next_argw)
+{
+ sljit_s32 base = arg & REG_MASK;
+ sljit_s32 tmp_r = TMP_REG1;
+ sljit_sw offset, argw_hi;
+
+ SLJIT_ASSERT(arg & SLJIT_MEM);
+ if (!(next_arg & SLJIT_MEM)) {
+ next_arg = 0;
+ next_argw = 0;
+ }
+
+ /* Since tmp can be the same as base or offset registers,
+ * these might be unavailable after modifying tmp. */
+ if ((flags & MEM_MASK) <= GPR_REG && (flags & LOAD_DATA) && reg == TMP_REG2)
+ tmp_r = reg;
+
+ if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) {
+ argw &= 0x3;
+
+ /* Using the cache. */
+ if (argw == compiler->cache_argw) {
+ if (arg == compiler->cache_arg)
+ return push_mem_inst(compiler, flags, reg, TMP_REG3, 0);
+
+ if ((SLJIT_MEM | (arg & OFFS_REG_MASK)) == compiler->cache_arg) {
+ if (arg == next_arg && argw == (next_argw & 0x3)) {
+ compiler->cache_arg = arg;
+ compiler->cache_argw = argw;
+ FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG3) | RS1(TMP_REG3) | RS2(base)));
+ return push_mem_inst(compiler, flags, reg, TMP_REG3, 0);
+ }
+ FAIL_IF(push_inst(compiler, ADD | RD(tmp_r) | RS1(base) | RS2(TMP_REG3)));
+ return push_mem_inst(compiler, flags, reg, tmp_r, 0);
+ }
+ }
+
+ if (SLJIT_UNLIKELY(argw)) {
+ compiler->cache_arg = SLJIT_MEM | (arg & OFFS_REG_MASK);
+ compiler->cache_argw = argw;
+ FAIL_IF(push_inst(compiler, SLLI | RD(TMP_REG3) | RS1(OFFS_REG(arg)) | IMM_I(argw)));
+ }
+
+ if (arg == next_arg && argw == (next_argw & 0x3)) {
+ compiler->cache_arg = arg;
+ compiler->cache_argw = argw;
+ FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG3) | RS1(base) | RS2(!argw ? OFFS_REG(arg) : TMP_REG3)));
+ tmp_r = TMP_REG3;
+ }
+ else
+ FAIL_IF(push_inst(compiler, ADD | RD(tmp_r) | RS1(base) | RS2(!argw ? OFFS_REG(arg) : TMP_REG3)));
+ return push_mem_inst(compiler, flags, reg, tmp_r, 0);
+ }
+
+ if (compiler->cache_arg == arg && argw - compiler->cache_argw <= SIMM_MAX && argw - compiler->cache_argw >= SIMM_MIN)
+ return push_mem_inst(compiler, flags, reg, TMP_REG3, argw - compiler->cache_argw);
+
+ if (compiler->cache_arg == SLJIT_MEM && (argw - compiler->cache_argw <= SIMM_MAX) && (argw - compiler->cache_argw >= SIMM_MIN)) {
+ offset = argw - compiler->cache_argw;
+ } else {
+ compiler->cache_arg = SLJIT_MEM;
+
+ argw_hi = TO_ARGW_HI(argw);
+
+ if (next_arg && next_argw - argw <= SIMM_MAX && next_argw - argw >= SIMM_MIN && argw_hi != TO_ARGW_HI(next_argw)) {
+ FAIL_IF(load_immediate(compiler, TMP_REG3, argw, tmp_r));
+ compiler->cache_argw = argw;
+ offset = 0;
+ } else {
+ FAIL_IF(load_immediate(compiler, TMP_REG3, argw_hi, tmp_r));
+ compiler->cache_argw = argw_hi;
+ offset = argw & 0xfff;
+ argw = argw_hi;
+ }
+ }
+
+ if (!base)
+ return push_mem_inst(compiler, flags, reg, TMP_REG3, offset);
+
+ if (arg == next_arg && next_argw - argw <= SIMM_MAX && next_argw - argw >= SIMM_MIN) {
+ compiler->cache_arg = arg;
+ FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG3) | RS1(TMP_REG3) | RS2(base)));
+ return push_mem_inst(compiler, flags, reg, TMP_REG3, offset);
+ }
+
+ FAIL_IF(push_inst(compiler, ADD | RD(tmp_r) | RS1(TMP_REG3) | RS2(base)));
+ return push_mem_inst(compiler, flags, reg, tmp_r, offset);
+}
+
+static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw)
+{
+ sljit_s32 base = arg & REG_MASK;
+ sljit_s32 tmp_r = TMP_REG1;
+
+ if (getput_arg_fast(compiler, flags, reg, arg, argw))
+ return compiler->error;
+
+ if ((flags & MEM_MASK) <= GPR_REG && (flags & LOAD_DATA))
+ tmp_r = reg;
+
+ if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) {
+ argw &= 0x3;
+
+ if (SLJIT_UNLIKELY(argw)) {
+ FAIL_IF(push_inst(compiler, SLLI | RD(tmp_r) | RS1(OFFS_REG(arg)) | IMM_I(argw)));
+ FAIL_IF(push_inst(compiler, ADD | RD(tmp_r) | RS1(tmp_r) | RS2(base)));
+ }
+ else
+ FAIL_IF(push_inst(compiler, ADD | RD(tmp_r) | RS1(base) | RS2(OFFS_REG(arg))));
+
+ argw = 0;
+ } else {
+ FAIL_IF(load_immediate(compiler, tmp_r, TO_ARGW_HI(argw), TMP_REG3));
+
+ if (base != 0)
+ FAIL_IF(push_inst(compiler, ADD | RD(tmp_r) | RS1(tmp_r) | RS2(base)));
+ }
+
+ return push_mem_inst(compiler, flags, reg, tmp_r, argw & 0xfff);
+}
+
+static SLJIT_INLINE sljit_s32 emit_op_mem2(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg1, sljit_sw arg1w, sljit_s32 arg2, sljit_sw arg2w)
+{
+ if (getput_arg_fast(compiler, flags, reg, arg1, arg1w))
+ return compiler->error;
+ return getput_arg(compiler, flags, reg, arg1, arg1w, arg2, arg2w);
+}
+
+#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32)
+#define WORD 0
+#define WORD_32 0
+#define IMM_EXTEND(v) (IMM_I(v))
+#else /* !SLJIT_CONFIG_RISCV_32 */
+#define WORD word
+#define WORD_32 0x08
+#define IMM_EXTEND(v) (IMM_I((op & SLJIT_32) ? (v) : (32 + (v))))
+#endif /* SLJIT_CONFIG_RISCV_32 */
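+
+/* Here `word` is (op & SLJIT_32) >> 5, i.e. 0x8 when SLJIT_32 is set (assuming
+   SLJIT_32 == 0x100). OR-ing 0x8 into an instruction flips opcode bit 3, which
+   turns the RV64 base ops into their 32-bit forms (e.g. opcode 0x33 OP becomes
+   0x3b OP-32, so ADD becomes ADDW), matching the WORD usage below. */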
+
+static sljit_s32 emit_clz_ctz(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 dst, sljit_sw src)
+{
+ sljit_s32 is_clz = (GET_OPCODE(op) == SLJIT_CLZ);
+#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64)
+ sljit_ins word = (sljit_ins)(op & SLJIT_32) >> 5;
+ sljit_ins word_size = (op & SLJIT_32) ? 32 : 64;
+#else /* !SLJIT_CONFIG_RISCV_64 */
+ sljit_ins word_size = 32;
+#endif /* SLJIT_CONFIG_RISCV_64 */
+
+ SLJIT_ASSERT(WORD == 0 || WORD == 0x8);
+
+ /* The OTHER_FLAG is the counter. */
+ FAIL_IF(push_inst(compiler, ADDI | WORD | RD(OTHER_FLAG) | RS1(TMP_ZERO) | IMM_I(word_size)));
+
+ /* The TMP_REG2 is the next value. */
+ if (src != TMP_REG2)
+ FAIL_IF(push_inst(compiler, ADDI | WORD | RD(TMP_REG2) | RS1(src) | IMM_I(0)));
+
+ FAIL_IF(push_inst(compiler, BEQ | RS1(TMP_REG2) | RS2(TMP_ZERO) | ((sljit_ins)((is_clz ? 4 : 5) * SSIZE_OF(ins)) << 7) | ((sljit_ins)(8 * SSIZE_OF(ins)) << 20)));
+
+ FAIL_IF(push_inst(compiler, ADDI | WORD | RD(OTHER_FLAG) | RS1(TMP_ZERO) | IMM_I(0)));
+ if (!is_clz) {
+ FAIL_IF(push_inst(compiler, ANDI | RD(TMP_REG1) | RS1(TMP_REG2) | IMM_I(1)));
+ FAIL_IF(push_inst(compiler, BNE | RS1(TMP_REG1) | RS2(TMP_ZERO) | ((sljit_ins)(2 * SSIZE_OF(ins)) << 7) | ((sljit_ins)(8 * SSIZE_OF(ins)) << 20)));
+ } else
+ FAIL_IF(push_inst(compiler, BLT | RS1(TMP_REG2) | RS2(TMP_ZERO) | ((sljit_ins)(2 * SSIZE_OF(ins)) << 7) | ((sljit_ins)(8 * SSIZE_OF(ins)) << 20)));
+
+ /* The TMP_REG1 is the next shift. */
+ FAIL_IF(push_inst(compiler, ADDI | WORD | RD(TMP_REG1) | RS1(TMP_ZERO) | IMM_I(word_size)));
+
+ FAIL_IF(push_inst(compiler, ADDI | WORD | RD(EQUAL_FLAG) | RS1(TMP_REG2) | IMM_I(0)));
+ FAIL_IF(push_inst(compiler, SRLI | WORD | RD(TMP_REG1) | RS1(TMP_REG1) | IMM_I(1)));
+
+ FAIL_IF(push_inst(compiler, (is_clz ? SRL : SLL) | WORD | RD(TMP_REG2) | RS1(EQUAL_FLAG) | RS2(TMP_REG1)));
+ FAIL_IF(push_inst(compiler, BNE | RS1(TMP_REG2) | RS2(TMP_ZERO) | ((sljit_ins)0xfe000e80 - ((2 * SSIZE_OF(ins)) << 7))));
+ FAIL_IF(push_inst(compiler, ADDI | WORD | RD(TMP_REG2) | RS1(TMP_REG1) | IMM_I(-1)));
+ FAIL_IF(push_inst(compiler, (is_clz ? SRL : SLL) | WORD | RD(TMP_REG2) | RS1(EQUAL_FLAG) | RS2(TMP_REG2)));
+ FAIL_IF(push_inst(compiler, OR | RD(OTHER_FLAG) | RS1(OTHER_FLAG) | RS2(TMP_REG1)));
+ FAIL_IF(push_inst(compiler, BEQ | RS1(TMP_REG2) | RS2(TMP_ZERO) | ((sljit_ins)0xfe000e80 - ((5 * SSIZE_OF(ins)) << 7))));
+
+ return push_inst(compiler, ADDI | WORD | RD(dst) | RS1(OTHER_FLAG) | IMM_I(0));
+}
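+
+/* The sequence above is, roughly, a branch-based binary search: TMP_REG1 holds
+   a shift amount that is halved each iteration, the value is shifted from the
+   clz/ctz direction, and OTHER_FLAG accumulates the bit count via OR; a zero
+   input takes the early exit with the full word size. (Summary only; the
+   branch offsets are encoded inline in the instructions above.) */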
+
+static sljit_s32 emit_rev(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 dst, sljit_sw src)
+{
+ SLJIT_UNUSED_ARG(op);
+
+#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64)
+ if (!(op & SLJIT_32)) {
+ FAIL_IF(push_inst(compiler, LUI | RD(OTHER_FLAG) | 0x10000));
+ FAIL_IF(push_inst(compiler, SRLI | RD(TMP_REG1) | RS1(src) | IMM_I(32)));
+ FAIL_IF(push_inst(compiler, ADDI | RD(OTHER_FLAG) | RS1(OTHER_FLAG) | IMM_I(0xfff)));
+ FAIL_IF(push_inst(compiler, SLLI | RD(dst) | RS1(src) | IMM_I(32)));
+ FAIL_IF(push_inst(compiler, SLLI | RD(EQUAL_FLAG) | RS1(OTHER_FLAG) | IMM_I(32)));
+ FAIL_IF(push_inst(compiler, OR | RD(dst) | RS1(dst) | RS2(TMP_REG1)));
+ FAIL_IF(push_inst(compiler, OR | RD(OTHER_FLAG) | RS1(OTHER_FLAG) | RS2(EQUAL_FLAG)));
+
+ FAIL_IF(push_inst(compiler, SRLI | RD(TMP_REG1) | RS1(dst) | IMM_I(16)));
+ FAIL_IF(push_inst(compiler, AND | RD(dst) | RS1(dst) | RS2(OTHER_FLAG)));
+ FAIL_IF(push_inst(compiler, AND | RD(TMP_REG1) | RS1(TMP_REG1) | RS2(OTHER_FLAG)));
+ FAIL_IF(push_inst(compiler, SLLI | RD(EQUAL_FLAG) | RS1(OTHER_FLAG) | IMM_I(8)));
+ FAIL_IF(push_inst(compiler, SLLI | RD(dst) | RS1(dst) | IMM_I(16)));
+ FAIL_IF(push_inst(compiler, XOR | RD(OTHER_FLAG) | RS1(OTHER_FLAG) | RS2(EQUAL_FLAG)));
+ FAIL_IF(push_inst(compiler, OR | RD(dst) | RS1(dst) | RS2(TMP_REG1)));
+
+ FAIL_IF(push_inst(compiler, SRLI | RD(TMP_REG1) | RS1(dst) | IMM_I(8)));
+ FAIL_IF(push_inst(compiler, AND | RD(dst) | RS1(dst) | RS2(OTHER_FLAG)));
+ FAIL_IF(push_inst(compiler, AND | RD(TMP_REG1) | RS1(TMP_REG1) | RS2(OTHER_FLAG)));
+ FAIL_IF(push_inst(compiler, SLLI | RD(dst) | RS1(dst) | IMM_I(8)));
+ return push_inst(compiler, OR | RD(dst) | RS1(dst) | RS2(TMP_REG1));
+ }
+#endif /* SLJIT_CONFIG_RISCV_64 */
+
+ FAIL_IF(push_inst(compiler, SRLI | WORD_32 | RD(TMP_REG1) | RS1(src) | IMM_I(16)));
+ FAIL_IF(push_inst(compiler, LUI | RD(OTHER_FLAG) | 0xff0000));
+ FAIL_IF(push_inst(compiler, SLLI | WORD_32 | RD(dst) | RS1(src) | IMM_I(16)));
+ FAIL_IF(push_inst(compiler, ORI | RD(OTHER_FLAG) | RS1(OTHER_FLAG) | IMM_I(0xff)));
+ FAIL_IF(push_inst(compiler, OR | RD(dst) | RS1(dst) | RS2(TMP_REG1)));
+
+ FAIL_IF(push_inst(compiler, SRLI | WORD_32 | RD(TMP_REG1) | RS1(dst) | IMM_I(8)));
+ FAIL_IF(push_inst(compiler, AND | RD(dst) | RS1(dst) | RS2(OTHER_FLAG)));
+ FAIL_IF(push_inst(compiler, AND | RD(TMP_REG1) | RS1(TMP_REG1) | RS2(OTHER_FLAG)));
+ FAIL_IF(push_inst(compiler, SLLI | WORD_32 | RD(dst) | RS1(dst) | IMM_I(8)));
+ return push_inst(compiler, OR | RD(dst) | RS1(dst) | RS2(TMP_REG1));
+}
+
+static sljit_s32 emit_rev16(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 dst, sljit_sw src)
+{
+#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64)
+ sljit_ins word = (sljit_ins)(op & SLJIT_32) >> 5;
+ sljit_ins word_size = (op & SLJIT_32) ? 32 : 64;
+#else /* !SLJIT_CONFIG_RISCV_64 */
+ sljit_ins word_size = 32;
+#endif /* SLJIT_CONFIG_RISCV_64 */
+
+ FAIL_IF(push_inst(compiler, SRLI | WORD | RD(TMP_REG1) | RS1(src) | IMM_I(8)));
+ FAIL_IF(push_inst(compiler, SLLI | WORD | RD(dst) | RS1(src) | IMM_I(word_size - 8)));
+ FAIL_IF(push_inst(compiler, ANDI | RD(TMP_REG1) | RS1(TMP_REG1) | IMM_I(0xff)));
+ FAIL_IF(push_inst(compiler, (GET_OPCODE(op) == SLJIT_REV_U16 ? SRLI : SRAI) | WORD | RD(dst) | RS1(dst) | IMM_I(word_size - 16)));
+ return push_inst(compiler, OR | RD(dst) | RS1(dst) | RS2(TMP_REG1));
+}
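+
+/* Example (illustrative): for SLJIT_REV_U16 with low half-word 0x1234, the two
+   bytes are swapped to 0x3412 and the upper bits are cleared by the SRLI;
+   SLJIT_REV_S16 uses SRAI instead, sign-extending from bit 15 of the result. */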
+
+#define EMIT_LOGICAL(op_imm, op_reg) \
+ if (flags & SRC2_IMM) { \
+ if (op & SLJIT_SET_Z) \
+ FAIL_IF(push_inst(compiler, op_imm | RD(EQUAL_FLAG) | RS1(src1) | IMM_I(src2))); \
+ if (!(flags & UNUSED_DEST)) \
+ FAIL_IF(push_inst(compiler, op_imm | RD(dst) | RS1(src1) | IMM_I(src2))); \
+ } \
+ else { \
+ if (op & SLJIT_SET_Z) \
+ FAIL_IF(push_inst(compiler, op_reg | RD(EQUAL_FLAG) | RS1(src1) | RS2(src2))); \
+ if (!(flags & UNUSED_DEST)) \
+ FAIL_IF(push_inst(compiler, op_reg | RD(dst) | RS1(src1) | RS2(src2))); \
+ }
+
+#define EMIT_SHIFT(imm, reg) \
+ op_imm = (imm); \
+ op_reg = (reg);
+
+static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags,
+ sljit_s32 dst, sljit_s32 src1, sljit_sw src2)
+{
+ sljit_s32 is_overflow, is_carry, carry_src_r, is_handled;
+ sljit_ins op_imm, op_reg;
+#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64)
+ sljit_ins word = (sljit_ins)(op & SLJIT_32) >> 5;
+#endif /* SLJIT_CONFIG_RISCV_64 */
+
+ SLJIT_ASSERT(WORD == 0 || WORD == 0x8);
+
+ switch (GET_OPCODE(op)) {
+ case SLJIT_MOV:
+ SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
+ if (dst != src2)
+ return push_inst(compiler, ADDI | RD(dst) | RS1(src2) | IMM_I(0));
+ return SLJIT_SUCCESS;
+
+ case SLJIT_MOV_U8:
+ SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
+ if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE))
+ return push_inst(compiler, ANDI | RD(dst) | RS1(src2) | IMM_I(0xff));
+ SLJIT_ASSERT(dst == src2);
+ return SLJIT_SUCCESS;
+
+ case SLJIT_MOV_S8:
+ SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
+ if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) {
+ FAIL_IF(push_inst(compiler, SLLI | WORD | RD(dst) | RS1(src2) | IMM_EXTEND(24)));
+ return push_inst(compiler, SRAI | WORD | RD(dst) | RS1(dst) | IMM_EXTEND(24));
+ }
+ SLJIT_ASSERT(dst == src2);
+ return SLJIT_SUCCESS;
+
+ case SLJIT_MOV_U16:
+ SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
+ if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) {
+ FAIL_IF(push_inst(compiler, SLLI | WORD | RD(dst) | RS1(src2) | IMM_EXTEND(16)));
+ return push_inst(compiler, SRLI | WORD | RD(dst) | RS1(dst) | IMM_EXTEND(16));
+ }
+ SLJIT_ASSERT(dst == src2);
+ return SLJIT_SUCCESS;
+
+ case SLJIT_MOV_S16:
+ SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
+ if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) {
+ FAIL_IF(push_inst(compiler, SLLI | WORD | RD(dst) | RS1(src2) | IMM_EXTEND(16)));
+ return push_inst(compiler, SRAI | WORD | RD(dst) | RS1(dst) | IMM_EXTEND(16));
+ }
+ SLJIT_ASSERT(dst == src2);
+ return SLJIT_SUCCESS;
+
+#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64)
+ case SLJIT_MOV_U32:
+ SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
+ if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) {
+ FAIL_IF(push_inst(compiler, SLLI | RD(dst) | RS1(src2) | IMM_I(32)));
+ return push_inst(compiler, SRLI | RD(dst) | RS1(dst) | IMM_I(32));
+ }
+ SLJIT_ASSERT(dst == src2);
+ return SLJIT_SUCCESS;
+
+ case SLJIT_MOV_S32:
+ SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
+ if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE))
+ return push_inst(compiler, ADDI | 0x8 | RD(dst) | RS1(src2) | IMM_I(0));
+ SLJIT_ASSERT(dst == src2);
+ return SLJIT_SUCCESS;
+#endif /* SLJIT_CONFIG_RISCV_64 */
+
+ case SLJIT_CLZ:
+ case SLJIT_CTZ:
+ SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
+ return emit_clz_ctz(compiler, op, dst, src2);
+
+ case SLJIT_REV:
+ case SLJIT_REV_S32:
+#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32)
+ case SLJIT_REV_U32:
+#endif /* SLJIT_CONFIG_RISCV_32 */
+ SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
+ return emit_rev(compiler, op, dst, src2);
+
+ case SLJIT_REV_U16:
+ case SLJIT_REV_S16:
+ SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
+ return emit_rev16(compiler, op, dst, src2);
+
+#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64)
+ case SLJIT_REV_U32:
+ SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM) && dst != TMP_REG1);
+ FAIL_IF(emit_rev(compiler, op, dst, src2));
+ if (dst == TMP_REG2)
+ return SLJIT_SUCCESS;
+ FAIL_IF(push_inst(compiler, SLLI | RD(dst) | RS1(dst) | IMM_I(32)));
+ return push_inst(compiler, SRLI | RD(dst) | RS1(dst) | IMM_I(32));
+#endif /* SLJIT_CONFIG_RISCV_64 */
+
+ case SLJIT_ADD:
+ /* Overflow computation (both add and sub): overflow = src1_sign ^ src2_sign ^ result_sign ^ carry_flag */
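+		/* Worked example (32-bit, illustrative): 0x7fffffff + 1 == 0x80000000:
+		   source signs 0 ^ 0, result sign 1, carry 0 -> overflow = 1. */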
+ is_overflow = GET_FLAG_TYPE(op) == SLJIT_OVERFLOW;
+ carry_src_r = GET_FLAG_TYPE(op) == SLJIT_CARRY;
+
+ if (flags & SRC2_IMM) {
+ if (is_overflow) {
+ if (src2 >= 0)
+ FAIL_IF(push_inst(compiler, ADDI | RD(EQUAL_FLAG) | RS1(src1) | IMM_I(0)));
+ else
+ FAIL_IF(push_inst(compiler, XORI | RD(EQUAL_FLAG) | RS1(src1) | IMM_I(-1)));
+ }
+ else if (op & SLJIT_SET_Z)
+ FAIL_IF(push_inst(compiler, ADDI | WORD | RD(EQUAL_FLAG) | RS1(src1) | IMM_I(src2)));
+
+ /* Only the zero flag is needed. */
+ if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK))
+ FAIL_IF(push_inst(compiler, ADDI | WORD | RD(dst) | RS1(src1) | IMM_I(src2)));
+ }
+ else {
+ if (is_overflow)
+ FAIL_IF(push_inst(compiler, XOR | RD(EQUAL_FLAG) | RS1(src1) | RS2(src2)));
+ else if (op & SLJIT_SET_Z)
+ FAIL_IF(push_inst(compiler, ADD | WORD | RD(EQUAL_FLAG) | RS1(src1) | RS2(src2)));
+
+ if (is_overflow || carry_src_r != 0) {
+ if (src1 != dst)
+ carry_src_r = (sljit_s32)src1;
+ else if (src2 != dst)
+ carry_src_r = (sljit_s32)src2;
+ else {
+ FAIL_IF(push_inst(compiler, ADDI | RD(OTHER_FLAG) | RS1(src1) | IMM_I(0)));
+ carry_src_r = OTHER_FLAG;
+ }
+ }
+
+ /* Only the zero flag is needed. */
+ if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK))
+ FAIL_IF(push_inst(compiler, ADD | WORD | RD(dst) | RS1(src1) | RS2(src2)));
+ }
+
+ /* Carry is zero if a + b >= a or a + b >= b, otherwise it is 1. */
+ if (is_overflow || carry_src_r != 0) {
+ if (flags & SRC2_IMM)
+ FAIL_IF(push_inst(compiler, SLTUI | RD(OTHER_FLAG) | RS1(dst) | IMM_I(src2)));
+ else
+ FAIL_IF(push_inst(compiler, SLTU | RD(OTHER_FLAG) | RS1(dst) | RS2(carry_src_r)));
+ }
+
+ if (!is_overflow)
+ return SLJIT_SUCCESS;
+
+ FAIL_IF(push_inst(compiler, XOR | RD(TMP_REG1) | RS1(dst) | RS2(EQUAL_FLAG)));
+ if (op & SLJIT_SET_Z)
+ FAIL_IF(push_inst(compiler, ADDI | RD(EQUAL_FLAG) | RS1(dst) | IMM_I(0)));
+ FAIL_IF(push_inst(compiler, SRLI | WORD | RD(TMP_REG1) | RS1(TMP_REG1) | IMM_EXTEND(31)));
+ return push_inst(compiler, XOR | RD(OTHER_FLAG) | RS1(TMP_REG1) | RS2(OTHER_FLAG));
+
+ case SLJIT_ADDC:
+ carry_src_r = GET_FLAG_TYPE(op) == SLJIT_CARRY;
+
+ if (flags & SRC2_IMM) {
+ FAIL_IF(push_inst(compiler, ADDI | WORD | RD(dst) | RS1(src1) | IMM_I(src2)));
+ } else {
+ if (carry_src_r != 0) {
+ if (src1 != dst)
+ carry_src_r = (sljit_s32)src1;
+ else if (src2 != dst)
+ carry_src_r = (sljit_s32)src2;
+ else {
+ FAIL_IF(push_inst(compiler, ADDI | RD(EQUAL_FLAG) | RS1(src1) | IMM_I(0)));
+ carry_src_r = EQUAL_FLAG;
+ }
+ }
+
+ FAIL_IF(push_inst(compiler, ADD | WORD | RD(dst) | RS1(src1) | RS2(src2)));
+ }
+
+ /* Carry is zero if a + b >= a or a + b >= b, otherwise it is 1. */
+ if (carry_src_r != 0) {
+ if (flags & SRC2_IMM)
+ FAIL_IF(push_inst(compiler, SLTUI | RD(EQUAL_FLAG) | RS1(dst) | IMM_I(src2)));
+ else
+ FAIL_IF(push_inst(compiler, SLTU | RD(EQUAL_FLAG) | RS1(dst) | RS2(carry_src_r)));
+ }
+
+ FAIL_IF(push_inst(compiler, ADD | WORD | RD(dst) | RS1(dst) | RS2(OTHER_FLAG)));
+
+ if (carry_src_r == 0)
+ return SLJIT_SUCCESS;
+
+	/* Set OTHER_FLAG to 1 only when (dst == 0) && (OTHER_FLAG == 1). */
+ FAIL_IF(push_inst(compiler, SLTU | RD(OTHER_FLAG) | RS1(dst) | RS2(OTHER_FLAG)));
+ /* Set carry flag. */
+ return push_inst(compiler, OR | RD(OTHER_FLAG) | RS1(OTHER_FLAG) | RS2(EQUAL_FLAG));
+
+ case SLJIT_SUB:
+ if ((flags & SRC2_IMM) && src2 == SIMM_MIN) {
+ FAIL_IF(push_inst(compiler, ADDI | RD(TMP_REG2) | RS1(TMP_ZERO) | IMM_I(src2)));
+ src2 = TMP_REG2;
+ flags &= ~SRC2_IMM;
+ }
+
+ is_handled = 0;
+
+ if (flags & SRC2_IMM) {
+ if (GET_FLAG_TYPE(op) == SLJIT_LESS) {
+ FAIL_IF(push_inst(compiler, SLTUI | RD(OTHER_FLAG) | RS1(src1) | IMM_I(src2)));
+ is_handled = 1;
+ }
+ else if (GET_FLAG_TYPE(op) == SLJIT_SIG_LESS) {
+ FAIL_IF(push_inst(compiler, SLTI | RD(OTHER_FLAG) | RS1(src1) | IMM_I(src2)));
+ is_handled = 1;
+ }
+ }
+
+ if (!is_handled && GET_FLAG_TYPE(op) >= SLJIT_LESS && GET_FLAG_TYPE(op) <= SLJIT_SIG_LESS_EQUAL) {
+ is_handled = 1;
+
+ if (flags & SRC2_IMM) {
+ FAIL_IF(push_inst(compiler, ADDI | RD(TMP_REG2) | RS1(TMP_ZERO) | IMM_I(src2)));
+ src2 = TMP_REG2;
+ flags &= ~SRC2_IMM;
+ }
+
+ switch (GET_FLAG_TYPE(op)) {
+ case SLJIT_LESS:
+ FAIL_IF(push_inst(compiler, SLTU | RD(OTHER_FLAG) | RS1(src1) | RS2(src2)));
+ break;
+ case SLJIT_GREATER:
+ FAIL_IF(push_inst(compiler, SLTU | RD(OTHER_FLAG) | RS1(src2) | RS2(src1)));
+ break;
+ case SLJIT_SIG_LESS:
+ FAIL_IF(push_inst(compiler, SLT | RD(OTHER_FLAG) | RS1(src1) | RS2(src2)));
+ break;
+ case SLJIT_SIG_GREATER:
+ FAIL_IF(push_inst(compiler, SLT | RD(OTHER_FLAG) | RS1(src2) | RS2(src1)));
+ break;
+ }
+ }
+
+ if (is_handled) {
+ if (flags & SRC2_IMM) {
+ if (op & SLJIT_SET_Z)
+ FAIL_IF(push_inst(compiler, ADDI | WORD | RD(EQUAL_FLAG) | RS1(src1) | IMM_I(-src2)));
+ if (!(flags & UNUSED_DEST))
+ return push_inst(compiler, ADDI | WORD | RD(dst) | RS1(src1) | IMM_I(-src2));
+ }
+ else {
+ if (op & SLJIT_SET_Z)
+ FAIL_IF(push_inst(compiler, SUB | WORD | RD(EQUAL_FLAG) | RS1(src1) | RS2(src2)));
+ if (!(flags & UNUSED_DEST))
+ return push_inst(compiler, SUB | WORD | RD(dst) | RS1(src1) | RS2(src2));
+ }
+ return SLJIT_SUCCESS;
+ }
+
+ is_overflow = GET_FLAG_TYPE(op) == SLJIT_OVERFLOW;
+ is_carry = GET_FLAG_TYPE(op) == SLJIT_CARRY;
+
+ if (flags & SRC2_IMM) {
+ if (is_overflow) {
+ if (src2 >= 0)
+ FAIL_IF(push_inst(compiler, ADDI | RD(EQUAL_FLAG) | RS1(src1) | IMM_I(0)));
+ else
+ FAIL_IF(push_inst(compiler, XORI | RD(EQUAL_FLAG) | RS1(src1) | IMM_I(-1)));
+ }
+ else if (op & SLJIT_SET_Z)
+ FAIL_IF(push_inst(compiler, ADDI | WORD | RD(EQUAL_FLAG) | RS1(src1) | IMM_I(-src2)));
+
+ if (is_overflow || is_carry)
+ FAIL_IF(push_inst(compiler, SLTUI | RD(OTHER_FLAG) | RS1(src1) | IMM_I(src2)));
+
+ /* Only the zero flag is needed. */
+ if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK))
+ FAIL_IF(push_inst(compiler, ADDI | WORD | RD(dst) | RS1(src1) | IMM_I(-src2)));
+ }
+ else {
+ if (is_overflow)
+ FAIL_IF(push_inst(compiler, XOR | RD(EQUAL_FLAG) | RS1(src1) | RS2(src2)));
+ else if (op & SLJIT_SET_Z)
+ FAIL_IF(push_inst(compiler, SUB | WORD | RD(EQUAL_FLAG) | RS1(src1) | RS2(src2)));
+
+ if (is_overflow || is_carry)
+ FAIL_IF(push_inst(compiler, SLTU | RD(OTHER_FLAG) | RS1(src1) | RS2(src2)));
+
+ /* Only the zero flag is needed. */
+ if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK))
+ FAIL_IF(push_inst(compiler, SUB | WORD | RD(dst) | RS1(src1) | RS2(src2)));
+ }
+
+ if (!is_overflow)
+ return SLJIT_SUCCESS;
+
+ FAIL_IF(push_inst(compiler, XOR | RD(TMP_REG1) | RS1(dst) | RS2(EQUAL_FLAG)));
+ if (op & SLJIT_SET_Z)
+ FAIL_IF(push_inst(compiler, ADDI | RD(EQUAL_FLAG) | RS1(dst) | IMM_I(0)));
+ FAIL_IF(push_inst(compiler, SRLI | WORD | RD(TMP_REG1) | RS1(TMP_REG1) | IMM_EXTEND(31)));
+ return push_inst(compiler, XOR | RD(OTHER_FLAG) | RS1(TMP_REG1) | RS2(OTHER_FLAG));
+
+ case SLJIT_SUBC:
+ if ((flags & SRC2_IMM) && src2 == SIMM_MIN) {
+ FAIL_IF(push_inst(compiler, ADDI | RD(TMP_REG2) | RS1(TMP_ZERO) | IMM_I(src2)));
+ src2 = TMP_REG2;
+ flags &= ~SRC2_IMM;
+ }
+
+ is_carry = GET_FLAG_TYPE(op) == SLJIT_CARRY;
+
+ if (flags & SRC2_IMM) {
+ if (is_carry)
+ FAIL_IF(push_inst(compiler, SLTUI | RD(EQUAL_FLAG) | RS1(src1) | IMM_I(src2)));
+
+ FAIL_IF(push_inst(compiler, ADDI | WORD | RD(dst) | RS1(src1) | IMM_I(-src2)));
+ }
+ else {
+ if (is_carry)
+ FAIL_IF(push_inst(compiler, SLTU | RD(EQUAL_FLAG) | RS1(src1) | RS2(src2)));
+
+ FAIL_IF(push_inst(compiler, SUB | WORD | RD(dst) | RS1(src1) | RS2(src2)));
+ }
+
+ if (is_carry)
+ FAIL_IF(push_inst(compiler, SLTU | RD(TMP_REG1) | RS1(dst) | RS2(OTHER_FLAG)));
+
+ FAIL_IF(push_inst(compiler, SUB | WORD | RD(dst) | RS1(dst) | RS2(OTHER_FLAG)));
+
+ if (!is_carry)
+ return SLJIT_SUCCESS;
+
+ return push_inst(compiler, OR | RD(OTHER_FLAG) | RS1(EQUAL_FLAG) | RS2(TMP_REG1));
+
+ case SLJIT_MUL:
+ SLJIT_ASSERT(!(flags & SRC2_IMM));
+
+ if (GET_FLAG_TYPE(op) != SLJIT_OVERFLOW)
+ return push_inst(compiler, MUL | WORD | RD(dst) | RS1(src1) | RS2(src2));
+
+#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64)
+ if (word) {
+ FAIL_IF(push_inst(compiler, MUL | RD(OTHER_FLAG) | RS1(src1) | RS2(src2)));
+ FAIL_IF(push_inst(compiler, MUL | 0x8 | RD(dst) | RS1(src1) | RS2(src2)));
+ return push_inst(compiler, SUB | RD(OTHER_FLAG) | RS1(dst) | RS2(OTHER_FLAG));
+ }
+#endif /* SLJIT_CONFIG_RISCV_64 */
+
+ FAIL_IF(push_inst(compiler, MULH | RD(EQUAL_FLAG) | RS1(src1) | RS2(src2)));
+ FAIL_IF(push_inst(compiler, MUL | RD(dst) | RS1(src1) | RS2(src2)));
+#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32)
+ FAIL_IF(push_inst(compiler, SRAI | RD(OTHER_FLAG) | RS1(dst) | IMM_I(31)));
+#else /* !SLJIT_CONFIG_RISCV_32 */
+ FAIL_IF(push_inst(compiler, SRAI | RD(OTHER_FLAG) | RS1(dst) | IMM_I(63)));
+#endif /* SLJIT_CONFIG_RISCV_32 */
+ return push_inst(compiler, SUB | RD(OTHER_FLAG) | RS1(EQUAL_FLAG) | RS2(OTHER_FLAG));
+
+ case SLJIT_AND:
+ EMIT_LOGICAL(ANDI, AND);
+ return SLJIT_SUCCESS;
+
+ case SLJIT_OR:
+ EMIT_LOGICAL(ORI, OR);
+ return SLJIT_SUCCESS;
+
+ case SLJIT_XOR:
+ EMIT_LOGICAL(XORI, XOR);
+ return SLJIT_SUCCESS;
+
+ case SLJIT_SHL:
+ case SLJIT_MSHL:
+ EMIT_SHIFT(SLLI, SLL);
+ break;
+
+ case SLJIT_LSHR:
+ case SLJIT_MLSHR:
+ EMIT_SHIFT(SRLI, SRL);
+ break;
+
+ case SLJIT_ASHR:
+ case SLJIT_MASHR:
+ EMIT_SHIFT(SRAI, SRA);
+ break;
+
+ case SLJIT_ROTL:
+ case SLJIT_ROTR:
+ if (flags & SRC2_IMM) {
+ SLJIT_ASSERT(src2 != 0);
+
+ op_imm = (GET_OPCODE(op) == SLJIT_ROTL) ? SLLI : SRLI;
+ FAIL_IF(push_inst(compiler, op_imm | WORD | RD(OTHER_FLAG) | RS1(src1) | IMM_I(src2)));
+
+#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64)
+ src2 = ((op & SLJIT_32) ? 32 : 64) - src2;
+#else /* !SLJIT_CONFIG_RISCV_64 */
+ src2 = 32 - src2;
+#endif /* SLJIT_CONFIG_RISCV_64 */
+ op_imm = (GET_OPCODE(op) == SLJIT_ROTL) ? SRLI : SLLI;
+ FAIL_IF(push_inst(compiler, op_imm | WORD | RD(dst) | RS1(src1) | IMM_I(src2)));
+ return push_inst(compiler, OR | RD(dst) | RS1(dst) | RS2(OTHER_FLAG));
+ }
+
+ if (src2 == TMP_ZERO) {
+ if (dst != src1)
+ return push_inst(compiler, ADDI | WORD | RD(dst) | RS1(src1) | IMM_I(0));
+ return SLJIT_SUCCESS;
+ }
+
+ FAIL_IF(push_inst(compiler, SUB | WORD | RD(EQUAL_FLAG) | RS1(TMP_ZERO) | RS2(src2)));
+ op_reg = (GET_OPCODE(op) == SLJIT_ROTL) ? SLL : SRL;
+ FAIL_IF(push_inst(compiler, op_reg | WORD | RD(OTHER_FLAG) | RS1(src1) | RS2(src2)));
+ op_reg = (GET_OPCODE(op) == SLJIT_ROTL) ? SRL : SLL;
+ FAIL_IF(push_inst(compiler, op_reg | WORD | RD(dst) | RS1(src1) | RS2(EQUAL_FLAG)));
+ return push_inst(compiler, OR | RD(dst) | RS1(dst) | RS2(OTHER_FLAG));
+
+ default:
+ SLJIT_UNREACHABLE();
+ return SLJIT_SUCCESS;
+ }
+
+ if (flags & SRC2_IMM) {
+ if (op & SLJIT_SET_Z)
+ FAIL_IF(push_inst(compiler, op_imm | WORD | RD(EQUAL_FLAG) | RS1(src1) | IMM_I(src2)));
+
+ if (flags & UNUSED_DEST)
+ return SLJIT_SUCCESS;
+ return push_inst(compiler, op_imm | WORD | RD(dst) | RS1(src1) | IMM_I(src2));
+ }
+
+ if (op & SLJIT_SET_Z)
+ FAIL_IF(push_inst(compiler, op_reg | WORD | RD(EQUAL_FLAG) | RS1(src1) | RS2(src2)));
+
+ if (flags & UNUSED_DEST)
+ return SLJIT_SUCCESS;
+ return push_inst(compiler, op_reg | WORD | RD(dst) | RS1(src1) | RS2(src2));
+}
+
+#undef IMM_EXTEND
+
+static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags,
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2, sljit_sw src2w)
+{
+	/* arg1 goes to TMP_REG1 or src reg
+	   arg2 goes to TMP_REG2, imm or src reg
+	   TMP_REG3 can be used for caching
+	   result goes to TMP_REG2, so storing the result can use TMP_REG1 and TMP_REG3. */
+ sljit_s32 dst_r = TMP_REG2;
+ sljit_s32 src1_r;
+ sljit_sw src2_r = 0;
+ sljit_s32 sugg_src2_r = TMP_REG2;
+
+ if (!(flags & ALT_KEEP_CACHE)) {
+ compiler->cache_arg = 0;
+ compiler->cache_argw = 0;
+ }
+
+ if (dst == 0) {
+ SLJIT_ASSERT(HAS_FLAGS(op));
+ flags |= UNUSED_DEST;
+ dst = TMP_REG2;
+ }
+ else if (FAST_IS_REG(dst)) {
+ dst_r = dst;
+ flags |= REG_DEST;
+ if (flags & MOVE_OP)
+ sugg_src2_r = dst_r;
+ }
+ else if ((dst & SLJIT_MEM) && !getput_arg_fast(compiler, flags | ARG_TEST, TMP_REG1, dst, dstw))
+ flags |= SLOW_DEST;
+
+ if (flags & IMM_OP) {
+ if (src2 == SLJIT_IMM && src2w != 0 && src2w <= SIMM_MAX && src2w >= SIMM_MIN) {
+ flags |= SRC2_IMM;
+ src2_r = src2w;
+ }
+ else if ((flags & CUMULATIVE_OP) && src1 == SLJIT_IMM && src1w != 0 && src1w <= SIMM_MAX && src1w >= SIMM_MIN) {
+ flags |= SRC2_IMM;
+ src2_r = src1w;
+
+ /* And swap arguments. */
+ src1 = src2;
+ src1w = src2w;
+ src2 = SLJIT_IMM;
+			/* Setting src2w is unnecessary: src2_r already holds the immediate. */
+ }
+ }
+
+ /* Source 1. */
+ if (FAST_IS_REG(src1)) {
+ src1_r = src1;
+ flags |= REG1_SOURCE;
+ }
+ else if (src1 == SLJIT_IMM) {
+ if (src1w) {
+ FAIL_IF(load_immediate(compiler, TMP_REG1, src1w, TMP_REG3));
+ src1_r = TMP_REG1;
+ }
+ else
+ src1_r = TMP_ZERO;
+ }
+ else {
+ if (getput_arg_fast(compiler, flags | LOAD_DATA, TMP_REG1, src1, src1w))
+ FAIL_IF(compiler->error);
+ else
+ flags |= SLOW_SRC1;
+ src1_r = TMP_REG1;
+ }
+
+ /* Source 2. */
+ if (FAST_IS_REG(src2)) {
+ src2_r = src2;
+ flags |= REG2_SOURCE;
+ if ((flags & (REG_DEST | MOVE_OP)) == MOVE_OP)
+ dst_r = (sljit_s32)src2_r;
+ }
+ else if (src2 == SLJIT_IMM) {
+ if (!(flags & SRC2_IMM)) {
+ if (src2w) {
+ FAIL_IF(load_immediate(compiler, sugg_src2_r, src2w, TMP_REG3));
+ src2_r = sugg_src2_r;
+ }
+ else {
+ src2_r = TMP_ZERO;
+ if (flags & MOVE_OP) {
+ if (dst & SLJIT_MEM)
+ dst_r = 0;
+ else
+ op = SLJIT_MOV;
+ }
+ }
+ }
+ }
+ else {
+ if (getput_arg_fast(compiler, flags | LOAD_DATA, sugg_src2_r, src2, src2w))
+ FAIL_IF(compiler->error);
+ else
+ flags |= SLOW_SRC2;
+ src2_r = sugg_src2_r;
+ }
+
+ if ((flags & (SLOW_SRC1 | SLOW_SRC2)) == (SLOW_SRC1 | SLOW_SRC2)) {
+ SLJIT_ASSERT(src2_r == TMP_REG2);
+ if (!can_cache(src1, src1w, src2, src2w) && can_cache(src1, src1w, dst, dstw)) {
+ FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG2, src2, src2w, src1, src1w));
+ FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG1, src1, src1w, dst, dstw));
+ }
+ else {
+ FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG1, src1, src1w, src2, src2w));
+ FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG2, src2, src2w, dst, dstw));
+ }
+ }
+ else if (flags & SLOW_SRC1)
+ FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG1, src1, src1w, dst, dstw));
+ else if (flags & SLOW_SRC2)
+ FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, sugg_src2_r, src2, src2w, dst, dstw));
+
+ FAIL_IF(emit_single_op(compiler, op, flags, dst_r, src1_r, src2_r));
+
+ if (dst & SLJIT_MEM) {
+ if (!(flags & SLOW_DEST)) {
+ getput_arg_fast(compiler, flags, dst_r, dst, dstw);
+ return compiler->error;
+ }
+ return getput_arg(compiler, flags, dst_r, dst, dstw, 0, 0);
+ }
+
+ return SLJIT_SUCCESS;
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op)
+{
+#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64)
+ sljit_ins word = (sljit_ins)(op & SLJIT_32) >> 5;
+
+ SLJIT_ASSERT(word == 0 || word == 0x8);
+#endif /* SLJIT_CONFIG_RISCV_64 */
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_op0(compiler, op));
+
+ switch (GET_OPCODE(op)) {
+ case SLJIT_BREAKPOINT:
+ return push_inst(compiler, EBREAK);
+ case SLJIT_NOP:
+ return push_inst(compiler, ADDI | RD(TMP_ZERO) | RS1(TMP_ZERO) | IMM_I(0));
+ case SLJIT_LMUL_UW:
+ FAIL_IF(push_inst(compiler, ADDI | RD(TMP_REG1) | RS1(SLJIT_R1) | IMM_I(0)));
+ FAIL_IF(push_inst(compiler, MULHU | RD(SLJIT_R1) | RS1(SLJIT_R0) | RS2(SLJIT_R1)));
+ return push_inst(compiler, MUL | RD(SLJIT_R0) | RS1(SLJIT_R0) | RS2(TMP_REG1));
+ case SLJIT_LMUL_SW:
+ FAIL_IF(push_inst(compiler, ADDI | RD(TMP_REG1) | RS1(SLJIT_R1) | IMM_I(0)));
+ FAIL_IF(push_inst(compiler, MULH | RD(SLJIT_R1) | RS1(SLJIT_R0) | RS2(SLJIT_R1)));
+ return push_inst(compiler, MUL | RD(SLJIT_R0) | RS1(SLJIT_R0) | RS2(TMP_REG1));
+ case SLJIT_DIVMOD_UW:
+ FAIL_IF(push_inst(compiler, ADDI | RD(TMP_REG1) | RS1(SLJIT_R0) | IMM_I(0)));
+ FAIL_IF(push_inst(compiler, DIVU | WORD | RD(SLJIT_R0) | RS1(SLJIT_R0) | RS2(SLJIT_R1)));
+ return push_inst(compiler, REMU | WORD | RD(SLJIT_R1) | RS1(TMP_REG1) | RS2(SLJIT_R1));
+ case SLJIT_DIVMOD_SW:
+ FAIL_IF(push_inst(compiler, ADDI | RD(TMP_REG1) | RS1(SLJIT_R0) | IMM_I(0)));
+ FAIL_IF(push_inst(compiler, DIV | WORD | RD(SLJIT_R0) | RS1(SLJIT_R0) | RS2(SLJIT_R1)));
+ return push_inst(compiler, REM | WORD | RD(SLJIT_R1) | RS1(TMP_REG1) | RS2(SLJIT_R1));
+ case SLJIT_DIV_UW:
+ return push_inst(compiler, DIVU | WORD | RD(SLJIT_R0) | RS1(SLJIT_R0) | RS2(SLJIT_R1));
+ case SLJIT_DIV_SW:
+ return push_inst(compiler, DIV | WORD | RD(SLJIT_R0) | RS1(SLJIT_R0) | RS2(SLJIT_R1));
+ case SLJIT_ENDBR:
+ case SLJIT_SKIP_FRAMES_BEFORE_RETURN:
+ return SLJIT_SUCCESS;
+ }
+
+ return SLJIT_SUCCESS;
+}
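+
+/* Result registers for the op0 pairs above (derived from the instruction
+   sequences): LMUL_UW/LMUL_SW leave the low product word in R0 and the high
+   word in R1; DIVMOD_UW/DIVMOD_SW leave the quotient in R0 and the remainder
+   in R1. */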
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 src, sljit_sw srcw)
+{
+ sljit_s32 flags = 0;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_op1(compiler, op, dst, dstw, src, srcw));
+ ADJUST_LOCAL_OFFSET(dst, dstw);
+ ADJUST_LOCAL_OFFSET(src, srcw);
+
+#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64)
+ if (op & SLJIT_32)
+ flags = INT_DATA | SIGNED_DATA;
+#endif
+
+ switch (GET_OPCODE(op)) {
+ case SLJIT_MOV:
+#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32)
+ case SLJIT_MOV_U32:
+ case SLJIT_MOV_S32:
+ case SLJIT_MOV32:
+#endif
+ case SLJIT_MOV_P:
+ return emit_op(compiler, SLJIT_MOV, WORD_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, srcw);
+
+#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64)
+ case SLJIT_MOV_U32:
+ return emit_op(compiler, SLJIT_MOV_U32, INT_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, (src == SLJIT_IMM) ? (sljit_u32)srcw : srcw);
+
+ case SLJIT_MOV_S32:
+	/* Logical operators have no W variant, so a sign-extended input is necessary for them. */
+ case SLJIT_MOV32:
+ return emit_op(compiler, SLJIT_MOV_S32, INT_DATA | SIGNED_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, (src == SLJIT_IMM) ? (sljit_s32)srcw : srcw);
+#endif
+
+ case SLJIT_MOV_U8:
+ return emit_op(compiler, op, BYTE_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, (src == SLJIT_IMM) ? (sljit_u8)srcw : srcw);
+
+ case SLJIT_MOV_S8:
+ return emit_op(compiler, op, BYTE_DATA | SIGNED_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, (src == SLJIT_IMM) ? (sljit_s8)srcw : srcw);
+
+ case SLJIT_MOV_U16:
+ return emit_op(compiler, op, HALF_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, (src == SLJIT_IMM) ? (sljit_u16)srcw : srcw);
+
+ case SLJIT_MOV_S16:
+ return emit_op(compiler, op, HALF_DATA | SIGNED_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, (src == SLJIT_IMM) ? (sljit_s16)srcw : srcw);
+
+ case SLJIT_CLZ:
+ case SLJIT_CTZ:
+ case SLJIT_REV:
+ return emit_op(compiler, op, flags, dst, dstw, TMP_REG1, 0, src, srcw);
+
+ case SLJIT_REV_U16:
+ case SLJIT_REV_S16:
+ return emit_op(compiler, op, HALF_DATA, dst, dstw, TMP_REG1, 0, src, srcw);
+
+ case SLJIT_REV_U32:
+ case SLJIT_REV_S32:
+ return emit_op(compiler, op | SLJIT_32, INT_DATA, dst, dstw, TMP_REG1, 0, src, srcw);
+ }
+
+ SLJIT_UNREACHABLE();
+ return SLJIT_SUCCESS;
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2, sljit_sw src2w)
+{
+ sljit_s32 flags = 0;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_op2(compiler, op, 0, dst, dstw, src1, src1w, src2, src2w));
+ ADJUST_LOCAL_OFFSET(dst, dstw);
+ ADJUST_LOCAL_OFFSET(src1, src1w);
+ ADJUST_LOCAL_OFFSET(src2, src2w);
+
+#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64)
+ if (op & SLJIT_32) {
+ flags |= INT_DATA | SIGNED_DATA;
+ if (src1 == SLJIT_IMM)
+ src1w = (sljit_s32)src1w;
+ if (src2 == SLJIT_IMM)
+ src2w = (sljit_s32)src2w;
+ }
+#endif
+
+ switch (GET_OPCODE(op)) {
+ case SLJIT_ADD:
+ case SLJIT_ADDC:
+ compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD;
+ return emit_op(compiler, op, flags | CUMULATIVE_OP | IMM_OP, dst, dstw, src1, src1w, src2, src2w);
+
+ case SLJIT_SUB:
+ case SLJIT_SUBC:
+ compiler->status_flags_state = SLJIT_CURRENT_FLAGS_SUB;
+ return emit_op(compiler, op, flags | IMM_OP, dst, dstw, src1, src1w, src2, src2w);
+
+ case SLJIT_MUL:
+ compiler->status_flags_state = 0;
+ return emit_op(compiler, op, flags | CUMULATIVE_OP, dst, dstw, src1, src1w, src2, src2w);
+
+ case SLJIT_AND:
+ case SLJIT_OR:
+ case SLJIT_XOR:
+ return emit_op(compiler, op, flags | CUMULATIVE_OP | IMM_OP, dst, dstw, src1, src1w, src2, src2w);
+
+ case SLJIT_SHL:
+ case SLJIT_MSHL:
+ case SLJIT_LSHR:
+ case SLJIT_MLSHR:
+ case SLJIT_ASHR:
+ case SLJIT_MASHR:
+ case SLJIT_ROTL:
+ case SLJIT_ROTR:
+ if (src2 == SLJIT_IMM) {
+#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32)
+ src2w &= 0x1f;
+#else /* !SLJIT_CONFIG_RISCV_32 */
+ if (op & SLJIT_32)
+ src2w &= 0x1f;
+ else
+ src2w &= 0x3f;
+#endif /* SLJIT_CONFIG_RISCV_32 */
+ }
+
+ return emit_op(compiler, op, flags | IMM_OP, dst, dstw, src1, src1w, src2, src2w);
+ }
+
+ SLJIT_UNREACHABLE();
+ return SLJIT_SUCCESS;
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2u(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2, sljit_sw src2w)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_op2(compiler, op, 1, 0, 0, src1, src1w, src2, src2w));
+
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_op2(compiler, op, 0, 0, src1, src1w, src2, src2w);
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_shift_into(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst_reg,
+ sljit_s32 src1_reg,
+ sljit_s32 src2_reg,
+ sljit_s32 src3, sljit_sw src3w)
+{
+ sljit_s32 is_left;
+ sljit_ins ins1, ins2, ins3;
+#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64)
+ sljit_ins word = (sljit_ins)(op & SLJIT_32) >> 5;
+ sljit_s32 inp_flags = ((op & SLJIT_32) ? INT_DATA : WORD_DATA) | LOAD_DATA;
+ sljit_sw bit_length = (op & SLJIT_32) ? 32 : 64;
+#else /* !SLJIT_CONFIG_RISCV_64 */
+ sljit_s32 inp_flags = WORD_DATA | LOAD_DATA;
+ sljit_sw bit_length = 32;
+#endif /* SLJIT_CONFIG_RISCV_64 */
+
+ SLJIT_ASSERT(WORD == 0 || WORD == 0x8);
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_shift_into(compiler, op, dst_reg, src1_reg, src2_reg, src3, src3w));
+
+ is_left = (GET_OPCODE(op) == SLJIT_SHL || GET_OPCODE(op) == SLJIT_MSHL);
+
+ if (src1_reg == src2_reg) {
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_op2(compiler, (is_left ? SLJIT_ROTL : SLJIT_ROTR) | (op & SLJIT_32), dst_reg, 0, src1_reg, 0, src3, src3w);
+ }
+
+ ADJUST_LOCAL_OFFSET(src3, src3w);
+
+ if (src3 == SLJIT_IMM) {
+ src3w &= bit_length - 1;
+
+ if (src3w == 0)
+ return SLJIT_SUCCESS;
+
+ if (is_left) {
+ ins1 = SLLI | WORD | IMM_I(src3w);
+ src3w = bit_length - src3w;
+ ins2 = SRLI | WORD | IMM_I(src3w);
+ } else {
+ ins1 = SRLI | WORD | IMM_I(src3w);
+ src3w = bit_length - src3w;
+ ins2 = SLLI | WORD | IMM_I(src3w);
+ }
+
+ FAIL_IF(push_inst(compiler, ins1 | RD(dst_reg) | RS1(src1_reg)));
+ FAIL_IF(push_inst(compiler, ins2 | RD(TMP_REG1) | RS1(src2_reg)));
+ return push_inst(compiler, OR | RD(dst_reg) | RS1(dst_reg) | RS2(TMP_REG1));
+ }
+
+ if (src3 & SLJIT_MEM) {
+ FAIL_IF(emit_op_mem(compiler, inp_flags, TMP_REG2, src3, src3w));
+ src3 = TMP_REG2;
+ } else if (dst_reg == src3) {
+		FAIL_IF(push_inst(compiler, ADDI | WORD | RD(TMP_REG2) | RS1(src3) | IMM_I(0)));
+ src3 = TMP_REG2;
+ }
+
+ if (is_left) {
+ ins1 = SLL;
+ ins2 = SRLI;
+ ins3 = SRL;
+ } else {
+ ins1 = SRL;
+ ins2 = SLLI;
+ ins3 = SLL;
+ }
+
+ FAIL_IF(push_inst(compiler, ins1 | WORD | RD(dst_reg) | RS1(src1_reg) | RS2(src3)));
+
+ if (!(op & SLJIT_SHIFT_INTO_NON_ZERO)) {
+ FAIL_IF(push_inst(compiler, ins2 | WORD | RD(TMP_REG1) | RS1(src2_reg) | IMM_I(1)));
+ FAIL_IF(push_inst(compiler, XORI | RD(TMP_REG2) | RS1(src3) | IMM_I((sljit_ins)bit_length - 1)));
+ src2_reg = TMP_REG1;
+ } else
+ FAIL_IF(push_inst(compiler, SUB | WORD | RD(TMP_REG2) | RS1(TMP_ZERO) | RS2(src3)));
+
+ FAIL_IF(push_inst(compiler, ins3 | WORD | RD(TMP_REG1) | RS1(src2_reg) | RS2(TMP_REG2)));
+ return push_inst(compiler, OR | RD(dst_reg) | RS1(dst_reg) | RS2(TMP_REG1));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 src, sljit_sw srcw)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_op_src(compiler, op, src, srcw));
+ ADJUST_LOCAL_OFFSET(src, srcw);
+
+ switch (op) {
+ case SLJIT_FAST_RETURN:
+ if (FAST_IS_REG(src))
+ FAIL_IF(push_inst(compiler, ADDI | RD(RETURN_ADDR_REG) | RS1(src) | IMM_I(0)));
+ else
+ FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, RETURN_ADDR_REG, src, srcw));
+
+ return push_inst(compiler, JALR | RD(TMP_ZERO) | RS1(RETURN_ADDR_REG) | IMM_I(0));
+ case SLJIT_SKIP_FRAMES_BEFORE_FAST_RETURN:
+ return SLJIT_SUCCESS;
+ case SLJIT_PREFETCH_L1:
+ case SLJIT_PREFETCH_L2:
+ case SLJIT_PREFETCH_L3:
+ case SLJIT_PREFETCH_ONCE:
+ return SLJIT_SUCCESS;
+ }
+
+ return SLJIT_SUCCESS;
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_dst(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw)
+{
+ sljit_s32 dst_r;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_op_dst(compiler, op, dst, dstw));
+ ADJUST_LOCAL_OFFSET(dst, dstw);
+
+ switch (op) {
+ case SLJIT_FAST_ENTER:
+ if (FAST_IS_REG(dst))
+ return push_inst(compiler, ADDI | RD(dst) | RS1(RETURN_ADDR_REG) | IMM_I(0));
+
+ SLJIT_ASSERT(RETURN_ADDR_REG == TMP_REG2);
+ break;
+ case SLJIT_GET_RETURN_ADDRESS:
+ dst_r = FAST_IS_REG(dst) ? dst : TMP_REG2;
+ FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, dst_r, SLJIT_MEM1(SLJIT_SP), compiler->local_size - SSIZE_OF(sw)));
+ break;
+ }
+
+ if (dst & SLJIT_MEM)
+ return emit_op_mem(compiler, WORD_DATA, TMP_REG2, dst, dstw);
+
+ return SLJIT_SUCCESS;
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 type, sljit_s32 reg)
+{
+ CHECK_REG_INDEX(check_sljit_get_register_index(type, reg));
+
+ if (type == SLJIT_GP_REGISTER)
+ return reg_map[reg];
+
+ if (type != SLJIT_FLOAT_REGISTER)
+ return -1;
+
+ return freg_map[reg];
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler,
+ void *instruction, sljit_u32 size)
+{
+ SLJIT_UNUSED_ARG(size);
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_op_custom(compiler, instruction, size));
+
+ return push_inst(compiler, *(sljit_ins*)instruction);
+}
+
+/* --------------------------------------------------------------------- */
+/* Floating point operators */
+/* --------------------------------------------------------------------- */
+
+#define FLOAT_DATA(op) (DOUBLE_DATA | ((op & SLJIT_32) >> 7))
+#define FMT(op) ((sljit_ins)((op & SLJIT_32) ^ SLJIT_32) << 17)
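+/* Assuming SLJIT_32 == 0x100: FLOAT_DATA(op) yields DOUBLE_DATA (0x10) for f64
+   ops and SINGLE_DATA (0x12) for f32 ops, and FMT(op) places the RISC-V fmt
+   field at bit 25 (01 for double, 00 for single). */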
+
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 src, sljit_sw srcw)
+{
+#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32)
+# define flags (sljit_u32)0
+#else
+ sljit_u32 flags = ((sljit_u32)(GET_OPCODE(op) == SLJIT_CONV_SW_FROM_F64)) << 21;
+#endif
+ sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_REG2;
+
+ if (src & SLJIT_MEM) {
+ FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src, srcw, dst, dstw));
+ src = TMP_FREG1;
+ }
+
+ FAIL_IF(push_inst(compiler, FCVT_W_S | FMT(op) | flags | RD(dst_r) | FRS1(src)));
+
+ /* Store the integer conversion result. */
+ if (dst & SLJIT_MEM) {
+#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32)
+ return emit_op_mem2(compiler, WORD_DATA, TMP_REG2, dst, dstw, 0, 0);
+#else
+ return emit_op_mem2(compiler, flags ? WORD_DATA : INT_DATA, TMP_REG2, dst, dstw, 0, 0);
+#endif
+ }
+ return SLJIT_SUCCESS;
+
+#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32)
+# undef flags
+#endif
+}
+
+static sljit_s32 sljit_emit_fop1_conv_f64_from_w(struct sljit_compiler *compiler, sljit_ins ins,
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 src, sljit_sw srcw)
+{
+ sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
+
+ if (src & SLJIT_MEM) {
+#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32)
+ FAIL_IF(emit_op_mem2(compiler, WORD_DATA | LOAD_DATA, TMP_REG1, src, srcw, dst, dstw));
+#else /* SLJIT_CONFIG_RISCV_32 */
+ FAIL_IF(emit_op_mem2(compiler, ((ins & (1 << 21)) ? WORD_DATA : INT_DATA) | LOAD_DATA, TMP_REG1, src, srcw, dst, dstw));
+#endif /* !SLJIT_CONFIG_RISCV_32 */
+ src = TMP_REG1;
+ } else if (src == SLJIT_IMM) {
+ FAIL_IF(load_immediate(compiler, TMP_REG1, srcw, TMP_REG3));
+ src = TMP_REG1;
+ }
+
+ FAIL_IF(push_inst(compiler, ins | FRD(dst_r) | RS1(src)));
+
+ if (dst & SLJIT_MEM)
+ return emit_op_mem2(compiler, DOUBLE_DATA | ((sljit_s32)(~ins >> 24) & 0x2), TMP_FREG1, dst, dstw, 0, 0);
+ return SLJIT_SUCCESS;
+}
+
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 src, sljit_sw srcw)
+{
+ sljit_ins ins = FCVT_S_W | FMT(op);
+
+#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32)
+ if (op & SLJIT_32)
+ ins |= F3(0x7);
+#else /* !SLJIT_CONFIG_RISCV_32 */
+ if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_SW)
+ ins |= (1 << 21);
+ else if (src == SLJIT_IMM)
+ srcw = (sljit_s32)srcw;
+
+ if (op != SLJIT_CONV_F64_FROM_S32)
+ ins |= F3(0x7);
+#endif /* SLJIT_CONFIG_RISCV_32 */
+
+ return sljit_emit_fop1_conv_f64_from_w(compiler, ins, dst, dstw, src, srcw);
+}
+
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_uw(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 src, sljit_sw srcw)
+{
+ sljit_ins ins = FCVT_S_WU | FMT(op);
+
+#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32)
+ if (op & SLJIT_32)
+ ins |= F3(0x7);
+#else /* !SLJIT_CONFIG_RISCV_32 */
+ if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_UW)
+ ins |= (1 << 21);
+ else if (src == SLJIT_IMM)
+ srcw = (sljit_u32)srcw;
+
+ if (op != SLJIT_CONV_F64_FROM_U32)
+ ins |= F3(0x7);
+#endif /* SLJIT_CONFIG_RISCV_32 */
+
+ return sljit_emit_fop1_conv_f64_from_w(compiler, ins, dst, dstw, src, srcw);
+}
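The F3(0x7) sprinkled through these converters selects the dynamic rounding mode. A hedged sketch, assuming F3 places its operand in the funct3/rm field at bits 14:12 of the base RISC-V encoding:

#include <assert.h>
#include <stdint.h>

#define EX_F3(f) ((uint32_t)(f) << 12) /* funct3/rm field, bits 14:12 (assumed) */

int main(void)
{
	/* rm = 0b111 (DYN): round according to fcsr. Conversions that are
	   always exact, such as FCVT.D.W, can leave the field zero, which
	   matches the *_FROM_S32/U32 special cases above. */
	assert(EX_F3(0x7) == 0x7000);
	return 0;
}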
+
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2, sljit_sw src2w)
+{
+ sljit_ins inst;
+
+ if (src1 & SLJIT_MEM) {
+ FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, src2, src2w));
+ src1 = TMP_FREG1;
+ }
+
+ if (src2 & SLJIT_MEM) {
+ FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, 0, 0));
+ src2 = TMP_FREG2;
+ }
+
+ switch (GET_FLAG_TYPE(op)) {
+ case SLJIT_F_EQUAL:
+ case SLJIT_ORDERED_EQUAL:
+ inst = FEQ_S | FMT(op) | RD(OTHER_FLAG) | FRS1(src1) | FRS2(src2);
+ break;
+ case SLJIT_F_LESS:
+ case SLJIT_ORDERED_LESS:
+ inst = FLT_S | FMT(op) | RD(OTHER_FLAG) | FRS1(src1) | FRS2(src2);
+ break;
+ case SLJIT_ORDERED_GREATER:
+ inst = FLT_S | FMT(op) | RD(OTHER_FLAG) | FRS1(src2) | FRS2(src1);
+ break;
+ case SLJIT_F_GREATER:
+ case SLJIT_UNORDERED_OR_GREATER:
+ inst = FLE_S | FMT(op) | RD(OTHER_FLAG) | FRS1(src1) | FRS2(src2);
+ break;
+ case SLJIT_UNORDERED_OR_LESS:
+ inst = FLE_S | FMT(op) | RD(OTHER_FLAG) | FRS1(src2) | FRS2(src1);
+ break;
+ case SLJIT_UNORDERED_OR_EQUAL:
+ FAIL_IF(push_inst(compiler, FLT_S | FMT(op) | RD(OTHER_FLAG) | FRS1(src1) | FRS2(src2)));
+ FAIL_IF(push_inst(compiler, FLT_S | FMT(op) | RD(TMP_REG1) | FRS1(src2) | FRS2(src1)));
+ inst = OR | RD(OTHER_FLAG) | RS1(OTHER_FLAG) | RS2(TMP_REG1);
+ break;
+ default: /* SLJIT_UNORDERED */
+ if (src1 == src2) {
+ inst = FEQ_S | FMT(op) | RD(OTHER_FLAG) | FRS1(src1) | FRS2(src1);
+ break;
+ }
+ FAIL_IF(push_inst(compiler, FEQ_S | FMT(op) | RD(OTHER_FLAG) | FRS1(src1) | FRS2(src1)));
+ FAIL_IF(push_inst(compiler, FEQ_S | FMT(op) | RD(TMP_REG1) | FRS1(src2) | FRS2(src2)));
+ inst = AND | RD(OTHER_FLAG) | RS1(OTHER_FLAG) | RS2(TMP_REG1);
+ break;
+ }
+
+ return push_inst(compiler, inst);
+}
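The SLJIT_UNORDERED fallback leans on IEEE 754 semantics: FEQ yields 0 whenever an operand is NaN, so comparing a register with itself is a NaN test, and AND-ing two self-comparisons detects that both operands are ordered. A host-side illustration:

#include <assert.h>
#include <math.h>

int main(void)
{
	double n = nan("");
	assert(!(n == n)); /* NaN never compares equal, even to itself */
	assert(1.0 == 1.0); /* ordered values do */
	return 0;
}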
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 src, sljit_sw srcw)
+{
+ sljit_s32 dst_r;
+
+ CHECK_ERROR();
+ compiler->cache_arg = 0;
+ compiler->cache_argw = 0;
+
+ SLJIT_COMPILE_ASSERT((SLJIT_32 == 0x100) && !(DOUBLE_DATA & 0x2), float_transfer_bit_error);
+ SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw);
+
+ if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32)
+ op ^= SLJIT_32;
+
+ dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
+
+ if (src & SLJIT_MEM) {
+ FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, dst_r, src, srcw, dst, dstw));
+ src = dst_r;
+ }
+
+ switch (GET_OPCODE(op)) {
+ case SLJIT_MOV_F64:
+ if (src != dst_r) {
+ if (dst_r != TMP_FREG1)
+ FAIL_IF(push_inst(compiler, FSGNJ_S | FMT(op) | FRD(dst_r) | FRS1(src) | FRS2(src)));
+ else
+ dst_r = src;
+ }
+ break;
+ case SLJIT_NEG_F64:
+ FAIL_IF(push_inst(compiler, FSGNJN_S | FMT(op) | FRD(dst_r) | FRS1(src) | FRS2(src)));
+ break;
+ case SLJIT_ABS_F64:
+ FAIL_IF(push_inst(compiler, FSGNJX_S | FMT(op) | FRD(dst_r) | FRS1(src) | FRS2(src)));
+ break;
+ case SLJIT_CONV_F64_FROM_F32:
+ /* The SLJIT_32 bit is inverted because sljit_f32 needs to be loaded from memory. */
+ FAIL_IF(push_inst(compiler, FCVT_S_D | ((op & SLJIT_32) ? (1 << 25) : ((1 << 20) | F3(7))) | FRD(dst_r) | FRS1(src)));
+ op ^= SLJIT_32;
+ break;
+ }
+
+ if (dst & SLJIT_MEM)
+ return emit_op_mem2(compiler, FLOAT_DATA(op), dst_r, dst, dstw, 0, 0);
+ return SLJIT_SUCCESS;
+}
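The three sign-injection cases above are the standard RISC-V idioms: with both source operands equal, FSGNJ is a register move, FSGNJN a negation, and FSGNJX an absolute value. A hedged host-side model of those semantics:

#include <assert.h>
#include <math.h>

/* sign of result = sign of b / inverted sign of b / sign(a) XOR sign(b) */
static double fsgnj(double a, double b)  { return copysign(a, b); }
static double fsgnjn(double a, double b) { return copysign(a, -b); }
static double fsgnjx(double a, double b) { return signbit(b) ? -a : a; }

int main(void)
{
	assert(fsgnj(-2.0, -2.0) == -2.0);  /* FMOV */
	assert(fsgnjn(-2.0, -2.0) == 2.0);  /* FNEG */
	assert(fsgnjx(-2.0, -2.0) == 2.0);  /* FABS */
	return 0;
}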
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2, sljit_sw src2w)
+{
+ sljit_s32 dst_r, flags = 0;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w));
+ ADJUST_LOCAL_OFFSET(dst, dstw);
+ ADJUST_LOCAL_OFFSET(src1, src1w);
+ ADJUST_LOCAL_OFFSET(src2, src2w);
+
+ compiler->cache_arg = 0;
+ compiler->cache_argw = 0;
+
+ dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG2;
+
+ if (src1 & SLJIT_MEM) {
+ if (getput_arg_fast(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w)) {
+ FAIL_IF(compiler->error);
+ src1 = TMP_FREG1;
+ } else
+ flags |= SLOW_SRC1;
+ }
+
+ if (src2 & SLJIT_MEM) {
+ if (getput_arg_fast(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w)) {
+ FAIL_IF(compiler->error);
+ src2 = TMP_FREG2;
+ } else
+ flags |= SLOW_SRC2;
+ }
+
+ if ((flags & (SLOW_SRC1 | SLOW_SRC2)) == (SLOW_SRC1 | SLOW_SRC2)) {
+ if (!can_cache(src1, src1w, src2, src2w) && can_cache(src1, src1w, dst, dstw)) {
+ FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, src1, src1w));
+ FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, dst, dstw));
+ }
+ else {
+ FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, src2, src2w));
+ FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, dst, dstw));
+ }
+ }
+ else if (flags & SLOW_SRC1)
+ FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, dst, dstw));
+ else if (flags & SLOW_SRC2)
+ FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, dst, dstw));
+
+ if (flags & SLOW_SRC1)
+ src1 = TMP_FREG1;
+ if (flags & SLOW_SRC2)
+ src2 = TMP_FREG2;
+
+ switch (GET_OPCODE(op)) {
+ case SLJIT_ADD_F64:
+ FAIL_IF(push_inst(compiler, FADD_S | FMT(op) | FRD(dst_r) | FRS1(src1) | FRS2(src2)));
+ break;
+
+ case SLJIT_SUB_F64:
+ FAIL_IF(push_inst(compiler, FSUB_S | FMT(op) | FRD(dst_r) | FRS1(src1) | FRS2(src2)));
+ break;
+
+ case SLJIT_MUL_F64:
+ FAIL_IF(push_inst(compiler, FMUL_S | FMT(op) | FRD(dst_r) | FRS1(src1) | FRS2(src2)));
+ break;
+
+ case SLJIT_DIV_F64:
+ FAIL_IF(push_inst(compiler, FDIV_S | FMT(op) | FRD(dst_r) | FRS1(src1) | FRS2(src2)));
+ break;
+
+ case SLJIT_COPYSIGN_F64:
+ return push_inst(compiler, FSGNJ_S | FMT(op) | FRD(dst_r) | FRS1(src1) | FRS2(src2));
+ }
+
+ if (dst_r == TMP_FREG2)
+ FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op), TMP_FREG2, dst, dstw, 0, 0));
+
+ return SLJIT_SUCCESS;
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fset32(struct sljit_compiler *compiler,
+ sljit_s32 freg, sljit_f32 value)
+{
+ union {
+ sljit_s32 imm;
+ sljit_f32 value;
+ } u;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fset32(compiler, freg, value));
+
+ u.value = value;
+
+ if (u.imm == 0)
+ return push_inst(compiler, FMV_W_X | RS1(TMP_ZERO) | FRD(freg));
+
+ FAIL_IF(load_immediate(compiler, TMP_REG1, u.imm, TMP_REG3));
+ return push_inst(compiler, FMV_W_X | RS1(TMP_REG1) | FRD(freg));
+}
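sljit_emit_fset32 reinterprets the float's bit pattern through a union before FMV.W.X moves it into a float register; the zero fast path works because +0.0f is all-zero bits, exactly what the zero register provides. The same punning can be checked on the host:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	union { int32_t imm; float value; } u;
	u.value = 1.0f;
	assert(u.imm == 0x3f800000); /* IEEE 754 single-precision 1.0 */
	u.value = 0.0f;
	assert(u.imm == 0);          /* +0.0 is all-zero bits */
	return 0;
}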
+
+/* --------------------------------------------------------------------- */
+/* Conditional instructions */
+/* --------------------------------------------------------------------- */
+
+SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compiler *compiler)
+{
+ struct sljit_label *label;
+
+ CHECK_ERROR_PTR();
+ CHECK_PTR(check_sljit_emit_label(compiler));
+
+ if (compiler->last_label && compiler->last_label->size == compiler->size)
+ return compiler->last_label;
+
+ label = (struct sljit_label*)ensure_abuf(compiler, sizeof(struct sljit_label));
+ PTR_FAIL_IF(!label);
+ set_label(label, compiler);
+ return label;
+}
+
+#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32)
+#define BRANCH_LENGTH ((sljit_ins)(3 * sizeof(sljit_ins)) << 7)
+#else
+#define BRANCH_LENGTH ((sljit_ins)(7 * sizeof(sljit_ins)) << 7)
+#endif
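BRANCH_LENGTH pre-encodes the taken-branch displacement: reaching an arbitrary address takes up to 3 instructions on RV32 (constant load plus JALR) and up to 7 on RV64, and a B-type instruction stores imm[4:1] in bits 11:8, so a small 4-byte-aligned byte offset encodes as offset << 7. A quick sketch of that identity (small positive offsets only):

#include <assert.h>
#include <stdint.h>

/* B-type, small positive offsets: imm[4:1] lives in bits 11:8. */
static uint32_t btype_small(uint32_t byte_offset)
{
	return ((byte_offset >> 1) & 0xf) << 8;
}

int main(void)
{
	assert(btype_small(3 * 4) == ((uint32_t)(3 * 4) << 7)); /* RV32 case */
	assert(btype_small(7 * 4) == ((uint32_t)(7 * 4) << 7)); /* RV64 case */
	return 0;
}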
+
+static sljit_ins get_jump_instruction(sljit_s32 type)
+{
+ switch (type) {
+ case SLJIT_EQUAL:
+ return BNE | RS1(EQUAL_FLAG) | RS2(TMP_ZERO);
+ case SLJIT_NOT_EQUAL:
+ return BEQ | RS1(EQUAL_FLAG) | RS2(TMP_ZERO);
+ case SLJIT_LESS:
+ case SLJIT_GREATER:
+ case SLJIT_SIG_LESS:
+ case SLJIT_SIG_GREATER:
+ case SLJIT_OVERFLOW:
+ case SLJIT_CARRY:
+ case SLJIT_F_EQUAL:
+ case SLJIT_ORDERED_EQUAL:
+ case SLJIT_ORDERED_NOT_EQUAL:
+ case SLJIT_F_LESS:
+ case SLJIT_ORDERED_LESS:
+ case SLJIT_ORDERED_GREATER:
+ case SLJIT_F_LESS_EQUAL:
+ case SLJIT_ORDERED_LESS_EQUAL:
+ case SLJIT_ORDERED_GREATER_EQUAL:
+ case SLJIT_ORDERED:
+ return BEQ | RS1(OTHER_FLAG) | RS2(TMP_ZERO);
+ case SLJIT_GREATER_EQUAL:
+ case SLJIT_LESS_EQUAL:
+ case SLJIT_SIG_GREATER_EQUAL:
+ case SLJIT_SIG_LESS_EQUAL:
+ case SLJIT_NOT_OVERFLOW:
+ case SLJIT_NOT_CARRY:
+ case SLJIT_F_NOT_EQUAL:
+ case SLJIT_UNORDERED_OR_NOT_EQUAL:
+ case SLJIT_UNORDERED_OR_EQUAL:
+ case SLJIT_F_GREATER_EQUAL:
+ case SLJIT_UNORDERED_OR_GREATER_EQUAL:
+ case SLJIT_UNORDERED_OR_LESS_EQUAL:
+ case SLJIT_F_GREATER:
+ case SLJIT_UNORDERED_OR_GREATER:
+ case SLJIT_UNORDERED_OR_LESS:
+ case SLJIT_UNORDERED:
+ return BNE | RS1(OTHER_FLAG) | RS2(TMP_ZERO);
+ default:
+ /* Not a conditional branch. */
+ return 0;
+ }
+}
+
+SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_s32 type)
+{
+ struct sljit_jump *jump;
+ sljit_ins inst;
+
+ CHECK_ERROR_PTR();
+ CHECK_PTR(check_sljit_emit_jump(compiler, type));
+
+ jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
+ PTR_FAIL_IF(!jump);
+ set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP);
+ type &= 0xff;
+
+ inst = get_jump_instruction(type);
+
+ if (inst != 0) {
+ PTR_FAIL_IF(push_inst(compiler, inst | BRANCH_LENGTH));
+ jump->flags |= IS_COND;
+ }
+
+ jump->addr = compiler->size;
+ inst = JALR | RS1(TMP_REG1) | IMM_I(0);
+
+ if (type >= SLJIT_FAST_CALL) {
+ jump->flags |= IS_CALL;
+ inst |= RD(RETURN_ADDR_REG);
+ }
+
+ PTR_FAIL_IF(push_inst(compiler, inst));
+
+ /* Maximum number of instructions required for generating a constant. */
+#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32)
+ compiler->size += 1;
+#else
+ compiler->size += 5;
+#endif
+ return jump;
+}
+
+SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 arg_types)
+{
+ SLJIT_UNUSED_ARG(arg_types);
+ CHECK_ERROR_PTR();
+ CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types));
+
+ if (type & SLJIT_CALL_RETURN) {
+ PTR_FAIL_IF(emit_stack_frame_release(compiler, 0));
+ type = SLJIT_JUMP | (type & SLJIT_REWRITABLE_JUMP);
+ }
+
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_jump(compiler, type);
+}
+
+SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2, sljit_sw src2w)
+{
+ struct sljit_jump *jump;
+ sljit_s32 flags;
+ sljit_ins inst;
+
+ CHECK_ERROR_PTR();
+ CHECK_PTR(check_sljit_emit_cmp(compiler, type, src1, src1w, src2, src2w));
+ ADJUST_LOCAL_OFFSET(src1, src1w);
+ ADJUST_LOCAL_OFFSET(src2, src2w);
+
+ compiler->cache_arg = 0;
+ compiler->cache_argw = 0;
+#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32)
+ flags = WORD_DATA | LOAD_DATA;
+#else /* !SLJIT_CONFIG_RISCV_32 */
+ flags = ((type & SLJIT_32) ? INT_DATA : WORD_DATA) | LOAD_DATA;
+#endif /* SLJIT_CONFIG_RISCV_32 */
+
+ if (src1 & SLJIT_MEM) {
+ PTR_FAIL_IF(emit_op_mem2(compiler, flags, TMP_REG1, src1, src1w, src2, src2w));
+ src1 = TMP_REG1;
+ }
+
+ if (src2 & SLJIT_MEM) {
+ PTR_FAIL_IF(emit_op_mem2(compiler, flags, TMP_REG2, src2, src2w, 0, 0));
+ src2 = TMP_REG2;
+ }
+
+ if (src1 == SLJIT_IMM) {
+ if (src1w != 0) {
+ PTR_FAIL_IF(load_immediate(compiler, TMP_REG1, src1w, TMP_REG3));
+ src1 = TMP_REG1;
+ }
+ else
+ src1 = TMP_ZERO;
+ }
+
+ if (src2 == SLJIT_IMM) {
+ if (src2w != 0) {
+ PTR_FAIL_IF(load_immediate(compiler, TMP_REG2, src2w, TMP_REG3));
+ src2 = TMP_REG2;
+ }
+ else
+ src2 = TMP_ZERO;
+ }
+
+ jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
+ PTR_FAIL_IF(!jump);
+ set_jump(jump, compiler, (sljit_u32)((type & SLJIT_REWRITABLE_JUMP) | IS_COND));
+ type &= 0xff;
+
+ switch (type) {
+ case SLJIT_EQUAL:
+ inst = BNE | RS1(src1) | RS2(src2) | BRANCH_LENGTH;
+ break;
+ case SLJIT_NOT_EQUAL:
+ inst = BEQ | RS1(src1) | RS2(src2) | BRANCH_LENGTH;
+ break;
+ case SLJIT_LESS:
+ inst = BGEU | RS1(src1) | RS2(src2) | BRANCH_LENGTH;
+ break;
+ case SLJIT_GREATER_EQUAL:
+ inst = BLTU | RS1(src1) | RS2(src2) | BRANCH_LENGTH;
+ break;
+ case SLJIT_GREATER:
+ inst = BGEU | RS1(src2) | RS2(src1) | BRANCH_LENGTH;
+ break;
+ case SLJIT_LESS_EQUAL:
+ inst = BLTU | RS1(src2) | RS2(src1) | BRANCH_LENGTH;
+ break;
+ case SLJIT_SIG_LESS:
+ inst = BGE | RS1(src1) | RS2(src2) | BRANCH_LENGTH;
+ break;
+ case SLJIT_SIG_GREATER_EQUAL:
+ inst = BLT | RS1(src1) | RS2(src2) | BRANCH_LENGTH;
+ break;
+ case SLJIT_SIG_GREATER:
+ inst = BGE | RS1(src2) | RS2(src1) | BRANCH_LENGTH;
+ break;
+ case SLJIT_SIG_LESS_EQUAL:
+ inst = BLT | RS1(src2) | RS2(src1) | BRANCH_LENGTH;
+ break;
+ }
+
+ PTR_FAIL_IF(push_inst(compiler, inst));
+
+ jump->addr = compiler->size;
+ PTR_FAIL_IF(push_inst(compiler, JALR | RD(TMP_ZERO) | RS1(TMP_REG1) | IMM_I(0)));
+
+ /* Maximum number of instructions required for generating a constant. */
+#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32)
+ compiler->size += 1;
+#else
+ compiler->size += 5;
+#endif
+ return jump;
+}
+
+#undef BRANCH_LENGTH
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw)
+{
+ struct sljit_jump *jump;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_ijump(compiler, type, src, srcw));
+
+ if (src != SLJIT_IMM) {
+ if (src & SLJIT_MEM) {
+ ADJUST_LOCAL_OFFSET(src, srcw);
+ FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_REG1, src, srcw));
+ src = TMP_REG1;
+ }
+ return push_inst(compiler, JALR | RD((type >= SLJIT_FAST_CALL) ? RETURN_ADDR_REG : TMP_ZERO) | RS1(src) | IMM_I(0));
+ }
+
+ /* These jumps are converted to jump/call instructions when possible. */
+ jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
+ FAIL_IF(!jump);
+ set_jump(jump, compiler, JUMP_ADDR | ((type >= SLJIT_FAST_CALL) ? IS_CALL : 0));
+ jump->u.target = (sljit_uw)srcw;
+
+ jump->addr = compiler->size;
+ FAIL_IF(push_inst(compiler, JALR | RD((type >= SLJIT_FAST_CALL) ? RETURN_ADDR_REG : TMP_ZERO) | RS1(TMP_REG1) | IMM_I(0)));
+
+ /* Maximum number of instructions required for generating a constant. */
+#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32)
+ compiler->size += 1;
+#else
+ compiler->size += 5;
+#endif
+ return SLJIT_SUCCESS;
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 arg_types,
+ sljit_s32 src, sljit_sw srcw)
+{
+ SLJIT_UNUSED_ARG(arg_types);
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw));
+
+ if (src & SLJIT_MEM) {
+ ADJUST_LOCAL_OFFSET(src, srcw);
+ FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_REG1, src, srcw));
+ src = TMP_REG1;
+ }
+
+ if (type & SLJIT_CALL_RETURN) {
+ if (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) {
+ FAIL_IF(push_inst(compiler, ADDI | RD(TMP_REG1) | RS1(src) | IMM_I(0)));
+ src = TMP_REG1;
+ }
+
+ FAIL_IF(emit_stack_frame_release(compiler, 0));
+ type = SLJIT_JUMP;
+ }
+
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_ijump(compiler, type, src, srcw);
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 type)
+{
+ sljit_s32 src_r, dst_r, invert;
+ sljit_s32 saved_op = op;
+#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32)
+ sljit_s32 mem_type = WORD_DATA;
+#else
+ sljit_s32 mem_type = ((op & SLJIT_32) || op == SLJIT_MOV32) ? (INT_DATA | SIGNED_DATA) : WORD_DATA;
+#endif
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_op_flags(compiler, op, dst, dstw, type));
+ ADJUST_LOCAL_OFFSET(dst, dstw);
+
+ op = GET_OPCODE(op);
+ dst_r = (op < SLJIT_ADD && FAST_IS_REG(dst)) ? dst : TMP_REG2;
+
+ compiler->cache_arg = 0;
+ compiler->cache_argw = 0;
+
+ if (op >= SLJIT_ADD && (dst & SLJIT_MEM))
+ FAIL_IF(emit_op_mem2(compiler, mem_type | LOAD_DATA, TMP_REG1, dst, dstw, dst, dstw));
+
+ if (type < SLJIT_F_EQUAL) {
+ src_r = OTHER_FLAG;
+ invert = type & 0x1;
+
+ switch (type) {
+ case SLJIT_EQUAL:
+ case SLJIT_NOT_EQUAL:
+ FAIL_IF(push_inst(compiler, SLTUI | RD(dst_r) | RS1(EQUAL_FLAG) | IMM_I(1)));
+ src_r = dst_r;
+ break;
+ case SLJIT_OVERFLOW:
+ case SLJIT_NOT_OVERFLOW:
+ if (compiler->status_flags_state & (SLJIT_CURRENT_FLAGS_ADD | SLJIT_CURRENT_FLAGS_SUB)) {
+ src_r = OTHER_FLAG;
+ break;
+ }
+ FAIL_IF(push_inst(compiler, SLTUI | RD(dst_r) | RS1(OTHER_FLAG) | IMM_I(1)));
+ src_r = dst_r;
+ invert ^= 0x1;
+ break;
+ }
+ } else {
+ invert = 0;
+ src_r = OTHER_FLAG;
+
+ switch (type) {
+ case SLJIT_F_NOT_EQUAL:
+ case SLJIT_UNORDERED_OR_NOT_EQUAL:
+ case SLJIT_UNORDERED_OR_EQUAL: /* Not supported. */
+ case SLJIT_F_GREATER_EQUAL:
+ case SLJIT_UNORDERED_OR_GREATER_EQUAL:
+ case SLJIT_UNORDERED_OR_LESS_EQUAL:
+ case SLJIT_F_GREATER:
+ case SLJIT_UNORDERED_OR_GREATER:
+ case SLJIT_UNORDERED_OR_LESS:
+ case SLJIT_UNORDERED:
+ invert = 1;
+ break;
+ }
+ }
+
+ if (invert) {
+ FAIL_IF(push_inst(compiler, XORI | RD(dst_r) | RS1(src_r) | IMM_I(1)));
+ src_r = dst_r;
+ }
+
+ if (op < SLJIT_ADD) {
+ if (dst & SLJIT_MEM)
+ return emit_op_mem(compiler, mem_type, src_r, dst, dstw);
+
+ if (src_r != dst_r)
+ return push_inst(compiler, ADDI | RD(dst_r) | RS1(src_r) | IMM_I(0));
+ return SLJIT_SUCCESS;
+ }
+
+ mem_type |= CUMULATIVE_OP | IMM_OP | ALT_KEEP_CACHE;
+
+ if (dst & SLJIT_MEM)
+ return emit_op(compiler, saved_op, mem_type, dst, dstw, TMP_REG1, 0, src_r, 0);
+ return emit_op(compiler, saved_op, mem_type, dst, dstw, dst, dstw, src_r, 0);
+}
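The SLTUI ... IMM_I(1) sequences above compute the unsigned comparison (x < 1), which is 1 exactly when x == 0; that is how a flag register is collapsed into a 0/1 boolean, with the optional XORI inverting it. Sketch:

#include <assert.h>
#include <stdint.h>

/* unsigned (x < 1) is the classic "is zero" test */
static uint64_t sltui_1(uint64_t x) { return x < 1; }

int main(void)
{
	assert(sltui_1(0) == 1);
	assert(sltui_1(123) == 0);
	assert((sltui_1(0) ^ 1) == 0); /* XORI ..., 1 gives the negation */
	return 0;
}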
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_select(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 dst_reg,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2_reg)
+{
+ sljit_ins *ptr;
+ sljit_uw size;
+#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64)
+ sljit_ins word = (sljit_ins)(type & SLJIT_32) >> 5;
+ sljit_s32 inp_flags = ((type & SLJIT_32) ? INT_DATA : WORD_DATA) | LOAD_DATA;
+#else /* !SLJIT_CONFIG_RISCV_64 */
+ sljit_s32 inp_flags = WORD_DATA | LOAD_DATA;
+#endif /* SLJIT_CONFIG_RISCV_64 */
+
+ SLJIT_ASSERT(WORD == 0 || WORD == 0x8);
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_select(compiler, type, dst_reg, src1, src1w, src2_reg));
+
+ ADJUST_LOCAL_OFFSET(src1, src1w);
+
+ if (dst_reg != src2_reg) {
+ if (dst_reg == src1) {
+ src1 = src2_reg;
+ src1w = 0;
+ type ^= 0x1;
+ } else {
+ if (ADDRESSING_DEPENDS_ON(src1, dst_reg)) {
+ FAIL_IF(push_inst(compiler, ADDI | RD(TMP_REG2) | RS1(dst_reg) | IMM_I(0)));
+
+ if ((src1 & REG_MASK) == dst_reg)
+ src1 = (src1 & ~REG_MASK) | TMP_REG2;
+
+ if (OFFS_REG(src1) == dst_reg)
+ src1 = (src1 & ~OFFS_REG_MASK) | TO_OFFS_REG(TMP_REG2);
+ }
+
+ FAIL_IF(push_inst(compiler, ADDI | WORD | RD(dst_reg) | RS1(src2_reg) | IMM_I(0)));
+ }
+ }
+
+ size = compiler->size;
+
+ ptr = (sljit_ins*)ensure_buf(compiler, sizeof(sljit_ins));
+ FAIL_IF(!ptr);
+ compiler->size++;
+
+ if (src1 & SLJIT_MEM) {
+ FAIL_IF(emit_op_mem(compiler, inp_flags, dst_reg, src1, src1w));
+ } else if (src1 == SLJIT_IMM) {
+#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64)
+ if (word)
+ src1w = (sljit_s32)src1w;
+#endif /* SLJIT_CONFIG_RISCV_64 */
+ FAIL_IF(load_immediate(compiler, dst_reg, src1w, TMP_REG1));
+ } else
+ FAIL_IF(push_inst(compiler, ADDI | WORD | RD(dst_reg) | RS1(src1) | IMM_I(0)));
+
+ *ptr = get_jump_instruction(type & ~SLJIT_32) | (sljit_ins)((compiler->size - size) << 9);
+ return SLJIT_SUCCESS;
+}
+
+#undef WORD
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fselect(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 dst_freg,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2_freg)
+{
+ sljit_ins *ptr;
+ sljit_uw size;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fselect(compiler, type, dst_freg, src1, src1w, src2_freg));
+
+ ADJUST_LOCAL_OFFSET(src1, src1w);
+
+ if (dst_freg != src2_freg) {
+ if (dst_freg == src1) {
+ src1 = src2_freg;
+ src1w = 0;
+ type ^= 0x1;
+ } else
+ FAIL_IF(push_inst(compiler, FSGNJ_S | FMT(type) | FRD(dst_freg) | FRS1(src2_freg) | FRS2(src2_freg)));
+ }
+
+ size = compiler->size;
+
+ ptr = (sljit_ins*)ensure_buf(compiler, sizeof(sljit_ins));
+ FAIL_IF(!ptr);
+ compiler->size++;
+
+ if (src1 & SLJIT_MEM)
+ FAIL_IF(emit_op_mem(compiler, FLOAT_DATA(type) | LOAD_DATA, dst_freg, src1, src1w));
+ else
+ FAIL_IF(push_inst(compiler, FSGNJ_S | FMT(type) | FRD(dst_freg) | FRS1(src1) | FRS2(src1)));
+
+ *ptr = get_jump_instruction(type & ~SLJIT_32) | (sljit_ins)((compiler->size - size) << 9);
+ return SLJIT_SUCCESS;
+}
+
+#undef FLOAT_DATA
+#undef FMT
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 reg,
+ sljit_s32 mem, sljit_sw memw)
+{
+ sljit_s32 flags;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw));
+
+ if (!(reg & REG_PAIR_MASK))
+ return sljit_emit_mem_unaligned(compiler, type, reg, mem, memw);
+
+ if (SLJIT_UNLIKELY(mem & OFFS_REG_MASK)) {
+ memw &= 0x3;
+
+ if (SLJIT_UNLIKELY(memw != 0)) {
+ FAIL_IF(push_inst(compiler, SLLI | RD(TMP_REG1) | RS1(OFFS_REG(mem)) | IMM_I(memw)));
+ FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG1) | RS1(TMP_REG1) | RS2(mem & REG_MASK)));
+ } else
+ FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG1) | RS1(mem & REG_MASK) | RS2(OFFS_REG(mem))));
+
+ mem = TMP_REG1;
+ memw = 0;
+ } else if (memw > SIMM_MAX - SSIZE_OF(sw) || memw < SIMM_MIN) {
+ if (((memw + 0x800) & 0xfff) <= 0xfff - SSIZE_OF(sw)) {
+ FAIL_IF(load_immediate(compiler, TMP_REG1, TO_ARGW_HI(memw), TMP_REG3));
+ memw &= 0xfff;
+ } else {
+ FAIL_IF(load_immediate(compiler, TMP_REG1, memw, TMP_REG3));
+ memw = 0;
+ }
+
+ if (mem & REG_MASK)
+ FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG1) | RS1(TMP_REG1) | RS2(mem & REG_MASK)));
+
+ mem = TMP_REG1;
+ } else {
+ mem &= REG_MASK;
+ memw &= 0xfff;
+ }
+
+ SLJIT_ASSERT((memw >= 0 && memw <= SIMM_MAX - SSIZE_OF(sw)) || (memw > SIMM_MAX && memw <= 0xfff));
+
+ if (!(type & SLJIT_MEM_STORE) && mem == REG_PAIR_FIRST(reg)) {
+ FAIL_IF(push_mem_inst(compiler, WORD_DATA | LOAD_DATA, REG_PAIR_SECOND(reg), mem, (memw + SSIZE_OF(sw)) & 0xfff));
+ return push_mem_inst(compiler, WORD_DATA | LOAD_DATA, REG_PAIR_FIRST(reg), mem, memw);
+ }
+
+ flags = WORD_DATA | (!(type & SLJIT_MEM_STORE) ? LOAD_DATA : 0);
+
+ FAIL_IF(push_mem_inst(compiler, flags, REG_PAIR_FIRST(reg), mem, memw));
+ return push_mem_inst(compiler, flags, REG_PAIR_SECOND(reg), mem, (memw + SSIZE_OF(sw)) & 0xfff);
+}
+
+#undef TO_ARGW_HI
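The (memw + 0x800) & 0xfff test above mirrors the usual RISC-V %hi/%lo split behind TO_ARGW_HI: the low 12-bit immediate is sign-extended, so the upper part must absorb a carry whenever the low half is >= 0x800. A hedged sketch of the arithmetic (helper names are illustrative):

#include <assert.h>
#include <stdint.h>

static int32_t lo12(int32_t w)
{
	int32_t low = w & 0xfff;
	return (low >= 0x800) ? low - 0x1000 : low; /* sign-extend 12 bits */
}

static int32_t hi20(int32_t w)
{
	return (w + 0x800) & ~0xfff; /* carry-adjusted upper part */
}

int main(void)
{
	assert(hi20(0x12345) + lo12(0x12345) == 0x12345);
	assert(hi20(0x12fff) + lo12(0x12fff) == 0x12fff); /* carry case */
	return 0;
}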
+
+SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw init_value)
+{
+ struct sljit_const *const_;
+ sljit_s32 dst_r;
+
+ CHECK_ERROR_PTR();
+ CHECK_PTR(check_sljit_emit_const(compiler, dst, dstw, init_value));
+ ADJUST_LOCAL_OFFSET(dst, dstw);
+
+ const_ = (struct sljit_const*)ensure_abuf(compiler, sizeof(struct sljit_const));
+ PTR_FAIL_IF(!const_);
+ set_const(const_, compiler);
+
+ dst_r = FAST_IS_REG(dst) ? dst : TMP_REG2;
+ PTR_FAIL_IF(emit_const(compiler, dst_r, init_value, ADDI | RD(dst_r)));
+
+ if (dst & SLJIT_MEM)
+ PTR_FAIL_IF(emit_op_mem(compiler, WORD_DATA, TMP_REG2, dst, dstw));
+
+ return const_;
+}
+
+SLJIT_API_FUNC_ATTRIBUTE struct sljit_put_label* sljit_emit_put_label(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
+{
+ struct sljit_put_label *put_label;
+ sljit_s32 dst_r;
+
+ CHECK_ERROR_PTR();
+ CHECK_PTR(check_sljit_emit_put_label(compiler, dst, dstw));
+ ADJUST_LOCAL_OFFSET(dst, dstw);
+
+ put_label = (struct sljit_put_label*)ensure_abuf(compiler, sizeof(struct sljit_put_label));
+ PTR_FAIL_IF(!put_label);
+ set_put_label(put_label, compiler, 0);
+
+ dst_r = FAST_IS_REG(dst) ? dst : TMP_REG2;
+ PTR_FAIL_IF(push_inst(compiler, (sljit_ins)dst_r));
+#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32)
+ compiler->size += 1;
+#else
+ compiler->size += 5;
+#endif
+
+ if (dst & SLJIT_MEM)
+ PTR_FAIL_IF(emit_op_mem(compiler, WORD_DATA, TMP_REG2, dst, dstw));
+
+ return put_label;
+}
+
+SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset)
+{
+ sljit_set_jump_addr(addr, (sljit_uw)new_constant, executable_offset);
+}
diff --git a/src/3rdparty/pcre2/src/sljit/sljitNativeS390X.c b/src/3rdparty/pcre2/src/sljit/sljitNativeS390X.c
index 716491ec72..67516f9b32 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitNativeS390X.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitNativeS390X.c
@@ -44,8 +44,11 @@ typedef sljit_uw sljit_ins;
/* Instruction tags (most significant halfword). */
static const sljit_ins sljit_ins_const = (sljit_ins)1 << 48;
-static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 4] = {
- 0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 15, 0, 1
+#define TMP_REG1 (SLJIT_NUMBER_OF_REGISTERS + 2)
+#define TMP_REG2 (SLJIT_NUMBER_OF_REGISTERS + 3)
+
+static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 5] = {
+ 0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 15, 0, 1, 14
};
/* there are also a[2-15] available, but they are slower to access and
@@ -80,7 +83,7 @@ static const sljit_gpr r10 = 10; /* reg_map[9] */
static const sljit_gpr r11 = 11; /* reg_map[10] */
static const sljit_gpr r12 = 12; /* reg_map[11]: GOT */
static const sljit_gpr r13 = 13; /* reg_map[12]: Literal Pool pointer */
-static const sljit_gpr r14 = 14; /* reg_map[0]: return address and flag register */
+static const sljit_gpr r14 = 14; /* reg_map[0]: return address */
static const sljit_gpr r15 = 15; /* reg_map[SLJIT_NUMBER_OF_REGISTERS + 1]: stack pointer */
/* WARNING: r12 and r13 shouldn't be used as per ABI recommendation */
@@ -93,24 +96,37 @@ static const sljit_gpr r15 = 15; /* reg_map[SLJIT_NUMBER_OF_REGISTERS + 1]: stac
#define tmp0 r0
#define tmp1 r1
-/* TODO(carenas): flags should move to a different register so that
- * link register doesn't need to change
- */
+/* Tests for a valid GPR in contexts where reg cannot be unused. */
+#define IS_GPR_REG(reg) (((reg) > 0) && ((reg) <= SLJIT_SP))
-/* Link registers. The normal link register is r14, but since
- we use that for flags we need to use r0 instead to do fast
- calls so that flags are preserved. */
+/* Link register. */
static const sljit_gpr link_r = 14; /* r14 */
-static const sljit_gpr fast_link_r = 0; /* r0 */
-/* Flag register layout:
+#define TMP_FREG1 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1)
+
+static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 2] = {
+ 0, 0, 2, 4, 6, 3, 5, 7, 15, 14, 13, 12, 11, 10, 9, 8, 1
+};
- 0 32 33 34 36 64
- +---------------+---+---+-------+-------+
- | ZERO | 0 | 0 | C C |///////|
- +---------------+---+---+-------+-------+
-*/
-static const sljit_gpr flag_r = 14; /* r14 */
+#define R0A(r) (r)
+#define R4A(r) ((r) << 4)
+#define R8A(r) ((r) << 8)
+#define R12A(r) ((r) << 12)
+#define R16A(r) ((r) << 16)
+#define R20A(r) ((r) << 20)
+#define R28A(r) ((r) << 28)
+#define R32A(r) ((r) << 32)
+#define R36A(r) ((r) << 36)
+
+#define R0(r) ((sljit_ins)reg_map[r])
+
+#define F0(r) ((sljit_ins)freg_map[r])
+#define F4(r) (R4A((sljit_ins)freg_map[r]))
+#define F12(r) (R12A((sljit_ins)freg_map[r]))
+#define F20(r) (R20A((sljit_ins)freg_map[r]))
+#define F28(r) (R28A((sljit_ins)freg_map[r]))
+#define F32(r) (R32A((sljit_ins)freg_map[r]))
+#define F36(r) (R36A((sljit_ins)freg_map[r]))
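The new R..A/F.. helpers simply place a 4-bit register number (after freg_map translation for float registers) at a named bit offset of the up-to-48-bit s390x instruction word. A minimal host-side sketch with illustrative EX_ names:

#include <assert.h>
#include <stdint.h>

#define EX_R4A(r)  ((uint64_t)(r) << 4)
#define EX_R36A(r) ((uint64_t)(r) << 36)

int main(void)
{
	assert(EX_R4A(15) == 0xf0);            /* second nibble of a 2-byte form */
	assert(EX_R36A(1) == 0x1000000000ull); /* r1 field of a 6-byte form */
	return 0;
}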
struct sljit_s390x_const {
struct sljit_const const_; /* must be first */
@@ -129,14 +145,14 @@ static SLJIT_INLINE sljit_uw sizeof_ins(sljit_ins ins)
{
/* keep faulting instructions */
if (ins == 0)
- return 2;
+ return 2;
if ((ins & 0x00000000ffffL) == ins)
- return 2;
+ return 2;
if ((ins & 0x0000ffffffffL) == ins)
- return 4;
+ return 4;
if ((ins & 0xffffffffffffL) == ins)
- return 6;
+ return 6;
SLJIT_UNREACHABLE();
return (sljit_uw)-1;
@@ -172,7 +188,8 @@ static sljit_s32 encode_inst(void **ptr, sljit_ins ins)
}
#define SLJIT_ADD_SUB_NO_COMPARE(status_flags_state) \
- (((status_flags_state) & (SLJIT_CURRENT_FLAGS_ADD_SUB | SLJIT_CURRENT_FLAGS_COMPARE)) == SLJIT_CURRENT_FLAGS_ADD_SUB)
+ (((status_flags_state) & (SLJIT_CURRENT_FLAGS_ADD | SLJIT_CURRENT_FLAGS_SUB)) \
+ && !((status_flags_state) & SLJIT_CURRENT_FLAGS_COMPARE))
/* Map the given type to a 4-bit condition code mask. */
static SLJIT_INLINE sljit_u8 get_cc(struct sljit_compiler *compiler, sljit_s32 type) {
@@ -191,8 +208,11 @@ static SLJIT_INLINE sljit_u8 get_cc(struct sljit_compiler *compiler, sljit_s32 t
return (cc0 | cc3);
return (cc0 | cc2);
}
+ /* fallthrough */
- case SLJIT_EQUAL_F64:
+ case SLJIT_ATOMIC_STORED:
+ case SLJIT_F_EQUAL:
+ case SLJIT_ORDERED_EQUAL:
return cc0;
case SLJIT_NOT_EQUAL:
@@ -204,14 +224,17 @@ static SLJIT_INLINE sljit_u8 get_cc(struct sljit_compiler *compiler, sljit_s32 t
return (cc1 | cc2);
return (cc1 | cc3);
}
+ /* fallthrough */
- case SLJIT_NOT_EQUAL_F64:
+ case SLJIT_UNORDERED_OR_NOT_EQUAL:
return (cc1 | cc2 | cc3);
case SLJIT_LESS:
+ case SLJIT_ATOMIC_NOT_STORED:
return cc1;
case SLJIT_GREATER_EQUAL:
+ case SLJIT_UNORDERED_OR_GREATER_EQUAL:
return (cc0 | cc2 | cc3);
case SLJIT_GREATER:
@@ -225,14 +248,27 @@ static SLJIT_INLINE sljit_u8 get_cc(struct sljit_compiler *compiler, sljit_s32 t
return (cc0 | cc1 | cc2);
case SLJIT_SIG_LESS:
- case SLJIT_LESS_F64:
+ case SLJIT_F_LESS:
+ case SLJIT_ORDERED_LESS:
return cc1;
+ case SLJIT_NOT_CARRY:
+ if (compiler->status_flags_state & SLJIT_CURRENT_FLAGS_SUB)
+ return (cc2 | cc3);
+ /* fallthrough */
+
case SLJIT_SIG_LESS_EQUAL:
- case SLJIT_LESS_EQUAL_F64:
+ case SLJIT_F_LESS_EQUAL:
+ case SLJIT_ORDERED_LESS_EQUAL:
return (cc0 | cc1);
+ case SLJIT_CARRY:
+ if (compiler->status_flags_state & SLJIT_CURRENT_FLAGS_SUB)
+ return (cc0 | cc1);
+ /* fallthrough */
+
case SLJIT_SIG_GREATER:
+ case SLJIT_UNORDERED_OR_GREATER:
/* Overflow is considered greater, see SLJIT_SUB. */
return cc2 | cc3;
@@ -242,22 +278,39 @@ static SLJIT_INLINE sljit_u8 get_cc(struct sljit_compiler *compiler, sljit_s32 t
case SLJIT_OVERFLOW:
if (compiler->status_flags_state & SLJIT_SET_Z)
return (cc2 | cc3);
+ /* fallthrough */
- case SLJIT_UNORDERED_F64:
+ case SLJIT_UNORDERED:
return cc3;
case SLJIT_NOT_OVERFLOW:
if (compiler->status_flags_state & SLJIT_SET_Z)
return (cc0 | cc1);
+ /* fallthrough */
- case SLJIT_ORDERED_F64:
+ case SLJIT_ORDERED:
return (cc0 | cc1 | cc2);
- case SLJIT_GREATER_F64:
+ case SLJIT_F_NOT_EQUAL:
+ case SLJIT_ORDERED_NOT_EQUAL:
+ return (cc1 | cc2);
+
+ case SLJIT_F_GREATER:
+ case SLJIT_ORDERED_GREATER:
return cc2;
- case SLJIT_GREATER_EQUAL_F64:
+ case SLJIT_F_GREATER_EQUAL:
+ case SLJIT_ORDERED_GREATER_EQUAL:
return (cc0 | cc2);
+
+ case SLJIT_UNORDERED_OR_LESS_EQUAL:
+ return (cc0 | cc1 | cc3);
+
+ case SLJIT_UNORDERED_OR_EQUAL:
+ return (cc0 | cc3);
+
+ case SLJIT_UNORDERED_OR_LESS:
+ return (cc1 | cc3);
}
SLJIT_UNREACHABLE();
@@ -396,10 +449,12 @@ HAVE_FACILITY(have_misc2, MISCELLANEOUS_INSTRUCTION_EXTENSIONS_2_FACILITY)
static SLJIT_INLINE sljit_ins disp_s20(sljit_s32 d)
{
+ sljit_uw dh, dl;
+
SLJIT_ASSERT(is_s20(d));
- sljit_uw dh = (d >> 12) & 0xff;
- sljit_uw dl = (d << 8) & 0xfff00;
+ dh = (d >> 12) & 0xff;
+ dl = ((sljit_uw)d << 8) & 0xfff00;
return (dh | dl) << 8;
}
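The rebuilt disp_s20 interleaves the two displacement fields of long-displacement (RXY/RSY-style) instructions: DL holds the low 12 bits and DH the high 8 bits of a signed 20-bit offset. A standalone copy of the arithmetic, checkable on the host:

#include <assert.h>
#include <stdint.h>

static uint64_t disp_s20(int32_t d)
{
	uint64_t dh = ((uint64_t)d >> 12) & 0xff;   /* DH: high 8 bits */
	uint64_t dl = ((uint64_t)d << 8) & 0xfff00; /* DL: low 12 bits */
	return (dh | dl) << 8;
}

int main(void)
{
	/* 0x12345 splits into DL = 0x345, DH = 0x12 */
	assert(disp_s20(0x12345) == (((0x345ull << 8) | 0x12ull) << 8));
	return 0;
}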
@@ -444,7 +499,7 @@ SLJIT_S390X_RR(or, 0x1600)
#define SLJIT_S390X_RRE(name, pattern) \
SLJIT_S390X_INSTRUCTION(name, sljit_gpr dst, sljit_gpr src) \
{ \
- return (pattern) | ((dst & 0xf) << 4) | (src & 0xf); \
+ return (pattern) | R4A(dst) | R0A(src); \
}
/* AND */
@@ -504,7 +559,7 @@ SLJIT_S390X_RRE(sgr, 0xb9090000)
#define SLJIT_S390X_RIA(name, pattern, imm_type) \
SLJIT_S390X_INSTRUCTION(name, sljit_gpr reg, imm_type imm) \
{ \
- return (pattern) | ((reg & 0xf) << 20) | (imm & 0xffff); \
+ return (pattern) | R20A(reg) | (imm & 0xffff); \
}
/* ADD HALFWORD IMMEDIATE */
@@ -534,7 +589,7 @@ SLJIT_S390X_RIA(oilh, 0xa50a0000, sljit_u16)
SLJIT_S390X_INSTRUCTION(name, sljit_gpr reg, imm_type imm) \
{ \
SLJIT_ASSERT(have_eimm()); \
- return (pattern) | ((sljit_ins)(reg & 0xf) << 36) | (imm & 0xffffffff); \
+ return (pattern) | R36A(reg) | ((sljit_ins)imm & 0xffffffffu); \
}
/* ADD IMMEDIATE */
@@ -567,17 +622,11 @@ SLJIT_S390X_RILA(slfi, 0xc20500000000, sljit_u32)
/* RX-a form instructions */
#define SLJIT_S390X_RXA(name, pattern) \
-SLJIT_S390X_INSTRUCTION(name, sljit_gpr r, sljit_u16 d, sljit_gpr x, sljit_gpr b) \
+SLJIT_S390X_INSTRUCTION(name, sljit_gpr r, sljit_s32 d, sljit_gpr x, sljit_gpr b) \
{ \
- sljit_ins ri, xi, bi, di; \
-\
SLJIT_ASSERT((d & 0xfff) == d); \
- ri = (sljit_ins)(r & 0xf) << 20; \
- xi = (sljit_ins)(x & 0xf) << 16; \
- bi = (sljit_ins)(b & 0xf) << 12; \
- di = (sljit_ins)(d & 0xfff); \
\
- return (pattern) | ri | xi | bi | di; \
+ return (pattern) | R20A(r) | R16A(x) | R12A(b) | (sljit_ins)(d & 0xfff); \
}
/* LOAD */
@@ -607,15 +656,9 @@ SLJIT_S390X_RXA(sth, 0x40000000)
#define SLJIT_S390X_RXYA(name, pattern, cond) \
SLJIT_S390X_INSTRUCTION(name, sljit_gpr r, sljit_s32 d, sljit_gpr x, sljit_gpr b) \
{ \
- sljit_ins ri, xi, bi, di; \
-\
SLJIT_ASSERT(cond); \
- ri = (sljit_ins)(r & 0xf) << 36; \
- xi = (sljit_ins)(x & 0xf) << 32; \
- bi = (sljit_ins)(b & 0xf) << 28; \
- di = disp_s20(d); \
\
- return (pattern) | ri | xi | bi | di; \
+ return (pattern) | R36A(r) | R32A(x) | R28A(b) | disp_s20(d); \
}
/* LOAD */
@@ -660,17 +703,11 @@ SLJIT_S390X_RXYA(sthy, 0xe30000000070, have_ldisp())
/* RSY-a instructions */
#define SLJIT_S390X_RSYA(name, pattern, cond) \
-SLJIT_S390X_INSTRUCTION(name, sljit_gpr dst, sljit_gpr src, sljit_sw d, sljit_gpr b) \
+SLJIT_S390X_INSTRUCTION(name, sljit_gpr dst, sljit_gpr src, sljit_s32 d, sljit_gpr b) \
{ \
- sljit_ins r1, r3, b2, d2; \
-\
SLJIT_ASSERT(cond); \
- r1 = (sljit_ins)(dst & 0xf) << 36; \
- r3 = (sljit_ins)(src & 0xf) << 32; \
- b2 = (sljit_ins)(b & 0xf) << 28; \
- d2 = disp_s20(d); \
\
- return (pattern) | r1 | r3 | b2 | d2; \
+ return (pattern) | R36A(dst) | R32A(src) | R28A(b) | disp_s20(d); \
}
/* LOAD MULTIPLE */
@@ -691,16 +728,14 @@ SLJIT_S390X_RSYA(stmg, 0xeb0000000024, 1)
#define SLJIT_S390X_RIEF(name, pattern) \
SLJIT_S390X_INSTRUCTION(name, sljit_gpr dst, sljit_gpr src, sljit_u8 start, sljit_u8 end, sljit_u8 rot) \
{ \
- sljit_ins r1, r2, i3, i4, i5; \
+ sljit_ins i3, i4, i5; \
\
SLJIT_ASSERT(have_genext()); \
- r1 = (sljit_ins)(dst & 0xf) << 36; \
- r2 = (sljit_ins)(src & 0xf) << 32; \
i3 = (sljit_ins)start << 24; \
i4 = (sljit_ins)end << 16; \
i5 = (sljit_ins)rot << 8; \
\
- return (pattern) | r1 | r2 | i3 | i4 | i5; \
+ return (pattern) | R36A(dst & 0xf) | R32A(src & 0xf) | i3 | i4 | i5; \
}
/* ROTATE THEN AND SELECTED BITS */
@@ -728,14 +763,12 @@ SLJIT_S390X_RIEF(risbhg, 0xec000000005d)
#define SLJIT_S390X_RRFC(name, pattern) \
SLJIT_S390X_INSTRUCTION(name, sljit_gpr dst, sljit_gpr src, sljit_uw mask) \
{ \
- sljit_ins r1, r2, m3; \
+ sljit_ins m3; \
\
SLJIT_ASSERT(have_lscond1()); \
- r1 = (sljit_ins)(dst & 0xf) << 4; \
- r2 = (sljit_ins)(src & 0xf); \
m3 = (sljit_ins)(mask & 0xf) << 12; \
\
- return (pattern) | m3 | r1 | r2; \
+ return (pattern) | m3 | R4A(dst) | R0A(src); \
}
/* LOAD HALFWORD IMMEDIATE ON CONDITION */
@@ -748,14 +781,13 @@ SLJIT_S390X_RRFC(locgr, 0xb9e20000)
#define SLJIT_S390X_RIEG(name, pattern) \
SLJIT_S390X_INSTRUCTION(name, sljit_gpr reg, sljit_sw imm, sljit_uw mask) \
{ \
- sljit_ins r1, m3, i2; \
+ sljit_ins m3, i2; \
\
SLJIT_ASSERT(have_lscond2()); \
- r1 = (sljit_ins)(reg & 0xf) << 36; \
m3 = (sljit_ins)(mask & 0xf) << 32; \
i2 = (sljit_ins)(imm & 0xffffL) << 16; \
\
- return (pattern) | r1 | m3 | i2; \
+ return (pattern) | R36A(reg) | m3 | i2; \
}
/* LOAD HALFWORD IMMEDIATE ON CONDITION */
@@ -767,13 +799,9 @@ SLJIT_S390X_RIEG(locghi, 0xec0000000046)
#define SLJIT_S390X_RILB(name, pattern, cond) \
SLJIT_S390X_INSTRUCTION(name, sljit_gpr reg, sljit_sw ri) \
{ \
- sljit_ins r1, ri2; \
-\
SLJIT_ASSERT(cond); \
- r1 = (sljit_ins)(reg & 0xf) << 36; \
- ri2 = (sljit_ins)(ri & 0xffffffff); \
\
- return (pattern) | r1 | ri2; \
+ return (pattern) | R36A(reg) | (sljit_ins)(ri & 0xffffffff); \
}
/* BRANCH RELATIVE AND SAVE LONG */
@@ -808,22 +836,20 @@ SLJIT_S390X_INSTRUCTION(brcl, sljit_uw mask, sljit_sw target)
SLJIT_S390X_INSTRUCTION(flogr, sljit_gpr dst, sljit_gpr src)
{
- sljit_ins r1 = ((sljit_ins)dst & 0xf) << 8;
- sljit_ins r2 = ((sljit_ins)src & 0xf);
SLJIT_ASSERT(have_eimm());
- return 0xb9830000 | r1 | r2;
+ return 0xb9830000 | R8A(dst) | R0A(src);
}
/* INSERT PROGRAM MASK */
SLJIT_S390X_INSTRUCTION(ipm, sljit_gpr dst)
{
- return 0xb2220000 | ((sljit_ins)(dst & 0xf) << 4);
+ return 0xb2220000 | R4A(dst);
}
/* SET PROGRAM MASK */
SLJIT_S390X_INSTRUCTION(spm, sljit_gpr dst)
{
- return 0x0400 | ((sljit_ins)(dst & 0xf) << 4);
+ return 0x0400 | R4A(dst);
}
/* ROTATE THEN INSERT SELECTED BITS HIGH (ZERO) */
@@ -842,12 +868,12 @@ static sljit_s32 update_zero_overflow(struct sljit_compiler *compiler, sljit_s32
1 (non-zero and no overflow) : unchanged
2 (zero and overflow) : decreased by 1
3 (non-zero and overflow) : decreased by 1 if non-zero */
- FAIL_IF(push_inst(compiler, brc(0xc, 2 + 2 + ((op & SLJIT_I32_OP) ? 1 : 2) + 2 + 3 + 1)));
- FAIL_IF(push_inst(compiler, ipm(flag_r)));
- FAIL_IF(push_inst(compiler, (op & SLJIT_I32_OP) ? or(dst_r, dst_r) : ogr(dst_r, dst_r)));
+ FAIL_IF(push_inst(compiler, brc(0xc, 2 + 2 + ((op & SLJIT_32) ? 1 : 2) + 2 + 3 + 1)));
+ FAIL_IF(push_inst(compiler, ipm(tmp1)));
+ FAIL_IF(push_inst(compiler, (op & SLJIT_32) ? or(dst_r, dst_r) : ogr(dst_r, dst_r)));
FAIL_IF(push_inst(compiler, brc(0x8, 2 + 3)));
- FAIL_IF(push_inst(compiler, slfi(flag_r, 0x10000000)));
- FAIL_IF(push_inst(compiler, spm(flag_r)));
+ FAIL_IF(push_inst(compiler, slfi(tmp1, 0x10000000)));
+ FAIL_IF(push_inst(compiler, spm(tmp1)));
return SLJIT_SUCCESS;
}
@@ -858,40 +884,35 @@ static sljit_s32 push_load_imm_inst(struct sljit_compiler *compiler, sljit_gpr t
if (is_s16(v))
return push_inst(compiler, lghi(target, (sljit_s16)v));
- if ((sljit_uw)v == (v & 0x000000000000ffffU))
+ if (((sljit_uw)v & ~(sljit_uw)0x000000000000ffff) == 0)
return push_inst(compiler, llill(target, (sljit_u16)v));
- if ((sljit_uw)v == (v & 0x00000000ffff0000U))
+ if (((sljit_uw)v & ~(sljit_uw)0x00000000ffff0000) == 0)
return push_inst(compiler, llilh(target, (sljit_u16)(v >> 16)));
- if ((sljit_uw)v == (v & 0x0000ffff00000000U))
+ if (((sljit_uw)v & ~(sljit_uw)0x0000ffff00000000) == 0)
return push_inst(compiler, llihl(target, (sljit_u16)(v >> 32)));
- if ((sljit_uw)v == (v & 0xffff000000000000U))
+ if (((sljit_uw)v & ~(sljit_uw)0xffff000000000000) == 0)
return push_inst(compiler, llihh(target, (sljit_u16)(v >> 48)));
- /* 6 byte instructions (requires extended immediate facility) */
- if (have_eimm()) {
- if (is_s32(v))
- return push_inst(compiler, lgfi(target, (sljit_s32)v));
+ if (is_s32(v))
+ return push_inst(compiler, lgfi(target, (sljit_s32)v));
- if ((sljit_uw)v == (v & 0x00000000ffffffffU))
- return push_inst(compiler, llilf(target, (sljit_u32)v));
+ if (((sljit_uw)v >> 32) == 0)
+ return push_inst(compiler, llilf(target, (sljit_u32)v));
- if ((sljit_uw)v == (v & 0xffffffff00000000U))
- return push_inst(compiler, llihf(target, (sljit_u32)(v >> 32)));
+ if (((sljit_uw)v << 32) == 0)
+ return push_inst(compiler, llihf(target, (sljit_u32)((sljit_uw)v >> 32)));
- FAIL_IF(push_inst(compiler, llilf(target, (sljit_u32)v)));
- return push_inst(compiler, iihf(target, (sljit_u32)(v >> 32)));
- }
- /* TODO(mundaym): instruction sequences that don't use extended immediates */
- abort();
+ FAIL_IF(push_inst(compiler, llilf(target, (sljit_u32)v)));
+ return push_inst(compiler, iihf(target, (sljit_u32)(v >> 32)));
}
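The rewritten masks in push_load_imm_inst all ask the same question — is every bit outside one 16-bit lane zero? — which is exactly when a single LLILL/LLILH/LLIHL/LLIHH insert suffices. A host-side check of one lane:

#include <assert.h>
#include <stdint.h>

static int fits_llilh(uint64_t v) /* bits 16..31 only */
{
	return (v & ~(uint64_t)0x00000000ffff0000) == 0;
}

int main(void)
{
	assert(fits_llilh(0x12340000));
	assert(!fits_llilh(0x100012340000)); /* a bit outside the lane */
	return 0;
}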
struct addr {
sljit_gpr base;
sljit_gpr index;
- sljit_sw offset;
+ sljit_s32 offset;
};
/* transform memory operand into D(X,B) form with a signed 20-bit offset */
@@ -911,7 +932,7 @@ static sljit_s32 make_addr_bxy(struct sljit_compiler *compiler,
if (off != 0) {
/* shift and put the result into tmp */
SLJIT_ASSERT(0 <= off && off < 64);
- FAIL_IF(push_inst(compiler, sllg(tmp, index, off, 0)));
+ FAIL_IF(push_inst(compiler, sllg(tmp, index, (sljit_s32)off, 0)));
index = tmp;
off = 0; /* clear offset */
}
@@ -923,7 +944,7 @@ static sljit_s32 make_addr_bxy(struct sljit_compiler *compiler,
}
addr->base = base;
addr->index = index;
- addr->offset = off;
+ addr->offset = (sljit_s32)off;
return SLJIT_SUCCESS;
}
@@ -944,7 +965,7 @@ static sljit_s32 make_addr_bx(struct sljit_compiler *compiler,
if (off != 0) {
/* shift and put the result into tmp */
SLJIT_ASSERT(0 <= off && off < 64);
- FAIL_IF(push_inst(compiler, sllg(tmp, index, off, 0)));
+ FAIL_IF(push_inst(compiler, sllg(tmp, index, (sljit_s32)off, 0)));
index = tmp;
off = 0; /* clear offset */
}
@@ -956,7 +977,7 @@ static sljit_s32 make_addr_bx(struct sljit_compiler *compiler,
}
addr->base = base;
addr->index = index;
- addr->offset = off;
+ addr->offset = (sljit_s32)off;
return SLJIT_SUCCESS;
}
@@ -965,47 +986,71 @@ static sljit_s32 make_addr_bx(struct sljit_compiler *compiler,
(cond) ? EVAL(i1, r, addr) : EVAL(i2, r, addr)
/* May clobber tmp1. */
-static sljit_s32 load_word(struct sljit_compiler *compiler, sljit_gpr dst,
- sljit_s32 src, sljit_sw srcw,
- sljit_s32 is_32bit)
+static sljit_s32 load_store_op(struct sljit_compiler *compiler, sljit_gpr reg,
+ sljit_s32 mem, sljit_sw memw,
+ sljit_s32 is_32bit, const sljit_ins* forms)
{
struct addr addr;
- sljit_ins ins;
- SLJIT_ASSERT(src & SLJIT_MEM);
- if (have_ldisp() || !is_32bit)
- FAIL_IF(make_addr_bxy(compiler, &addr, src, srcw, tmp1));
- else
- FAIL_IF(make_addr_bx(compiler, &addr, src, srcw, tmp1));
+ SLJIT_ASSERT(mem & SLJIT_MEM);
- if (is_32bit)
- ins = WHEN(is_u12(addr.offset), dst, l, ly, addr);
- else
- ins = lg(dst, addr.offset, addr.index, addr.base);
+ if (is_32bit && ((mem & OFFS_REG_MASK) || is_u12(memw) || !is_s20(memw))) {
+ FAIL_IF(make_addr_bx(compiler, &addr, mem, memw, tmp1));
+ return push_inst(compiler, forms[0] | R20A(reg) | R16A(addr.index) | R12A(addr.base) | (sljit_ins)addr.offset);
+ }
- return push_inst(compiler, ins);
+ FAIL_IF(make_addr_bxy(compiler, &addr, mem, memw, tmp1));
+ return push_inst(compiler, (is_32bit ? forms[1] : forms[2]) | R36A(reg) | R32A(addr.index) | R28A(addr.base) | disp_s20(addr.offset));
+}
+
+static const sljit_ins load_forms[3] = {
+ 0x58000000 /* l */,
+ 0xe30000000058 /* ly */,
+ 0xe30000000004 /* lg */
+};
+
+static const sljit_ins store_forms[3] = {
+ 0x50000000 /* st */,
+ 0xe30000000050 /* sty */,
+ 0xe30000000024 /* stg */
+};
+
+static const sljit_ins load_halfword_forms[3] = {
+ 0x48000000 /* lh */,
+ 0xe30000000078 /* lhy */,
+ 0xe30000000015 /* lgh */
+};
+
+/* May clobber tmp1. */
+static SLJIT_INLINE sljit_s32 load_word(struct sljit_compiler *compiler, sljit_gpr dst_r,
+ sljit_s32 src, sljit_sw srcw,
+ sljit_s32 is_32bit)
+{
+ return load_store_op(compiler, dst_r, src, srcw, is_32bit, load_forms);
}
/* May clobber tmp1. */
-static sljit_s32 store_word(struct sljit_compiler *compiler, sljit_gpr src,
- sljit_s32 dst, sljit_sw dstw,
+static sljit_s32 load_unsigned_word(struct sljit_compiler *compiler, sljit_gpr dst_r,
+ sljit_s32 src, sljit_sw srcw,
sljit_s32 is_32bit)
{
struct addr addr;
sljit_ins ins;
- SLJIT_ASSERT(dst & SLJIT_MEM);
- if (have_ldisp() || !is_32bit)
- FAIL_IF(make_addr_bxy(compiler, &addr, dst, dstw, tmp1));
- else
- FAIL_IF(make_addr_bx(compiler, &addr, dst, dstw, tmp1));
+ SLJIT_ASSERT(src & SLJIT_MEM);
- if (is_32bit)
- ins = WHEN(is_u12(addr.offset), src, st, sty, addr);
- else
- ins = stg(src, addr.offset, addr.index, addr.base);
+ FAIL_IF(make_addr_bxy(compiler, &addr, src, srcw, tmp1));
- return push_inst(compiler, ins);
+ ins = is_32bit ? 0xe30000000016 /* llgf */ : 0xe30000000004 /* lg */;
+ return push_inst(compiler, ins | R36A(dst_r) | R32A(addr.index) | R28A(addr.base) | disp_s20(addr.offset));
+}
+
+/* May clobber tmp1. */
+static SLJIT_INLINE sljit_s32 store_word(struct sljit_compiler *compiler, sljit_gpr src_r,
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 is_32bit)
+{
+ return load_store_op(compiler, src_r, dst, dstw, is_32bit, store_forms);
}
#undef WHEN
@@ -1014,16 +1059,18 @@ static sljit_s32 emit_move(struct sljit_compiler *compiler,
sljit_gpr dst_r,
sljit_s32 src, sljit_sw srcw)
{
- SLJIT_ASSERT(!SLOW_IS_REG(src) || dst_r != gpr(src & REG_MASK));
+ sljit_gpr src_r;
+
+ SLJIT_ASSERT(!IS_GPR_REG(src) || dst_r != gpr(src & REG_MASK));
- if (src & SLJIT_IMM)
+ if (src == SLJIT_IMM)
return push_load_imm_inst(compiler, dst_r, srcw);
if (src & SLJIT_MEM)
- return load_word(compiler, dst_r, src, srcw, (compiler->mode & SLJIT_I32_OP) != 0);
+ return load_word(compiler, dst_r, src, srcw, (compiler->mode & SLJIT_32) != 0);
- sljit_gpr src_r = gpr(src & REG_MASK);
- return push_inst(compiler, (compiler->mode & SLJIT_I32_OP) ? lr(dst_r, src_r) : lgr(dst_r, src_r));
+ src_r = gpr(src & REG_MASK);
+ return push_inst(compiler, (compiler->mode & SLJIT_32) ? lr(dst_r, src_r) : lgr(dst_r, src_r));
}
static sljit_s32 emit_rr(struct sljit_compiler *compiler, sljit_ins ins,
@@ -1035,8 +1082,8 @@ static sljit_s32 emit_rr(struct sljit_compiler *compiler, sljit_ins ins,
sljit_gpr src_r = tmp1;
sljit_s32 needs_move = 1;
- if (SLOW_IS_REG(dst)) {
- dst_r = gpr(dst & REG_MASK);
+ if (FAST_IS_REG(dst)) {
+ dst_r = gpr(dst);
if (dst == src1)
needs_move = 0;
@@ -1050,17 +1097,32 @@ static sljit_s32 emit_rr(struct sljit_compiler *compiler, sljit_ins ins,
FAIL_IF(emit_move(compiler, dst_r, src1, src1w));
if (FAST_IS_REG(src2))
- src_r = gpr(src2 & REG_MASK);
+ src_r = gpr(src2);
else
FAIL_IF(emit_move(compiler, tmp1, src2, src2w));
- FAIL_IF(push_inst(compiler, ins | (dst_r << 4) | src_r));
+ FAIL_IF(push_inst(compiler, ins | R4A(dst_r) | R0A(src_r)));
if (needs_move != 2)
return SLJIT_SUCCESS;
dst_r = gpr(dst & REG_MASK);
- return push_inst(compiler, (compiler->mode & SLJIT_I32_OP) ? lr(dst_r, tmp0) : lgr(dst_r, tmp0));
+ return push_inst(compiler, (compiler->mode & SLJIT_32) ? lr(dst_r, tmp0) : lgr(dst_r, tmp0));
+}
+
+static sljit_s32 emit_rr1(struct sljit_compiler *compiler, sljit_ins ins,
+ sljit_s32 dst,
+ sljit_s32 src1, sljit_sw src1w)
+{
+ sljit_gpr dst_r = FAST_IS_REG(dst) ? gpr(dst) : tmp0;
+ sljit_gpr src_r = tmp1;
+
+ if (FAST_IS_REG(src1))
+ src_r = gpr(src1);
+ else
+ FAIL_IF(emit_move(compiler, tmp1, src1, src1w));
+
+ return push_inst(compiler, ins | R4A(dst_r) | R0A(src_r));
}
static sljit_s32 emit_rrf(struct sljit_compiler *compiler, sljit_ins ins,
@@ -1068,21 +1130,21 @@ static sljit_s32 emit_rrf(struct sljit_compiler *compiler, sljit_ins ins,
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w)
{
- sljit_gpr dst_r = SLOW_IS_REG(dst) ? gpr(dst & REG_MASK) : tmp0;
+ sljit_gpr dst_r = FAST_IS_REG(dst) ? gpr(dst & REG_MASK) : tmp0;
sljit_gpr src1_r = tmp0;
sljit_gpr src2_r = tmp1;
if (FAST_IS_REG(src1))
- src1_r = gpr(src1 & REG_MASK);
+ src1_r = gpr(src1);
else
FAIL_IF(emit_move(compiler, tmp0, src1, src1w));
if (FAST_IS_REG(src2))
- src2_r = gpr(src2 & REG_MASK);
+ src2_r = gpr(src2);
else
FAIL_IF(emit_move(compiler, tmp1, src2, src2w));
- return push_inst(compiler, ins | (dst_r << 4) | src1_r | (src2_r << 12));
+ return push_inst(compiler, ins | R4A(dst_r) | R0A(src1_r) | R12A(src2_r));
}
typedef enum {
@@ -1099,8 +1161,8 @@ static sljit_s32 emit_ri(struct sljit_compiler *compiler, sljit_ins ins,
sljit_gpr dst_r = tmp0;
sljit_s32 needs_move = 1;
- if (SLOW_IS_REG(dst)) {
- dst_r = gpr(dst & REG_MASK);
+ if (FAST_IS_REG(dst)) {
+ dst_r = gpr(dst);
if (dst == src1)
needs_move = 0;
@@ -1110,8 +1172,8 @@ static sljit_s32 emit_ri(struct sljit_compiler *compiler, sljit_ins ins,
FAIL_IF(emit_move(compiler, dst_r, src1, src1w));
if (type == RIL_A)
- return push_inst(compiler, ins | (dst_r << 36) | (src2w & 0xffffffff));
- return push_inst(compiler, ins | (dst_r << 20) | (src2w & 0xffff));
+ return push_inst(compiler, ins | R36A(dst_r) | (src2w & 0xffffffff));
+ return push_inst(compiler, ins | R20A(dst_r) | (src2w & 0xffff));
}
static sljit_s32 emit_rie_d(struct sljit_compiler *compiler, sljit_ins ins,
@@ -1119,15 +1181,15 @@ static sljit_s32 emit_rie_d(struct sljit_compiler *compiler, sljit_ins ins,
sljit_s32 src1, sljit_sw src1w,
sljit_sw src2w)
{
- sljit_gpr dst_r = SLOW_IS_REG(dst) ? gpr(dst & REG_MASK) : tmp0;
+ sljit_gpr dst_r = FAST_IS_REG(dst) ? gpr(dst) : tmp0;
sljit_gpr src_r = tmp0;
- if (!SLOW_IS_REG(src1))
+ if (!FAST_IS_REG(src1))
FAIL_IF(emit_move(compiler, tmp0, src1, src1w));
else
src_r = gpr(src1 & REG_MASK);
- return push_inst(compiler, ins | (dst_r << 36) | (src_r << 32) | (src2w & 0xffff) << 16);
+ return push_inst(compiler, ins | R36A(dst_r) | R32A(src_r) | (sljit_ins)(src2w & 0xffff) << 16);
}
typedef enum {
@@ -1147,7 +1209,7 @@ static sljit_s32 emit_rx(struct sljit_compiler *compiler, sljit_ins ins,
SLJIT_ASSERT(src2 & SLJIT_MEM);
- if (SLOW_IS_REG(dst)) {
+ if (FAST_IS_REG(dst)) {
dst_r = gpr(dst);
if (dst == src1)
@@ -1183,9 +1245,9 @@ static sljit_s32 emit_rx(struct sljit_compiler *compiler, sljit_ins ins,
}
if (type == RX_A)
- ins |= (dst_r << 20) | (index << 16) | (base << 12) | src2w;
+ ins |= R20A(dst_r) | R16A(index) | R12A(base) | (sljit_ins)src2w;
else
- ins |= (dst_r << 36) | (index << 32) | (base << 28) | disp_s20(src2w);
+ ins |= R36A(dst_r) | R32A(index) | R28A(base) | disp_s20((sljit_s32)src2w);
FAIL_IF(push_inst(compiler, ins));
@@ -1193,17 +1255,17 @@ static sljit_s32 emit_rx(struct sljit_compiler *compiler, sljit_ins ins,
return SLJIT_SUCCESS;
dst_r = gpr(dst);
- return push_inst(compiler, (compiler->mode & SLJIT_I32_OP) ? lr(dst_r, tmp0) : lgr(dst_r, tmp0));
+ return push_inst(compiler, (compiler->mode & SLJIT_32) ? lr(dst_r, tmp0) : lgr(dst_r, tmp0));
}
static sljit_s32 emit_siy(struct sljit_compiler *compiler, sljit_ins ins,
sljit_s32 dst, sljit_sw dstw,
sljit_sw srcw)
{
- SLJIT_ASSERT(dst & SLJIT_MEM);
-
sljit_gpr dst_r = tmp1;
+ SLJIT_ASSERT(dst & SLJIT_MEM);
+
if (dst & OFFS_REG_MASK) {
sljit_gpr index = tmp1;
@@ -1226,7 +1288,7 @@ static sljit_s32 emit_siy(struct sljit_compiler *compiler, sljit_ins ins,
else
dst_r = gpr(dst & REG_MASK);
- return push_inst(compiler, ins | ((srcw & 0xff) << 32) | (dst_r << 28) | disp_s20(dstw));
+ return push_inst(compiler, ins | ((sljit_ins)(srcw & 0xff) << 32) | R28A(dst_r) | disp_s20((sljit_s32)dstw));
}
struct ins_forms {
@@ -1240,7 +1302,7 @@ struct ins_forms {
};
static sljit_s32 emit_commutative(struct sljit_compiler *compiler, const struct ins_forms *forms,
- sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 dst,
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w)
{
@@ -1250,7 +1312,7 @@ static sljit_s32 emit_commutative(struct sljit_compiler *compiler, const struct
if ((src1 | src2) & SLJIT_MEM) {
sljit_ins ins12, ins20;
- if (mode & SLJIT_I32_OP) {
+ if (mode & SLJIT_32) {
ins12 = forms->op;
ins20 = forms->op_y;
}
@@ -1297,7 +1359,7 @@ static sljit_s32 emit_commutative(struct sljit_compiler *compiler, const struct
}
}
- if (mode & SLJIT_I32_OP) {
+ if (mode & SLJIT_32) {
ins = forms->op_r;
ins_k = forms->op_rk;
}
@@ -1308,7 +1370,7 @@ static sljit_s32 emit_commutative(struct sljit_compiler *compiler, const struct
SLJIT_ASSERT(ins != 0 || ins_k != 0);
- if (ins && SLOW_IS_REG(dst)) {
+ if (ins && FAST_IS_REG(dst)) {
if (dst == src1)
return emit_rr(compiler, ins, dst, src1, src1w, src2, src2w);
@@ -1323,7 +1385,7 @@ static sljit_s32 emit_commutative(struct sljit_compiler *compiler, const struct
}
static sljit_s32 emit_non_commutative(struct sljit_compiler *compiler, const struct ins_forms *forms,
- sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 dst,
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w)
{
@@ -1333,7 +1395,7 @@ static sljit_s32 emit_non_commutative(struct sljit_compiler *compiler, const str
if (src2 & SLJIT_MEM) {
sljit_ins ins12, ins20;
- if (mode & SLJIT_I32_OP) {
+ if (mode & SLJIT_32) {
ins12 = forms->op;
ins20 = forms->op_y;
}
@@ -1354,10 +1416,10 @@ static sljit_s32 emit_non_commutative(struct sljit_compiler *compiler, const str
return emit_rx(compiler, ins20, dst, src1, src1w, src2, src2w, RXY_A);
}
- ins = (mode & SLJIT_I32_OP) ? forms->op_rk : forms->op_grk;
+ ins = (mode & SLJIT_32) ? forms->op_rk : forms->op_grk;
- if (ins == 0 || (SLOW_IS_REG(dst) && dst == src1))
- return emit_rr(compiler, (mode & SLJIT_I32_OP) ? forms->op_r : forms->op_gr, dst, src1, src1w, src2, src2w);
+ if (ins == 0 || (FAST_IS_REG(dst) && dst == src1))
+ return emit_rr(compiler, (mode & SLJIT_32) ? forms->op_r : forms->op_gr, dst, src1, src1w, src2, src2w);
return emit_rrf(compiler, ins, dst, src1, src1w, src2, src2w);
}
@@ -1376,9 +1438,7 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
struct sljit_memory_fragment *buf;
void *code, *code_ptr;
sljit_uw *pool, *pool_ptr;
-
- sljit_uw source;
- sljit_sw offset; /* TODO(carenas): only need 32 bit */
+ sljit_sw source, offset; /* TODO(carenas): only need 32 bit */
CHECK_ERROR_PTR();
CHECK_PTR(check_sljit_generate_code(compiler));
@@ -1489,42 +1549,47 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
ins &= ~sljit_ins_const;
/* update instruction with relative address of constant */
- source = (sljit_uw)code_ptr;
- offset = (sljit_uw)pool_ptr - source;
+ source = (sljit_sw)code_ptr;
+ offset = (sljit_sw)pool_ptr - source;
+
SLJIT_ASSERT(!(offset & 1));
offset >>= 1; /* halfword (not byte) offset */
SLJIT_ASSERT(is_s32(offset));
+
ins |= (sljit_ins)offset & 0xffffffff;
/* update address */
const_->const_.addr = (sljit_uw)pool_ptr;
/* store initial value into pool and update pool address */
- *(pool_ptr++) = const_->init_value;
+ *(pool_ptr++) = (sljit_uw)const_->init_value;
/* move to next constant */
const_ = (struct sljit_s390x_const *)const_->const_.next;
}
if (jump && jump->addr == j) {
- sljit_sw target = (jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target;
+ sljit_sw target = (sljit_sw)((jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target);
if ((jump->flags & SLJIT_REWRITABLE_JUMP) || (jump->flags & JUMP_ADDR)) {
+ sljit_ins op, arg;
+
jump->addr = (sljit_uw)pool_ptr;
/* load address into tmp1 */
- source = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
- offset = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(pool_ptr, executable_offset) - source;
+ source = (sljit_sw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
+ offset = (sljit_sw)SLJIT_ADD_EXEC_OFFSET(pool_ptr, executable_offset) - source;
+
SLJIT_ASSERT(!(offset & 1));
offset >>= 1;
SLJIT_ASSERT(is_s32(offset));
- encode_inst(&code_ptr,
- lgrl(tmp1, offset & 0xffffffff));
+
+ encode_inst(&code_ptr, lgrl(tmp1, offset & 0xffffffff));
/* store jump target into pool and update pool address */
- *(pool_ptr++) = target;
+ *(pool_ptr++) = (sljit_uw)target;
/* branch to tmp1 */
- sljit_ins op = (ins >> 32) & 0xf;
- sljit_ins arg = (ins >> 36) & 0xf;
+ op = (ins >> 32) & 0xf;
+ arg = (ins >> 36) & 0xf;
switch (op) {
case 4: /* brcl -> bcr */
ins = bcr(arg, tmp1);
@@ -1538,7 +1603,7 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
}
else {
jump->addr = (sljit_uw)code_ptr + 2;
- source = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
+ source = (sljit_sw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
offset = target - source;
/* offset must be halfword aligned */
@@ -1552,14 +1617,14 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
jump = jump->next;
}
if (put_label && put_label->addr == j) {
- source = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
+ source = (sljit_sw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
SLJIT_ASSERT(put_label->label);
put_label->addr = (sljit_uw)code_ptr;
/* store target into pool */
*pool_ptr = put_label->label->addr;
- offset = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(pool_ptr, executable_offset) - source;
+ offset = (sljit_sw)SLJIT_ADD_EXEC_OFFSET(pool_ptr, executable_offset) - source;
pool_ptr++;
SLJIT_ASSERT(!(offset & 1));
@@ -1578,6 +1643,8 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
compiler->error = SLJIT_ERR_COMPILED;
compiler->executable_offset = executable_offset;
compiler->executable_size = ins_size;
+ if (pool_size)
+ compiler->executable_size += (pad_size + pool_size);
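+ /* the constant pool is emitted after the code, so its size (plus alignment padding) counts toward the executable size */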
code = SLJIT_ADD_EXEC_OFFSET(code, executable_offset);
code_ptr = SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
SLJIT_CACHE_FLUSH(code, code_ptr);
@@ -1589,16 +1656,38 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
{
/* TODO(mundaym): implement all */
switch (feature_type) {
+ case SLJIT_HAS_FPU:
+#ifdef SLJIT_IS_FPU_AVAILABLE
+ return (SLJIT_IS_FPU_AVAILABLE) != 0;
+#else
+ return 1;
+#endif /* SLJIT_IS_FPU_AVAILABLE */
+
case SLJIT_HAS_CLZ:
- return have_eimm() ? 1 : 0; /* FLOGR instruction */
+ case SLJIT_HAS_REV:
+ case SLJIT_HAS_ROT:
+ case SLJIT_HAS_PREFETCH:
+ case SLJIT_HAS_COPY_F32:
+ case SLJIT_HAS_COPY_F64:
+ case SLJIT_HAS_SIMD:
+ case SLJIT_HAS_ATOMIC:
+ return 1;
+
+ case SLJIT_HAS_CTZ:
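+ /* no single trailing-zero instruction; sljit_emit_clz_ctz composes it from flogr below */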
+ return 2;
+
case SLJIT_HAS_CMOV:
return have_lscond1() ? 1 : 0;
- case SLJIT_HAS_FPU:
- return 0;
}
return 0;
}
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_cmp_info(sljit_s32 type)
+{
+ SLJIT_UNUSED_ARG(type);
+ return 0;
+}
+
/* --------------------------------------------------------------------- */
/* Entry, exit */
/* --------------------------------------------------------------------- */
@@ -1607,36 +1696,94 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
- sljit_s32 args = get_arg_count(arg_types);
- sljit_sw frame_size;
+ sljit_s32 saved_arg_count = SLJIT_KEPT_SAVEDS_COUNT(options);
+ sljit_s32 offset, i, tmp;
CHECK_ERROR();
CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
- /* saved registers go in callee allocated save area */
- compiler->local_size = (local_size + 0xf) & ~0xf;
- frame_size = compiler->local_size + SLJIT_S390X_DEFAULT_STACK_FRAME_SIZE;
+ /* Saved registers are stored in callee allocated save area. */
+ SLJIT_ASSERT(gpr(SLJIT_FIRST_SAVED_REG) == r6 && gpr(SLJIT_S0) == r13);
- FAIL_IF(push_inst(compiler, stmg(r6, r15, r6 * sizeof(sljit_sw), r15))); /* save registers TODO(MGM): optimize */
- if (frame_size != 0) {
- if (is_s16(-frame_size))
- FAIL_IF(push_inst(compiler, aghi(r15, -((sljit_s16)frame_size))));
- else if (is_s32(-frame_size))
- FAIL_IF(push_inst(compiler, agfi(r15, -((sljit_s32)frame_size))));
- else {
- FAIL_IF(push_load_imm_inst(compiler, tmp1, -frame_size));
- FAIL_IF(push_inst(compiler, la(r15, 0, tmp1, r15)));
+ offset = 2 * SSIZE_OF(sw);
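+ /* stmg stores a contiguous register range, so the cases below pick the tightest span covering the upper scratch registers, the saved registers minus the kept ones, and r14. */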
+ if (saveds + scratches >= SLJIT_NUMBER_OF_REGISTERS) {
+ if (saved_arg_count == 0) {
+ FAIL_IF(push_inst(compiler, stmg(r6, r14, offset, r15)));
+ offset += 9 * SSIZE_OF(sw);
+ } else {
+ FAIL_IF(push_inst(compiler, stmg(r6, r13 - (sljit_gpr)saved_arg_count, offset, r15)));
+ offset += (8 - saved_arg_count) * SSIZE_OF(sw);
}
+ } else {
+ if (scratches == SLJIT_FIRST_SAVED_REG) {
+ FAIL_IF(push_inst(compiler, stg(r6, offset, 0, r15)));
+ offset += SSIZE_OF(sw);
+ } else if (scratches > SLJIT_FIRST_SAVED_REG) {
+ FAIL_IF(push_inst(compiler, stmg(r6, r6 + (sljit_gpr)(scratches - SLJIT_FIRST_SAVED_REG), offset, r15)));
+ offset += (scratches - (SLJIT_FIRST_SAVED_REG - 1)) * SSIZE_OF(sw);
+ }
+
+ if (saved_arg_count == 0) {
+ if (saveds == 0) {
+ FAIL_IF(push_inst(compiler, stg(r14, offset, 0, r15)));
+ offset += SSIZE_OF(sw);
+ } else {
+ FAIL_IF(push_inst(compiler, stmg(r14 - (sljit_gpr)saveds, r14, offset, r15)));
+ offset += (saveds + 1) * SSIZE_OF(sw);
+ }
+ } else if (saveds > saved_arg_count) {
+ if (saveds == saved_arg_count + 1) {
+ FAIL_IF(push_inst(compiler, stg(r14 - (sljit_gpr)saveds, offset, 0, r15)));
+ offset += SSIZE_OF(sw);
+ } else {
+ FAIL_IF(push_inst(compiler, stmg(r14 - (sljit_gpr)saveds, r13 - (sljit_gpr)saved_arg_count, offset, r15)));
+ offset += (saveds - saved_arg_count) * SSIZE_OF(sw);
+ }
+ }
+ }
+
+ if (saved_arg_count > 0) {
+ FAIL_IF(push_inst(compiler, stg(r14, offset, 0, r15)));
+ offset += SSIZE_OF(sw);
+ }
+
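+ /* Spill callee-saved FPRs, then any extra float scratch registers, with hand-encoded std (RX format: FPR in bits 8-11, base in bits 16-19, 12-bit displacement). */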
+ tmp = SLJIT_FS0 - fsaveds;
+ for (i = SLJIT_FS0; i > tmp; i--) {
+ FAIL_IF(push_inst(compiler, 0x60000000 /* std */ | F20(i) | R12A(r15) | (sljit_ins)offset));
+ offset += SSIZE_OF(sw);
+ }
+
+ for (i = fscratches; i >= SLJIT_FIRST_SAVED_FLOAT_REG; i--) {
+ FAIL_IF(push_inst(compiler, 0x60000000 /* std */ | F20(i) | R12A(r15) | (sljit_ins)offset));
+ offset += SSIZE_OF(sw);
}
- if (args >= 1)
- FAIL_IF(push_inst(compiler, lgr(gpr(SLJIT_S0), gpr(SLJIT_R0))));
- if (args >= 2)
- FAIL_IF(push_inst(compiler, lgr(gpr(SLJIT_S1), gpr(SLJIT_R1))));
- if (args >= 3)
- FAIL_IF(push_inst(compiler, lgr(gpr(SLJIT_S2), gpr(SLJIT_R2))));
- SLJIT_ASSERT(args < 4);
+ local_size = (local_size + SLJIT_S390X_DEFAULT_STACK_FRAME_SIZE + 0xf) & ~0xf;
+ compiler->local_size = local_size;
+
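+ /* Allocate the frame: lay accepts a signed 20-bit displacement; larger frames fall back to slgfi with a 32-bit immediate. */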
+ if (is_s20(-local_size))
+ FAIL_IF(push_inst(compiler, 0xe30000000071 /* lay */ | R36A(r15) | R28A(r15) | disp_s20(-local_size)));
+ else
+ FAIL_IF(push_inst(compiler, 0xc20400000000 /* slgfi */ | R36A(r15) | (sljit_ins)local_size));
+
+ if (options & SLJIT_ENTER_REG_ARG)
+ return SLJIT_SUCCESS;
+
+ arg_types >>= SLJIT_ARG_SHIFT;
+ saved_arg_count = 0;
+ tmp = 0;
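+ /* Copy word arguments from their incoming scratch registers (R0, R1, ...) into saved registers; arguments tagged SLJIT_ARG_TYPE_SCRATCH_REG stay put, and floating-point arguments are left where the ABI placed them. */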
+ while (arg_types > 0) {
+ if ((arg_types & SLJIT_ARG_MASK) < SLJIT_ARG_TYPE_F64) {
+ if (!(arg_types & SLJIT_ARG_TYPE_SCRATCH_REG)) {
+ FAIL_IF(push_inst(compiler, lgr(gpr(SLJIT_S0 - saved_arg_count), gpr(SLJIT_R0 + tmp))));
+ saved_arg_count++;
+ }
+ tmp++;
+ }
+
+ arg_types >>= SLJIT_ARG_SHIFT;
+ }
return SLJIT_SUCCESS;
}
@@ -1649,37 +1796,118 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *comp
CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
- /* TODO(mundaym): stack space for saved floating point registers */
- compiler->local_size = (local_size + 0xf) & ~0xf;
+ compiler->local_size = (local_size + SLJIT_S390X_DEFAULT_STACK_FRAME_SIZE + 0xf) & ~0xf;
return SLJIT_SUCCESS;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw)
+static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler, sljit_gpr last_reg)
{
- sljit_sw size;
- sljit_gpr end;
+ sljit_s32 offset, i, tmp;
+ sljit_s32 local_size = compiler->local_size;
+ sljit_s32 saveds = compiler->saveds;
+ sljit_s32 scratches = compiler->scratches;
+ sljit_s32 kept_saveds_count = SLJIT_KEPT_SAVEDS_COUNT(compiler->options);
+
+ if (is_u12(local_size))
+ FAIL_IF(push_inst(compiler, 0x41000000 /* la */ | R20A(r15) | R12A(r15) | (sljit_ins)local_size));
+ else if (is_s20(local_size))
+ FAIL_IF(push_inst(compiler, 0xe30000000071 /* lay */ | R36A(r15) | R28A(r15) | disp_s20(local_size)));
+ else
+ FAIL_IF(push_inst(compiler, 0xc20a00000000 /* algfi */ | R36A(r15) | (sljit_ins)local_size));
- CHECK_ERROR();
- CHECK(check_sljit_emit_return(compiler, op, src, srcw));
+ offset = 2 * SSIZE_OF(sw);
+ if (saveds + scratches >= SLJIT_NUMBER_OF_REGISTERS) {
+ if (kept_saveds_count == 0) {
+ FAIL_IF(push_inst(compiler, lmg(r6, last_reg, offset, r15)));
+ offset += 9 * SSIZE_OF(sw);
+ } else {
+ FAIL_IF(push_inst(compiler, lmg(r6, r13 - (sljit_gpr)kept_saveds_count, offset, r15)));
+ offset += (8 - kept_saveds_count) * SSIZE_OF(sw);
+ }
+ } else {
+ if (scratches == SLJIT_FIRST_SAVED_REG) {
+ FAIL_IF(push_inst(compiler, lg(r6, offset, 0, r15)));
+ offset += SSIZE_OF(sw);
+ } else if (scratches > SLJIT_FIRST_SAVED_REG) {
+ FAIL_IF(push_inst(compiler, lmg(r6, r6 + (sljit_gpr)(scratches - SLJIT_FIRST_SAVED_REG), offset, r15)));
+ offset += (scratches - (SLJIT_FIRST_SAVED_REG - 1)) * SSIZE_OF(sw);
+ }
- FAIL_IF(emit_mov_before_return(compiler, op, src, srcw));
+ if (kept_saveds_count == 0) {
+ if (saveds == 0) {
+ if (last_reg == r14)
+ FAIL_IF(push_inst(compiler, lg(r14, offset, 0, r15)));
+ offset += SSIZE_OF(sw);
+ } else if (saveds == 1 && last_reg == r13) {
+ FAIL_IF(push_inst(compiler, lg(r13, offset, 0, r15)));
+ offset += 2 * SSIZE_OF(sw);
+ } else {
+ FAIL_IF(push_inst(compiler, lmg(r14 - (sljit_gpr)saveds, last_reg, offset, r15)));
+ offset += (saveds + 1) * SSIZE_OF(sw);
+ }
+ } else if (saveds > kept_saveds_count) {
+ if (saveds == kept_saveds_count + 1) {
+ FAIL_IF(push_inst(compiler, lg(r14 - (sljit_gpr)saveds, offset, 0, r15)));
+ offset += SSIZE_OF(sw);
+ } else {
+ FAIL_IF(push_inst(compiler, lmg(r14 - (sljit_gpr)saveds, r13 - (sljit_gpr)kept_saveds_count, offset, r15)));
+ offset += (saveds - kept_saveds_count) * SSIZE_OF(sw);
+ }
+ }
+ }
- size = compiler->local_size + SLJIT_S390X_DEFAULT_STACK_FRAME_SIZE + (r6 * sizeof(sljit_sw));
- if (!is_s20(size)) {
- FAIL_IF(push_load_imm_inst(compiler, tmp1, compiler->local_size + SLJIT_S390X_DEFAULT_STACK_FRAME_SIZE));
- FAIL_IF(push_inst(compiler, la(r15, 0, tmp1, r15)));
- size = r6 * sizeof(sljit_sw);
- end = r14; /* r15 has been restored already */
+ if (kept_saveds_count > 0) {
+ if (last_reg == r14)
+ FAIL_IF(push_inst(compiler, lg(r14, offset, 0, r15)));
+ offset += SSIZE_OF(sw);
}
- else
- end = r15;
- FAIL_IF(push_inst(compiler, lmg(r6, end, size, r15))); /* restore registers TODO(MGM): optimize */
- FAIL_IF(push_inst(compiler, br(r14))); /* return */
+ tmp = SLJIT_FS0 - compiler->fsaveds;
+ for (i = SLJIT_FS0; i > tmp; i--) {
+ FAIL_IF(push_inst(compiler, 0x68000000 /* ld */ | F20(i) | R12A(r15) | (sljit_ins)offset));
+ offset += SSIZE_OF(sw);
+ }
+
+ for (i = compiler->fscratches; i >= SLJIT_FIRST_SAVED_FLOAT_REG; i--) {
+ FAIL_IF(push_inst(compiler, 0x68000000 /* ld */ | F20(i) | R12A(r15) | (sljit_ins)offset));
+ offset += SSIZE_OF(sw);
+ }
return SLJIT_SUCCESS;
}
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler *compiler)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_return_void(compiler));
+
+ FAIL_IF(emit_stack_frame_release(compiler, r14));
+ return push_inst(compiler, br(r14)); /* return */
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_to(struct sljit_compiler *compiler,
+ sljit_s32 src, sljit_sw srcw)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_return_to(compiler, src, srcw));
+
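+ /* If the target lives in memory or in a register that emit_stack_frame_release below will reload, move it into tmp1 (TMP_REG2) first. */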
+ if (src & SLJIT_MEM) {
+ ADJUST_LOCAL_OFFSET(src, srcw);
+ FAIL_IF(load_word(compiler, tmp1, src, srcw, 0 /* 64-bit */));
+ src = TMP_REG2;
+ srcw = 0;
+ } else if (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) {
+ FAIL_IF(push_inst(compiler, lgr(tmp1, gpr(src))));
+ src = TMP_REG2;
+ srcw = 0;
+ }
+
+ FAIL_IF(emit_stack_frame_release(compiler, r13));
+
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_ijump(compiler, SLJIT_JUMP, src, srcw);
+}
+
/* --------------------------------------------------------------------- */
/* Operators */
/* --------------------------------------------------------------------- */
@@ -1692,7 +1920,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
CHECK_ERROR();
CHECK(check_sljit_emit_op0(compiler, op));
- op = GET_OPCODE(op) | (op & SLJIT_I32_OP);
+ op = GET_OPCODE(op) | (op & SLJIT_32);
switch (op) {
case SLJIT_BREAKPOINT:
/* The following invalid instruction is emitted by gdb. */
@@ -1768,12 +1996,126 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
return push_inst(compiler, lgr(arg1, tmp0));
}
+static sljit_s32 sljit_emit_clz_ctz(struct sljit_compiler *compiler, sljit_s32 op, sljit_gpr dst_r, sljit_gpr src_r)
+{
+ sljit_s32 is_ctz = (GET_OPCODE(op) == SLJIT_CTZ);
+
+ if ((op & SLJIT_32) && src_r != tmp0) {
+ FAIL_IF(push_inst(compiler, 0xb9160000 /* llgfr */ | R4A(tmp0) | R0A(src_r)));
+ src_r = tmp0;
+ }
+
+ if (is_ctz) {
+ FAIL_IF(push_inst(compiler, ((op & SLJIT_32) ? 0x1300 /* lcr */ : 0xb9030000 /* lcgr */) | R4A(tmp1) | R0A(src_r)));
+
+ if (src_r == tmp0)
+ FAIL_IF(push_inst(compiler, ((op & SLJIT_32) ? 0x1400 /* nr */ : 0xb9800000 /* ngr */) | R4A(tmp0) | R0A(tmp1)));
+ else
+ FAIL_IF(push_inst(compiler, 0xb9e40000 /* ngrk */ | R12A(tmp1) | R4A(tmp0) | R0A(src_r)));
+
+ src_r = tmp0;
+ }
+
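+ /* flogr writes the even-odd pair tmp0/tmp1: tmp0 receives the leading-zero count (64 for a zero input), tmp1 is clobbered. */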
+ FAIL_IF(push_inst(compiler, 0xb9830000 /* flogr */ | R4A(tmp0) | R0A(src_r)));
+
+ if (is_ctz)
+ FAIL_IF(push_inst(compiler, 0xec00000000d9 /* aghik */ | R36A(tmp1) | R32A(tmp0) | ((sljit_ins)(-64 & 0xffff) << 16)));
+
+ if (op & SLJIT_32) {
+ if (!is_ctz && dst_r != tmp0)
+ return push_inst(compiler, 0xec00000000d9 /* aghik */ | R36A(dst_r) | R32A(tmp0) | ((sljit_ins)(-32 & 0xffff) << 16));
+
+ FAIL_IF(push_inst(compiler, 0xc20800000000 /* agfi */ | R36A(tmp0) | (sljit_u32)-32));
+ }
+
+ if (is_ctz)
+ FAIL_IF(push_inst(compiler, 0xec0000000057 /* rxsbg */ | R36A(tmp0) | R32A(tmp1) | ((sljit_ins)((op & SLJIT_32) ? 59 : 58) << 24) | (63 << 16) | ((sljit_ins)((op & SLJIT_32) ? 5 : 6) << 8)));
+
+ if (dst_r == tmp0)
+ return SLJIT_SUCCESS;
+
+ return push_inst(compiler, ((op & SLJIT_32) ? 0x1800 /* lr */ : 0xb9040000 /* lgr */) | R4A(dst_r) | R0A(tmp0));
+}
+
+static sljit_s32 sljit_emit_rev(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 src, sljit_sw srcw)
+{
+ struct addr addr;
+ sljit_gpr reg;
+ sljit_ins ins;
+ sljit_s32 opcode = GET_OPCODE(op);
+ sljit_s32 is_16bit = (opcode == SLJIT_REV_U16 || opcode == SLJIT_REV_S16);
+
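+ /* Byte-reversing loads and stores (lrvh/lrv/lrvg, strvh/strv/strvg) let a reversal be folded into the memory access; register operands use lrvr/lrvgr instead. */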
+ if (dst & SLJIT_MEM) {
+ if (src & SLJIT_MEM) {
+ FAIL_IF(load_store_op(compiler, tmp0, src, srcw, op & SLJIT_32, is_16bit ? load_halfword_forms : load_forms));
+ reg = tmp0;
+ } else
+ reg = gpr(src);
+
+ FAIL_IF(make_addr_bxy(compiler, &addr, dst, dstw, tmp1));
+
+ if (is_16bit)
+ ins = 0xe3000000003f /* strvh */;
+ else
+ ins = (op & SLJIT_32) ? 0xe3000000003e /* strv */ : 0xe3000000002f /* strvg */;
+
+ return push_inst(compiler, ins | R36A(reg) | R32A(addr.index) | R28A(addr.base) | disp_s20(addr.offset));
+ }
+
+ reg = gpr(dst);
+
+ if (src & SLJIT_MEM) {
+ FAIL_IF(make_addr_bxy(compiler, &addr, src, srcw, tmp1));
+
+ if (is_16bit)
+ ins = 0xe3000000001f /* lrvh */;
+ else
+ ins = (op & SLJIT_32) ? 0xe3000000001e /* lrv */ : 0xe3000000000f /* lrvg */;
+
+ FAIL_IF(push_inst(compiler, ins | R36A(reg) | R32A(addr.index) | R28A(addr.base) | disp_s20(addr.offset)));
+
+ if (opcode == SLJIT_REV)
+ return SLJIT_SUCCESS;
+
+ if (is_16bit) {
+ if (op & SLJIT_32)
+ ins = (opcode == SLJIT_REV_U16) ? 0xb9950000 /* llhr */ : 0xb9270000 /* lhr */;
+ else
+ ins = (opcode == SLJIT_REV_U16) ? 0xb9850000 /* llghr */ : 0xb9070000 /* lghr */;
+ } else
+ ins = (opcode == SLJIT_REV_U32) ? 0xb9160000 /* llgfr */ : 0xb9140000 /* lgfr */;
+
+ return push_inst(compiler, ins | R4A(reg) | R0A(reg));
+ }
+
+ ins = (op & SLJIT_32) ? 0xb91f0000 /* lrvr */ : 0xb90f0000 /* lrvgr */;
+ FAIL_IF(push_inst(compiler, ins | R4A(reg) | R0A(gpr(src))));
+
+ if (opcode == SLJIT_REV)
+ return SLJIT_SUCCESS;
+
+ if (!is_16bit) {
+ ins = (opcode == SLJIT_REV_U32) ? 0xb9160000 /* llgfr */ : 0xb9140000 /* lgfr */;
+ return push_inst(compiler, ins | R4A(reg) | R0A(reg));
+ }
+
+ if (op & SLJIT_32) {
+ ins = (opcode == SLJIT_REV_U16) ? 0x88000000 /* srl */ : 0x8a000000 /* sra */;
+ return push_inst(compiler, ins | R20A(reg) | 16);
+ }
+
+ ins = (opcode == SLJIT_REV_U16) ? 0xeb000000000c /* srlg */ : 0xeb000000000a /* srag */;
+ return push_inst(compiler, ins | R36A(reg) | R32A(reg) | (48 << 16));
+}
+
/* LEVAL will be defined later with different parameters as needed */
#define WHEN2(cond, i1, i2) (cond) ? LEVAL(i1) : LEVAL(i2)
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op,
- sljit_s32 dst, sljit_sw dstw,
- sljit_s32 src, sljit_sw srcw)
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 src, sljit_sw srcw)
{
sljit_ins ins;
struct addr mem;
@@ -1786,17 +2128,12 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
ADJUST_LOCAL_OFFSET(dst, dstw);
ADJUST_LOCAL_OFFSET(src, srcw);
- if ((dst == SLJIT_UNUSED) && !HAS_FLAGS(op)) {
- /* TODO(carenas): implement prefetch? */
- return SLJIT_SUCCESS;
- }
-
if (opcode >= SLJIT_MOV && opcode <= SLJIT_MOV_P) {
/* LOAD REGISTER */
if (FAST_IS_REG(dst) && FAST_IS_REG(src)) {
dst_r = gpr(dst);
src_r = gpr(src);
- switch (opcode | (op & SLJIT_I32_OP)) {
+ switch (opcode | (op & SLJIT_32)) {
/* 32-bit */
case SLJIT_MOV32_U8:
ins = llcr(dst_r, src_r);
@@ -1811,6 +2148,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
ins = lhr(dst_r, src_r);
break;
case SLJIT_MOV32:
+ if (dst_r == src_r)
+ return SLJIT_SUCCESS;
ins = lr(dst_r, src_r);
break;
/* 64-bit */
@@ -1834,17 +2173,20 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
break;
case SLJIT_MOV:
case SLJIT_MOV_P:
+ if (dst_r == src_r)
+ return SLJIT_SUCCESS;
ins = lgr(dst_r, src_r);
break;
default:
ins = 0;
SLJIT_UNREACHABLE();
+ break;
}
FAIL_IF(push_inst(compiler, ins));
return SLJIT_SUCCESS;
}
/* LOAD IMMEDIATE */
- if (FAST_IS_REG(dst) && (src & SLJIT_IMM)) {
+ if (FAST_IS_REG(dst) && src == SLJIT_IMM) {
switch (opcode) {
case SLJIT_MOV_U8:
srcw = (sljit_sw)((sljit_u8)(srcw));
@@ -1862,6 +2204,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
srcw = (sljit_sw)((sljit_u32)(srcw));
break;
case SLJIT_MOV_S32:
+ case SLJIT_MOV32:
srcw = (sljit_sw)((sljit_s32)(srcw));
break;
}
@@ -1875,7 +2218,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
FAIL_IF(make_addr_bxy(compiler, &mem, src, srcw, tmp1));
/* TODO(carenas): convert all calls below to LEVAL */
- switch (opcode | (op & SLJIT_I32_OP)) {
+ switch (opcode | (op & SLJIT_32)) {
case SLJIT_MOV32_U8:
ins = llc(reg, mem.offset, mem.index, mem.base);
break;
@@ -1914,20 +2257,22 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
ins = lg(reg, mem.offset, mem.index, mem.base);
break;
default:
+ ins = 0;
SLJIT_UNREACHABLE();
+ break;
}
FAIL_IF(push_inst(compiler, ins));
return SLJIT_SUCCESS;
}
/* STORE and STORE IMMEDIATE */
- if ((dst & SLJIT_MEM)
- && (FAST_IS_REG(src) || (src & SLJIT_IMM))) {
+ if ((dst & SLJIT_MEM) && (FAST_IS_REG(src) || src == SLJIT_IMM)) {
+ struct addr mem;
sljit_gpr reg = FAST_IS_REG(src) ? gpr(src) : tmp0;
- if (src & SLJIT_IMM) {
+
+ if (src == SLJIT_IMM) {
/* TODO(mundaym): MOVE IMMEDIATE? */
FAIL_IF(push_load_imm_inst(compiler, reg, srcw));
}
- struct addr mem;
FAIL_IF(make_addr_bxy(compiler, &mem, dst, dstw, tmp1));
switch (opcode) {
case SLJIT_MOV_U8:
@@ -1940,6 +2285,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
WHEN2(is_u12(mem.offset), sth, sthy));
case SLJIT_MOV_U32:
case SLJIT_MOV_S32:
+ case SLJIT_MOV32:
return push_inst(compiler,
WHEN2(is_u12(mem.offset), st, sty));
case SLJIT_MOV_P:
@@ -1972,6 +2318,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
EVAL(sthy, tmp0, mem));
case SLJIT_MOV_U32:
case SLJIT_MOV_S32:
+ case SLJIT_MOV32:
FAIL_IF(push_inst(compiler,
EVAL(ly, tmp0, mem)));
FAIL_IF(make_addr_bxy(compiler, &mem, dst, dstw, tmp1));
@@ -1992,75 +2339,36 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
SLJIT_UNREACHABLE();
}
- SLJIT_ASSERT((src & SLJIT_IMM) == 0); /* no immediates */
+ SLJIT_ASSERT(src != SLJIT_IMM);
- dst_r = SLOW_IS_REG(dst) ? gpr(REG_MASK & dst) : tmp0;
- src_r = FAST_IS_REG(src) ? gpr(REG_MASK & src) : tmp0;
- if (src & SLJIT_MEM)
- FAIL_IF(load_word(compiler, src_r, src, srcw, src & SLJIT_I32_OP));
+ dst_r = FAST_IS_REG(dst) ? gpr(dst) : tmp0;
+ src_r = FAST_IS_REG(src) ? gpr(src) : tmp0;
compiler->status_flags_state = op & (VARIABLE_FLAG_MASK | SLJIT_SET_Z);
/* TODO(mundaym): optimize loads and stores */
- switch (opcode | (op & SLJIT_I32_OP)) {
- case SLJIT_NOT:
- /* emulate ~x with x^-1 */
- FAIL_IF(push_load_imm_inst(compiler, tmp1, -1));
- if (src_r != dst_r)
- FAIL_IF(push_inst(compiler, lgr(dst_r, src_r)));
-
- FAIL_IF(push_inst(compiler, xgr(dst_r, tmp1)));
- break;
- case SLJIT_NOT32:
- /* emulate ~x with x^-1 */
- if (have_eimm())
- FAIL_IF(push_inst(compiler, xilf(dst_r, -1)));
- else {
- FAIL_IF(push_load_imm_inst(compiler, tmp1, -1));
- if (src_r != dst_r)
- FAIL_IF(push_inst(compiler, lr(dst_r, src_r)));
-
- FAIL_IF(push_inst(compiler, xr(dst_r, tmp1)));
- }
- break;
- case SLJIT_NEG:
- compiler->status_flags_state |= SLJIT_CURRENT_FLAGS_ADD_SUB;
- FAIL_IF(push_inst(compiler, lcgr(dst_r, src_r)));
- break;
- case SLJIT_NEG32:
- compiler->status_flags_state |= SLJIT_CURRENT_FLAGS_ADD_SUB;
- FAIL_IF(push_inst(compiler, lcr(dst_r, src_r)));
- break;
+ switch (opcode) {
case SLJIT_CLZ:
- if (have_eimm()) {
- FAIL_IF(push_inst(compiler, flogr(tmp0, src_r))); /* clobbers tmp1 */
- if (dst_r != tmp0)
- FAIL_IF(push_inst(compiler, lgr(dst_r, tmp0)));
- } else {
- abort(); /* TODO(mundaym): no eimm (?) */
- }
- break;
- case SLJIT_CLZ32:
- if (have_eimm()) {
- FAIL_IF(push_inst(compiler, sllg(tmp1, src_r, 32, 0)));
- FAIL_IF(push_inst(compiler, iilf(tmp1, 0xffffffff)));
- FAIL_IF(push_inst(compiler, flogr(tmp0, tmp1))); /* clobbers tmp1 */
- if (dst_r != tmp0)
- FAIL_IF(push_inst(compiler, lr(dst_r, tmp0)));
- } else {
- abort(); /* TODO(mundaym): no eimm (?) */
- }
+ case SLJIT_CTZ:
+ if (src & SLJIT_MEM)
+ FAIL_IF(load_unsigned_word(compiler, src_r, src, srcw, op & SLJIT_32));
+
+ FAIL_IF(sljit_emit_clz_ctz(compiler, op, dst_r, src_r));
break;
+ case SLJIT_REV_U32:
+ case SLJIT_REV_S32:
+ op |= SLJIT_32;
+ /* fallthrough */
+ case SLJIT_REV:
+ case SLJIT_REV_U16:
+ case SLJIT_REV_S16:
+ return sljit_emit_rev(compiler, op, dst, dstw, src, srcw);
default:
SLJIT_UNREACHABLE();
}
- if ((op & (SLJIT_SET_Z | VARIABLE_FLAG_MASK)) == (SLJIT_SET_Z | SLJIT_SET_OVERFLOW))
- FAIL_IF(update_zero_overflow(compiler, op, dst_r));
-
- /* TODO(carenas): doesn't need FAIL_IF */
- if ((dst != SLJIT_UNUSED) && (dst & SLJIT_MEM))
- FAIL_IF(store_word(compiler, dst_r, dst, dstw, op & SLJIT_I32_OP));
+ if (dst & SLJIT_MEM)
+ return store_word(compiler, dst_r, dst, dstw, op & SLJIT_32);
return SLJIT_SUCCESS;
}
@@ -2079,25 +2387,6 @@ static SLJIT_INLINE int is_commutative(sljit_s32 op)
return 0;
}
-static SLJIT_INLINE int is_shift(sljit_s32 op) {
- sljit_s32 v = GET_OPCODE(op);
- return (v == SLJIT_SHL || v == SLJIT_ASHR || v == SLJIT_LSHR) ? 1 : 0;
-}
-
-static SLJIT_INLINE int sets_signed_flag(sljit_s32 op)
-{
- switch (GET_FLAG_TYPE(op)) {
- case SLJIT_OVERFLOW:
- case SLJIT_NOT_OVERFLOW:
- case SLJIT_SIG_LESS:
- case SLJIT_SIG_LESS_EQUAL:
- case SLJIT_SIG_GREATER:
- case SLJIT_SIG_GREATER_EQUAL:
- return 1;
- }
- return 0;
-}
-
static const struct ins_forms add_forms = {
0x1a00, /* ar */
0xb9080000, /* agr */
@@ -2128,27 +2417,27 @@ static sljit_s32 sljit_emit_add(struct sljit_compiler *compiler, sljit_s32 op,
const struct ins_forms *forms;
sljit_ins ins;
- if (src2 & SLJIT_IMM) {
+ if (src2 == SLJIT_IMM) {
if (!sets_zero_overflow && is_s8(src2w) && (src1 & SLJIT_MEM) && (dst == src1 && dstw == src1w)) {
if (sets_overflow)
- ins = (op & SLJIT_I32_OP) ? 0xeb000000006a /* asi */ : 0xeb000000007a /* agsi */;
+ ins = (op & SLJIT_32) ? 0xeb000000006a /* asi */ : 0xeb000000007a /* agsi */;
else
- ins = (op & SLJIT_I32_OP) ? 0xeb000000006e /* alsi */ : 0xeb000000007e /* algsi */;
+ ins = (op & SLJIT_32) ? 0xeb000000006e /* alsi */ : 0xeb000000007e /* algsi */;
return emit_siy(compiler, ins, dst, dstw, src2w);
}
if (is_s16(src2w)) {
if (sets_overflow)
- ins = (op & SLJIT_I32_OP) ? 0xec00000000d8 /* ahik */ : 0xec00000000d9 /* aghik */;
+ ins = (op & SLJIT_32) ? 0xec00000000d8 /* ahik */ : 0xec00000000d9 /* aghik */;
else
- ins = (op & SLJIT_I32_OP) ? 0xec00000000da /* alhsik */ : 0xec00000000db /* alghsik */;
+ ins = (op & SLJIT_32) ? 0xec00000000da /* alhsik */ : 0xec00000000db /* alghsik */;
FAIL_IF(emit_rie_d(compiler, ins, dst, src1, src1w, src2w));
goto done;
}
if (!sets_overflow) {
- if ((op & SLJIT_I32_OP) || is_u32(src2w)) {
- ins = (op & SLJIT_I32_OP) ? 0xc20b00000000 /* alfi */ : 0xc20a00000000 /* algfi */;
+ if ((op & SLJIT_32) || is_u32(src2w)) {
+ ins = (op & SLJIT_32) ? 0xc20b00000000 /* alfi */ : 0xc20a00000000 /* algfi */;
FAIL_IF(emit_ri(compiler, ins, dst, src1, src1w, src2w, RIL_A));
goto done;
}
@@ -2157,22 +2446,22 @@ static sljit_s32 sljit_emit_add(struct sljit_compiler *compiler, sljit_s32 op,
goto done;
}
}
- else if ((op & SLJIT_I32_OP) || is_s32(src2w)) {
- ins = (op & SLJIT_I32_OP) ? 0xc20900000000 /* afi */ : 0xc20800000000 /* agfi */;
+ else if ((op & SLJIT_32) || is_s32(src2w)) {
+ ins = (op & SLJIT_32) ? 0xc20900000000 /* afi */ : 0xc20800000000 /* agfi */;
FAIL_IF(emit_ri(compiler, ins, dst, src1, src1w, src2w, RIL_A));
goto done;
}
}
forms = sets_overflow ? &add_forms : &logical_add_forms;
- FAIL_IF(emit_commutative(compiler, forms, dst, dstw, src1, src1w, src2, src2w));
+ FAIL_IF(emit_commutative(compiler, forms, dst, src1, src1w, src2, src2w));
done:
if (sets_zero_overflow)
- FAIL_IF(update_zero_overflow(compiler, op, SLOW_IS_REG(dst) ? gpr(dst & REG_MASK) : tmp0));
+ FAIL_IF(update_zero_overflow(compiler, op, FAST_IS_REG(dst) ? gpr(dst & REG_MASK) : tmp0));
if (dst & SLJIT_MEM)
- return store_word(compiler, tmp0, dst, dstw, op & SLJIT_I32_OP);
+ return store_word(compiler, tmp0, dst, dstw, op & SLJIT_32);
return SLJIT_SUCCESS;
}
@@ -2202,78 +2491,84 @@ static sljit_s32 sljit_emit_sub(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w)
{
- int sets_signed = sets_signed_flag(op);
+ sljit_s32 flag_type = GET_FLAG_TYPE(op);
+ int sets_signed = (flag_type >= SLJIT_SIG_LESS && flag_type <= SLJIT_NOT_OVERFLOW);
int sets_zero_overflow = (op & (SLJIT_SET_Z | VARIABLE_FLAG_MASK)) == (SLJIT_SET_Z | SLJIT_SET_OVERFLOW);
const struct ins_forms *forms;
sljit_ins ins;
- if (dst == SLJIT_UNUSED && GET_FLAG_TYPE(op) <= SLJIT_SIG_LESS_EQUAL) {
- int compare_signed = GET_FLAG_TYPE(op) >= SLJIT_SIG_LESS;
+ if (dst == (sljit_s32)tmp0 && flag_type <= SLJIT_SIG_LESS_EQUAL) {
+ int compare_signed = flag_type >= SLJIT_SIG_LESS;
compiler->status_flags_state |= SLJIT_CURRENT_FLAGS_COMPARE;
- if (src2 & SLJIT_IMM) {
- if (compare_signed || ((op & VARIABLE_FLAG_MASK) == 0 && is_s32(src2w)))
- {
- if ((op & SLJIT_I32_OP) || is_s32(src2w)) {
- ins = (op & SLJIT_I32_OP) ? 0xc20d00000000 /* cfi */ : 0xc20c00000000 /* cgfi */;
+ if (src2 == SLJIT_IMM) {
+ if (compare_signed || ((op & VARIABLE_FLAG_MASK) == 0 && is_s32(src2w))) {
+ if ((op & SLJIT_32) || is_s32(src2w)) {
+ ins = (op & SLJIT_32) ? 0xc20d00000000 /* cfi */ : 0xc20c00000000 /* cgfi */;
return emit_ri(compiler, ins, src1, src1, src1w, src2w, RIL_A);
}
}
else {
- if ((op & SLJIT_I32_OP) || is_u32(src2w)) {
- ins = (op & SLJIT_I32_OP) ? 0xc20f00000000 /* clfi */ : 0xc20e00000000 /* clgfi */;
+ if ((op & SLJIT_32) || is_u32(src2w)) {
+ ins = (op & SLJIT_32) ? 0xc20f00000000 /* clfi */ : 0xc20e00000000 /* clgfi */;
return emit_ri(compiler, ins, src1, src1, src1w, src2w, RIL_A);
}
if (is_s16(src2w))
- return emit_rie_d(compiler, 0xec00000000db /* alghsik */, SLJIT_UNUSED, src1, src1w, src2w);
+ return emit_rie_d(compiler, 0xec00000000db /* alghsik */, (sljit_s32)tmp0, src1, src1w, src2w);
}
}
else if (src2 & SLJIT_MEM) {
- if ((op & SLJIT_I32_OP) && ((src2 & OFFS_REG_MASK) || is_u12(src2w))) {
+ if ((op & SLJIT_32) && ((src2 & OFFS_REG_MASK) || is_u12(src2w))) {
ins = compare_signed ? 0x59000000 /* c */ : 0x55000000 /* cl */;
return emit_rx(compiler, ins, src1, src1, src1w, src2, src2w, RX_A);
}
if (compare_signed)
- ins = (op & SLJIT_I32_OP) ? 0xe30000000059 /* cy */ : 0xe30000000020 /* cg */;
+ ins = (op & SLJIT_32) ? 0xe30000000059 /* cy */ : 0xe30000000020 /* cg */;
else
- ins = (op & SLJIT_I32_OP) ? 0xe30000000055 /* cly */ : 0xe30000000021 /* clg */;
+ ins = (op & SLJIT_32) ? 0xe30000000055 /* cly */ : 0xe30000000021 /* clg */;
return emit_rx(compiler, ins, src1, src1, src1w, src2, src2w, RXY_A);
}
if (compare_signed)
- ins = (op & SLJIT_I32_OP) ? 0x1900 /* cr */ : 0xb9200000 /* cgr */;
+ ins = (op & SLJIT_32) ? 0x1900 /* cr */ : 0xb9200000 /* cgr */;
else
- ins = (op & SLJIT_I32_OP) ? 0x1500 /* clr */ : 0xb9210000 /* clgr */;
+ ins = (op & SLJIT_32) ? 0x1500 /* clr */ : 0xb9210000 /* clgr */;
return emit_rr(compiler, ins, src1, src1, src1w, src2, src2w);
}
- if (src2 & SLJIT_IMM) {
+ if (src1 == SLJIT_IMM && src1w == 0 && (flag_type == 0 || sets_signed)) {
+ ins = (op & SLJIT_32) ? 0x1300 /* lcr */ : 0xb9030000 /* lcgr */;
+ FAIL_IF(emit_rr1(compiler, ins, dst, src2, src2w));
+ goto done;
+ }
+
+ if (src2 == SLJIT_IMM) {
sljit_sw neg_src2w = -src2w;
if (sets_signed || neg_src2w != 0 || (op & (SLJIT_SET_Z | VARIABLE_FLAG_MASK)) == 0) {
if (!sets_zero_overflow && is_s8(neg_src2w) && (src1 & SLJIT_MEM) && (dst == src1 && dstw == src1w)) {
if (sets_signed)
- ins = (op & SLJIT_I32_OP) ? 0xeb000000006a /* asi */ : 0xeb000000007a /* agsi */;
+ ins = (op & SLJIT_32) ? 0xeb000000006a /* asi */ : 0xeb000000007a /* agsi */;
else
- ins = (op & SLJIT_I32_OP) ? 0xeb000000006e /* alsi */ : 0xeb000000007e /* algsi */;
+ ins = (op & SLJIT_32) ? 0xeb000000006e /* alsi */ : 0xeb000000007e /* algsi */;
return emit_siy(compiler, ins, dst, dstw, neg_src2w);
}
if (is_s16(neg_src2w)) {
if (sets_signed)
- ins = (op & SLJIT_I32_OP) ? 0xec00000000d8 /* ahik */ : 0xec00000000d9 /* aghik */;
+ ins = (op & SLJIT_32) ? 0xec00000000d8 /* ahik */ : 0xec00000000d9 /* aghik */;
else
- ins = (op & SLJIT_I32_OP) ? 0xec00000000da /* alhsik */ : 0xec00000000db /* alghsik */;
+ ins = (op & SLJIT_32) ? 0xec00000000da /* alhsik */ : 0xec00000000db /* alghsik */;
FAIL_IF(emit_rie_d(compiler, ins, dst, src1, src1w, neg_src2w));
goto done;
}
}
if (!sets_signed) {
- if ((op & SLJIT_I32_OP) || is_u32(src2w)) {
- ins = (op & SLJIT_I32_OP) ? 0xc20500000000 /* slfi */ : 0xc20400000000 /* slgfi */;
+ if ((op & SLJIT_32) || is_u32(src2w)) {
+ ins = (op & SLJIT_32) ? 0xc20500000000 /* slfi */ : 0xc20400000000 /* slgfi */;
FAIL_IF(emit_ri(compiler, ins, dst, src1, src1w, src2w, RIL_A));
goto done;
}
@@ -2282,19 +2577,19 @@ static sljit_s32 sljit_emit_sub(struct sljit_compiler *compiler, sljit_s32 op,
goto done;
}
}
- else if ((op & SLJIT_I32_OP) || is_s32(neg_src2w)) {
- ins = (op & SLJIT_I32_OP) ? 0xc20900000000 /* afi */ : 0xc20800000000 /* agfi */;
+ else if ((op & SLJIT_32) || is_s32(neg_src2w)) {
+ ins = (op & SLJIT_32) ? 0xc20900000000 /* afi */ : 0xc20800000000 /* agfi */;
FAIL_IF(emit_ri(compiler, ins, dst, src1, src1w, neg_src2w, RIL_A));
goto done;
}
}
forms = sets_signed ? &sub_forms : &logical_sub_forms;
- FAIL_IF(emit_non_commutative(compiler, forms, dst, dstw, src1, src1w, src2, src2w));
+ FAIL_IF(emit_non_commutative(compiler, forms, dst, src1, src1w, src2, src2w));
done:
if (sets_signed) {
- sljit_gpr dst_r = SLOW_IS_REG(dst) ? gpr(dst & REG_MASK) : tmp0;
+ sljit_gpr dst_r = FAST_IS_REG(dst) ? gpr(dst & REG_MASK) : tmp0;
if ((op & VARIABLE_FLAG_MASK) != SLJIT_SET_OVERFLOW) {
/* In case of overflow, the sign bit of the two source operands must be different, and
@@ -2303,14 +2598,14 @@ done:
The -result operation sets the correct sign, because the result cannot be zero.
The overflow is considered greater, since the result must be equal to INT_MIN so its sign bit is set. */
FAIL_IF(push_inst(compiler, brc(0xe, 2 + 2)));
- FAIL_IF(push_inst(compiler, (op & SLJIT_I32_OP) ? lcr(tmp1, dst_r) : lcgr(tmp1, dst_r)));
+ FAIL_IF(push_inst(compiler, (op & SLJIT_32) ? lcr(tmp1, dst_r) : lcgr(tmp1, dst_r)));
}
else if (op & SLJIT_SET_Z)
FAIL_IF(update_zero_overflow(compiler, op, dst_r));
}
if (dst & SLJIT_MEM)
- return store_word(compiler, tmp0, dst, dstw, op & SLJIT_I32_OP);
+ return store_word(compiler, tmp0, dst, dstw, op & SLJIT_32);
return SLJIT_SUCCESS;
}
@@ -2336,7 +2631,7 @@ static const struct ins_forms multiply_overflow_forms = {
};
static sljit_s32 sljit_emit_multiply(struct sljit_compiler *compiler, sljit_s32 op,
- sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 dst,
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w)
{
@@ -2351,29 +2646,29 @@ static sljit_s32 sljit_emit_multiply(struct sljit_compiler *compiler, sljit_s32
}
FAIL_IF(push_inst(compiler, aih(tmp0, 1)));
FAIL_IF(push_inst(compiler, nihf(tmp0, ~1U)));
- FAIL_IF(push_inst(compiler, ipm(flag_r)));
- FAIL_IF(push_inst(compiler, oilh(flag_r, 0x2000))); */
+ FAIL_IF(push_inst(compiler, ipm(tmp1)));
+ FAIL_IF(push_inst(compiler, oilh(tmp1, 0x2000))); */
- return emit_commutative(compiler, &multiply_overflow_forms, dst, dstw, src1, src1w, src2, src2w);
+ return emit_commutative(compiler, &multiply_overflow_forms, dst, src1, src1w, src2, src2w);
}
- if (src2 & SLJIT_IMM) {
+ if (src2 == SLJIT_IMM) {
if (is_s16(src2w)) {
- ins = (op & SLJIT_I32_OP) ? 0xa70c0000 /* mhi */ : 0xa70d0000 /* mghi */;
+ ins = (op & SLJIT_32) ? 0xa70c0000 /* mhi */ : 0xa70d0000 /* mghi */;
return emit_ri(compiler, ins, dst, src1, src1w, src2w, RI_A);
}
if (is_s32(src2w)) {
- ins = (op & SLJIT_I32_OP) ? 0xc20100000000 /* msfi */ : 0xc20000000000 /* msgfi */;
+ ins = (op & SLJIT_32) ? 0xc20100000000 /* msfi */ : 0xc20000000000 /* msgfi */;
return emit_ri(compiler, ins, dst, src1, src1w, src2w, RIL_A);
}
}
- return emit_commutative(compiler, &multiply_forms, dst, dstw, src1, src1w, src2, src2w);
+ return emit_commutative(compiler, &multiply_forms, dst, src1, src1w, src2, src2w);
}
static sljit_s32 sljit_emit_bitwise_imm(struct sljit_compiler *compiler, sljit_s32 type,
- sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 dst,
sljit_s32 src1, sljit_sw src1w,
sljit_uw imm, sljit_s32 count16)
{
@@ -2381,7 +2676,7 @@ static sljit_s32 sljit_emit_bitwise_imm(struct sljit_compiler *compiler, sljit_s
sljit_gpr dst_r = tmp0;
sljit_s32 needs_move = 1;
- if (SLOW_IS_REG(dst)) {
+ if (IS_GPR_REG(dst)) {
dst_r = gpr(dst & REG_MASK);
if (dst == src1)
needs_move = 0;
@@ -2391,38 +2686,38 @@ static sljit_s32 sljit_emit_bitwise_imm(struct sljit_compiler *compiler, sljit_s
FAIL_IF(emit_move(compiler, dst_r, src1, src1w));
if (type == SLJIT_AND) {
- if (!(mode & SLJIT_I32_OP))
- FAIL_IF(push_inst(compiler, 0xc00a00000000 /* nihf */ | (dst_r << 36) | (imm >> 32)));
- return push_inst(compiler, 0xc00b00000000 /* nilf */ | (dst_r << 36) | (imm & 0xffffffff));
+ if (!(mode & SLJIT_32))
+ FAIL_IF(push_inst(compiler, 0xc00a00000000 /* nihf */ | R36A(dst_r) | (imm >> 32)));
+ return push_inst(compiler, 0xc00b00000000 /* nilf */ | R36A(dst_r) | (imm & 0xffffffff));
}
else if (type == SLJIT_OR) {
if (count16 >= 3) {
- FAIL_IF(push_inst(compiler, 0xc00c00000000 /* oihf */ | (dst_r << 36) | (imm >> 32)));
- return push_inst(compiler, 0xc00d00000000 /* oilf */ | (dst_r << 36) | (imm & 0xffffffff));
+ FAIL_IF(push_inst(compiler, 0xc00c00000000 /* oihf */ | R36A(dst_r) | (imm >> 32)));
+ return push_inst(compiler, 0xc00d00000000 /* oilf */ | R36A(dst_r) | (imm & 0xffffffff));
}
if (count16 >= 2) {
if ((imm & 0x00000000ffffffffull) == 0)
- return push_inst(compiler, 0xc00c00000000 /* oihf */ | (dst_r << 36) | (imm >> 32));
+ return push_inst(compiler, 0xc00c00000000 /* oihf */ | R36A(dst_r) | (imm >> 32));
if ((imm & 0xffffffff00000000ull) == 0)
- return push_inst(compiler, 0xc00d00000000 /* oilf */ | (dst_r << 36) | (imm & 0xffffffff));
+ return push_inst(compiler, 0xc00d00000000 /* oilf */ | R36A(dst_r) | (imm & 0xffffffff));
}
if ((imm & 0xffff000000000000ull) != 0)
- FAIL_IF(push_inst(compiler, 0xa5080000 /* oihh */ | (dst_r << 20) | (imm >> 48)));
+ FAIL_IF(push_inst(compiler, 0xa5080000 /* oihh */ | R20A(dst_r) | (imm >> 48)));
if ((imm & 0x0000ffff00000000ull) != 0)
- FAIL_IF(push_inst(compiler, 0xa5090000 /* oihl */ | (dst_r << 20) | ((imm >> 32) & 0xffff)));
+ FAIL_IF(push_inst(compiler, 0xa5090000 /* oihl */ | R20A(dst_r) | ((imm >> 32) & 0xffff)));
if ((imm & 0x00000000ffff0000ull) != 0)
- FAIL_IF(push_inst(compiler, 0xa50a0000 /* oilh */ | (dst_r << 20) | ((imm >> 16) & 0xffff)));
+ FAIL_IF(push_inst(compiler, 0xa50a0000 /* oilh */ | R20A(dst_r) | ((imm >> 16) & 0xffff)));
if ((imm & 0x000000000000ffffull) != 0 || imm == 0)
- return push_inst(compiler, 0xa50b0000 /* oill */ | (dst_r << 20) | (imm & 0xffff));
+ return push_inst(compiler, 0xa50b0000 /* oill */ | R20A(dst_r) | (imm & 0xffff));
return SLJIT_SUCCESS;
}
if ((imm & 0xffffffff00000000ull) != 0)
- FAIL_IF(push_inst(compiler, 0xc00600000000 /* xihf */ | (dst_r << 36) | (imm >> 32)));
+ FAIL_IF(push_inst(compiler, 0xc00600000000 /* xihf */ | R36A(dst_r) | (imm >> 32)));
if ((imm & 0x00000000ffffffffull) != 0 || imm == 0)
- return push_inst(compiler, 0xc00700000000 /* xilf */ | (dst_r << 36) | (imm & 0xffffffff));
+ return push_inst(compiler, 0xc00700000000 /* xilf */ | R36A(dst_r) | (imm & 0xffffffff));
return SLJIT_SUCCESS;
}
@@ -2457,18 +2752,18 @@ static const struct ins_forms bitwise_xor_forms = {
};
static sljit_s32 sljit_emit_bitwise(struct sljit_compiler *compiler, sljit_s32 op,
- sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 dst,
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w)
{
sljit_s32 type = GET_OPCODE(op);
const struct ins_forms *forms;
- if ((src2 & SLJIT_IMM) && (!(op & SLJIT_SET_Z) || (type == SLJIT_AND && dst == SLJIT_UNUSED))) {
+ if (src2 == SLJIT_IMM && (!(op & SLJIT_SET_Z) || (type == SLJIT_AND && dst == (sljit_s32)tmp0))) {
sljit_s32 count16 = 0;
sljit_uw imm = (sljit_uw)src2w;
- if (op & SLJIT_I32_OP)
+ if (op & SLJIT_32)
imm &= 0xffffffffull;
if ((imm & 0x000000000000ffffull) != 0 || imm == 0)
@@ -2480,7 +2775,7 @@ static sljit_s32 sljit_emit_bitwise(struct sljit_compiler *compiler, sljit_s32 o
if ((imm & 0xffff000000000000ull) != 0)
count16++;
- if (type == SLJIT_AND && dst == SLJIT_UNUSED && count16 == 1) {
+ if (type == SLJIT_AND && dst == (sljit_s32)tmp0 && count16 == 1) {
sljit_gpr src_r = tmp0;
if (FAST_IS_REG(src1))
@@ -2489,16 +2784,16 @@ static sljit_s32 sljit_emit_bitwise(struct sljit_compiler *compiler, sljit_s32 o
FAIL_IF(emit_move(compiler, tmp0, src1, src1w));
if ((imm & 0x000000000000ffffull) != 0 || imm == 0)
- return push_inst(compiler, 0xa7010000 | (src_r << 20) | imm);
+ return push_inst(compiler, 0xa7010000 /* tmll */ | R20A(src_r) | imm);
if ((imm & 0x00000000ffff0000ull) != 0)
- return push_inst(compiler, 0xa7000000 | (src_r << 20) | (imm >> 16));
+ return push_inst(compiler, 0xa7000000 /* tmlh */ | R20A(src_r) | (imm >> 16));
if ((imm & 0x0000ffff00000000ull) != 0)
- return push_inst(compiler, 0xa7030000 | (src_r << 20) | (imm >> 32));
- return push_inst(compiler, 0xa7020000 | (src_r << 20) | (imm >> 48));
+ return push_inst(compiler, 0xa7030000 /* tmhl */ | R20A(src_r) | (imm >> 32));
+ return push_inst(compiler, 0xa7020000 /* tmhh */ | R20A(src_r) | (imm >> 48));
}
if (!(op & SLJIT_SET_Z))
- return sljit_emit_bitwise_imm(compiler, type, dst, dstw, src1, src1w, imm, count16);
+ return sljit_emit_bitwise_imm(compiler, type, dst, src1, src1w, imm, count16);
}
if (type == SLJIT_AND)
@@ -2508,62 +2803,111 @@ static sljit_s32 sljit_emit_bitwise(struct sljit_compiler *compiler, sljit_s32 o
else
forms = &bitwise_xor_forms;
- return emit_commutative(compiler, forms, dst, dstw, src1, src1w, src2, src2w);
+ return emit_commutative(compiler, forms, dst, src1, src1w, src2, src2w);
}
static sljit_s32 sljit_emit_shift(struct sljit_compiler *compiler, sljit_s32 op,
- sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 dst,
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w)
{
sljit_s32 type = GET_OPCODE(op);
- sljit_gpr dst_r = SLOW_IS_REG(dst) ? gpr(dst & REG_MASK) : tmp0;
+ sljit_gpr dst_r = FAST_IS_REG(dst) ? gpr(dst & REG_MASK) : tmp0;
sljit_gpr src_r = tmp0;
sljit_gpr base_r = tmp0;
sljit_ins imm = 0;
sljit_ins ins;
if (FAST_IS_REG(src1))
- src_r = gpr(src1 & REG_MASK);
+ src_r = gpr(src1);
else
FAIL_IF(emit_move(compiler, tmp0, src1, src1w));
- if (src2 & SLJIT_IMM)
- imm = src2w & ((op & SLJIT_I32_OP) ? 0x1f : 0x3f);
- else if (FAST_IS_REG(src2))
- base_r = gpr(src2 & REG_MASK);
- else {
- FAIL_IF(emit_move(compiler, tmp1, src2, src2w));
- base_r = tmp1;
- }
+ if (src2 != SLJIT_IMM) {
+ if (FAST_IS_REG(src2))
+ base_r = gpr(src2);
+ else {
+ FAIL_IF(emit_move(compiler, tmp1, src2, src2w));
+ base_r = tmp1;
+ }
- if ((op & SLJIT_I32_OP) && dst_r == src_r) {
- if (type == SLJIT_SHL)
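+ /* Masked shift variants use only the low five bits of the amount in 32-bit mode: risbg extracts them into tmp1, or nill clears the high bits in place. */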
+ if ((op & SLJIT_32) && (type == SLJIT_MSHL || type == SLJIT_MLSHR || type == SLJIT_MASHR)) {
+ if (base_r != tmp1) {
+ FAIL_IF(push_inst(compiler, 0xec0000000055 /* risbg */ | R36A(tmp1) | R32A(base_r) | (59 << 24) | (1 << 23) | (63 << 16)));
+ base_r = tmp1;
+ } else
+ FAIL_IF(push_inst(compiler, 0xa5070000 /* nill */ | R20A(tmp1) | 0x1f));
+ }
+ } else
+ imm = (sljit_ins)(src2w & ((op & SLJIT_32) ? 0x1f : 0x3f));
+
+ if ((op & SLJIT_32) && dst_r == src_r) {
+ if (type == SLJIT_SHL || type == SLJIT_MSHL)
ins = 0x89000000 /* sll */;
- else if (type == SLJIT_LSHR)
+ else if (type == SLJIT_LSHR || type == SLJIT_MLSHR)
ins = 0x88000000 /* srl */;
else
ins = 0x8a000000 /* sra */;
- FAIL_IF(push_inst(compiler, ins | (dst_r << 20) | (base_r << 12) | imm));
- }
- else {
- if (type == SLJIT_SHL)
- ins = (op & SLJIT_I32_OP) ? 0xeb00000000df /* sllk */ : 0xeb000000000d /* sllg */;
- else if (type == SLJIT_LSHR)
- ins = (op & SLJIT_I32_OP) ? 0xeb00000000de /* srlk */ : 0xeb000000000c /* srlg */;
+ FAIL_IF(push_inst(compiler, ins | R20A(dst_r) | R12A(base_r) | imm));
+ } else {
+ if (type == SLJIT_SHL || type == SLJIT_MSHL)
+ ins = (op & SLJIT_32) ? 0xeb00000000df /* sllk */ : 0xeb000000000d /* sllg */;
+ else if (type == SLJIT_LSHR || type == SLJIT_MLSHR)
+ ins = (op & SLJIT_32) ? 0xeb00000000de /* srlk */ : 0xeb000000000c /* srlg */;
else
- ins = (op & SLJIT_I32_OP) ? 0xeb00000000dc /* srak */ : 0xeb000000000a /* srag */;
+ ins = (op & SLJIT_32) ? 0xeb00000000dc /* srak */ : 0xeb000000000a /* srag */;
- FAIL_IF(push_inst(compiler, ins | (dst_r << 36) | (src_r << 32) | (base_r << 28) | (imm << 16)));
+ FAIL_IF(push_inst(compiler, ins | R36A(dst_r) | R32A(src_r) | R28A(base_r) | (imm << 16)));
}
if ((op & SLJIT_SET_Z) && type != SLJIT_ASHR)
- return push_inst(compiler, (op & SLJIT_I32_OP) ? or(dst_r, dst_r) : ogr(dst_r, dst_r));
+ return push_inst(compiler, (op & SLJIT_32) ? or(dst_r, dst_r) : ogr(dst_r, dst_r));
return SLJIT_SUCCESS;
}
+static sljit_s32 sljit_emit_rotate(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2, sljit_sw src2w)
+{
+ sljit_gpr dst_r = FAST_IS_REG(dst) ? gpr(dst & REG_MASK) : tmp0;
+ sljit_gpr src_r = tmp0;
+ sljit_gpr base_r = tmp0;
+ sljit_ins imm = 0;
+ sljit_ins ins;
+
+ if (FAST_IS_REG(src1))
+ src_r = gpr(src1);
+ else
+ FAIL_IF(emit_move(compiler, tmp0, src1, src1w));
+
+ if (src2 != SLJIT_IMM) {
+ if (FAST_IS_REG(src2))
+ base_r = gpr(src2);
+ else {
+ FAIL_IF(emit_move(compiler, tmp1, src2, src2w));
+ base_r = tmp1;
+ }
+ }
+
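+ /* rll/rllg only rotate left, so a right rotation becomes a left rotation by the negated amount. */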
+ if (GET_OPCODE(op) == SLJIT_ROTR) {
+ if (src2 != SLJIT_IMM) {
+ ins = (op & SLJIT_32) ? 0x1300 /* lcr */ : 0xb9030000 /* lcgr */;
+ FAIL_IF(push_inst(compiler, ins | R4A(tmp1) | R0A(base_r)));
+ base_r = tmp1;
+ } else
+ src2w = -src2w;
+ }
+
+ if (src2 == SLJIT_IMM)
+ imm = (sljit_ins)(src2w & ((op & SLJIT_32) ? 0x1f : 0x3f));
+
+ ins = (op & SLJIT_32) ? 0xeb000000001d /* rll */ : 0xeb000000001c /* rllg */;
+ return push_inst(compiler, ins | R36A(dst_r) | R32A(src_r) | R28A(base_r) | (imm << 16));
+}
+
static const struct ins_forms addc_forms = {
0xb9980000, /* alcr */
0xb9880000, /* alcgr */
@@ -2590,21 +2934,15 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
sljit_s32 src2, sljit_sw src2w)
{
CHECK_ERROR();
- CHECK(check_sljit_emit_op2(compiler, op, dst, dstw, src1, src1w, src2, src2w));
+ CHECK(check_sljit_emit_op2(compiler, op, 0, dst, dstw, src1, src1w, src2, src2w));
ADJUST_LOCAL_OFFSET(dst, dstw);
ADJUST_LOCAL_OFFSET(src1, src1w);
ADJUST_LOCAL_OFFSET(src2, src2w);
- if (dst == SLJIT_UNUSED && !HAS_FLAGS(op))
- return SLJIT_SUCCESS;
-
- compiler->mode = op & SLJIT_I32_OP;
+ compiler->mode = op & SLJIT_32;
compiler->status_flags_state = op & (VARIABLE_FLAG_MASK | SLJIT_SET_Z);
- if (GET_OPCODE(op) >= SLJIT_ADD || GET_OPCODE(op) <= SLJIT_SUBC)
- compiler->status_flags_state |= SLJIT_CURRENT_FLAGS_ADD_SUB;
-
- if (is_commutative(op) && (src1 & SLJIT_IMM) && !(src2 & SLJIT_IMM)) {
+ if (is_commutative(op) && src1 == SLJIT_IMM && src2 != SLJIT_IMM) {
src1 ^= src2;
src2 ^= src1;
src1 ^= src2;
@@ -2616,44 +2954,184 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
switch (GET_OPCODE(op)) {
case SLJIT_ADD:
+ compiler->status_flags_state |= SLJIT_CURRENT_FLAGS_ADD;
return sljit_emit_add(compiler, op, dst, dstw, src1, src1w, src2, src2w);
case SLJIT_ADDC:
- FAIL_IF(emit_commutative(compiler, &addc_forms, dst, dstw, src1, src1w, src2, src2w));
+ compiler->status_flags_state |= SLJIT_CURRENT_FLAGS_ADD;
+ FAIL_IF(emit_commutative(compiler, &addc_forms, dst, src1, src1w, src2, src2w));
if (dst & SLJIT_MEM)
- return store_word(compiler, tmp0, dst, dstw, op & SLJIT_I32_OP);
+ return store_word(compiler, tmp0, dst, dstw, op & SLJIT_32);
return SLJIT_SUCCESS;
case SLJIT_SUB:
+ compiler->status_flags_state |= SLJIT_CURRENT_FLAGS_SUB;
return sljit_emit_sub(compiler, op, dst, dstw, src1, src1w, src2, src2w);
case SLJIT_SUBC:
- FAIL_IF(emit_non_commutative(compiler, &subc_forms, dst, dstw, src1, src1w, src2, src2w));
+ compiler->status_flags_state |= SLJIT_CURRENT_FLAGS_SUB;
+ FAIL_IF(emit_non_commutative(compiler, &subc_forms, dst, src1, src1w, src2, src2w));
if (dst & SLJIT_MEM)
- return store_word(compiler, tmp0, dst, dstw, op & SLJIT_I32_OP);
+ return store_word(compiler, tmp0, dst, dstw, op & SLJIT_32);
return SLJIT_SUCCESS;
case SLJIT_MUL:
- FAIL_IF(sljit_emit_multiply(compiler, op, dst, dstw, src1, src1w, src2, src2w));
+ FAIL_IF(sljit_emit_multiply(compiler, op, dst, src1, src1w, src2, src2w));
break;
case SLJIT_AND:
case SLJIT_OR:
case SLJIT_XOR:
- FAIL_IF(sljit_emit_bitwise(compiler, op, dst, dstw, src1, src1w, src2, src2w));
+ FAIL_IF(sljit_emit_bitwise(compiler, op, dst, src1, src1w, src2, src2w));
break;
case SLJIT_SHL:
+ case SLJIT_MSHL:
case SLJIT_LSHR:
+ case SLJIT_MLSHR:
case SLJIT_ASHR:
- FAIL_IF(sljit_emit_shift(compiler, op, dst, dstw, src1, src1w, src2, src2w));
+ case SLJIT_MASHR:
+ FAIL_IF(sljit_emit_shift(compiler, op, dst, src1, src1w, src2, src2w));
+ break;
+ case SLJIT_ROTL:
+ case SLJIT_ROTR:
+ FAIL_IF(sljit_emit_rotate(compiler, op, dst, src1, src1w, src2, src2w));
break;
}
if (dst & SLJIT_MEM)
- return store_word(compiler, tmp0, dst, dstw, op & SLJIT_I32_OP);
+ return store_word(compiler, tmp0, dst, dstw, op & SLJIT_32);
return SLJIT_SUCCESS;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(
- struct sljit_compiler *compiler,
- sljit_s32 op, sljit_s32 src, sljit_sw srcw)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2u(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2, sljit_sw src2w)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_op2(compiler, op, 1, 0, 0, src1, src1w, src2, src2w));
+
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_op2(compiler, op, (sljit_s32)tmp0, 0, src1, src1w, src2, src2w);
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_shift_into(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst_reg,
+ sljit_s32 src1_reg,
+ sljit_s32 src2_reg,
+ sljit_s32 src3, sljit_sw src3w)
+{
+ sljit_s32 is_right;
+ sljit_sw bit_length = (op & SLJIT_32) ? 32 : 64;
+ sljit_gpr dst_r = gpr(dst_reg);
+ sljit_gpr src1_r = gpr(src1_reg);
+ sljit_gpr src2_r = gpr(src2_reg);
+ sljit_gpr src3_r = tmp1;
+ sljit_ins ins;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_shift_into(compiler, op, dst_reg, src1_reg, src2_reg, src3, src3w));
+
+ is_right = (GET_OPCODE(op) == SLJIT_LSHR || GET_OPCODE(op) == SLJIT_MLSHR);
+
+ if (src1_reg == src2_reg) {
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_op2(compiler, (is_right ? SLJIT_ROTR : SLJIT_ROTL) | (op & SLJIT_32), dst_reg, 0, src1_reg, 0, src3, src3w);
+ }
+
+ ADJUST_LOCAL_OFFSET(src3, src3w);
+
+ if (src3 == SLJIT_IMM) {
+ src3w &= bit_length - 1;
+
+ if (src3w == 0)
+ return SLJIT_SUCCESS;
+
+ if (op & SLJIT_32) {
+ if (dst_r == src1_r) {
+ ins = is_right ? 0x88000000 /* srl */ : 0x89000000 /* sll */;
+ FAIL_IF(push_inst(compiler, ins | R20A(dst_r) | (sljit_ins)src3w));
+ } else {
+ ins = is_right ? 0xeb00000000de /* srlk */ : 0xeb00000000df /* sllk */;
+ FAIL_IF(push_inst(compiler, ins | R36A(dst_r) | R32A(src1_r) | ((sljit_ins)src3w << 16)));
+ }
+ } else {
+ ins = is_right ? 0xeb000000000c /* srlg */ : 0xeb000000000d /* sllg */;
+ FAIL_IF(push_inst(compiler, ins | R36A(dst_r) | R32A(src1_r) | ((sljit_ins)src3w << 16)));
+ }
+
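+ /* risbg (rotate then insert selected bits) rotates src2 and deposits only the shifted-in bit range into dst, leaving the bits produced by the shift above untouched. */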
+ ins = 0xec0000000055 /* risbg */;
+
+ if (is_right) {
+ src3w = bit_length - src3w;
+ ins |= ((sljit_ins)(64 - bit_length) << 24) | ((sljit_ins)(63 - src3w) << 16) | ((sljit_ins)src3w << 8);
+ } else
+ ins |= ((sljit_ins)(64 - src3w) << 24) | ((sljit_ins)63 << 16) | ((sljit_ins)(src3w + 64 - bit_length) << 8);
+
+ return push_inst(compiler, ins | R36A(dst_r) | R32A(src2_r));
+ }
+
+ if (!(src3 & SLJIT_MEM)) {
+ src3_r = gpr(src3);
+
+ if (dst_r == src3_r) {
+ FAIL_IF(push_inst(compiler, 0x1800 /* lr */ | R4A(tmp1) | R0A(src3_r)));
+ src3_r = tmp1;
+ }
+ } else
+ FAIL_IF(load_word(compiler, tmp1, src3, src3w, op & SLJIT_32));
+
+ if (op & SLJIT_32) {
+ if (GET_OPCODE(op) == SLJIT_MSHL || GET_OPCODE(op) == SLJIT_MLSHR) {
+ if (src3_r != tmp1) {
+ FAIL_IF(push_inst(compiler, 0xec0000000055 /* risbg */ | R36A(tmp1) | R32A(src3_r) | (59 << 24) | (1 << 23) | (63 << 16)));
+ src3_r = tmp1;
+ } else
+ FAIL_IF(push_inst(compiler, 0xa5070000 /* nill */ | R20A(tmp1) | 0x1f));
+ }
+
+ if (dst_r == src1_r) {
+ ins = is_right ? 0x88000000 /* srl */ : 0x89000000 /* sll */;
+ FAIL_IF(push_inst(compiler, ins | R20A(dst_r) | R12A(src3_r)));
+ } else {
+ ins = is_right ? 0xeb00000000de /* srlk */ : 0xeb00000000df /* sllk */;
+ FAIL_IF(push_inst(compiler, ins | R36A(dst_r) | R32A(src1_r) | R28A(src3_r)));
+ }
+
+ if (src3_r != tmp1) {
+ FAIL_IF(push_inst(compiler, 0xa50f0000 /* llill */ | R20A(tmp1) | 0x1f));
+ FAIL_IF(push_inst(compiler, 0x1700 /* xr */ | R4A(tmp1) | R0A(src3_r)));
+ } else
+ FAIL_IF(push_inst(compiler, 0xc00700000000 /* xilf */ | R36A(tmp1) | 0x1f));
+
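+ /* tmp1 now holds amount ^ 31; the RSY shift amount is base + displacement, so tmp1 + 1 = 32 - amount shifts src2 the opposite way into the vacated bits. */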
+ ins = is_right ? 0xeb00000000df /* sllk */ : 0xeb00000000de /* srlk */;
+ FAIL_IF(push_inst(compiler, ins | R36A(tmp0) | R32A(src2_r) | R28A(tmp1) | (0x1 << 16)));
+
+ return push_inst(compiler, 0x1600 /* or */ | R4A(dst_r) | R0A(tmp0));
+ }
+
+ ins = is_right ? 0xeb000000000c /* srlg */ : 0xeb000000000d /* sllg */;
+ FAIL_IF(push_inst(compiler, ins | R36A(dst_r) | R32A(src1_r) | R28A(src3_r)));
+
+ ins = is_right ? 0xeb000000000d /* sllg */ : 0xeb000000000c /* srlg */;
+
+ if (!(op & SLJIT_SHIFT_INTO_NON_ZERO)) {
+ if (src3_r != tmp1)
+ FAIL_IF(push_inst(compiler, 0xa50f0000 /* llill */ | R20A(tmp1) | 0x3f));
+
+ FAIL_IF(push_inst(compiler, ins | R36A(tmp0) | R32A(src2_r) | (0x1 << 16)));
+ src2_r = tmp0;
+
+ if (src3_r != tmp1)
+ FAIL_IF(push_inst(compiler, 0xb9820000 /* xgr */ | R4A(tmp1) | R0A(src3_r)));
+ else
+ FAIL_IF(push_inst(compiler, 0xc00700000000 /* xilf */ | R36A(tmp1) | 0x3f));
+ } else
+ FAIL_IF(push_inst(compiler, 0xb9030000 /* lcgr */ | R4A(tmp1) | R0A(src3_r)));
+
+ FAIL_IF(push_inst(compiler, ins | R36A(tmp0) | R32A(src2_r) | R28A(tmp1)));
+ return push_inst(compiler, 0xb9810000 /* ogr */ | R4A(dst_r) | R0A(tmp0));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 src, sljit_sw srcw)
{
sljit_gpr src_r;
+ struct addr addr;
CHECK_ERROR();
CHECK(check_sljit_emit_op_src(compiler, op, src, srcw));
@@ -2667,36 +3145,64 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(
return push_inst(compiler, br(src_r));
case SLJIT_SKIP_FRAMES_BEFORE_FAST_RETURN:
- /* TODO(carenas): implement? */
return SLJIT_SUCCESS;
case SLJIT_PREFETCH_L1:
case SLJIT_PREFETCH_L2:
case SLJIT_PREFETCH_L3:
case SLJIT_PREFETCH_ONCE:
- /* TODO(carenas): implement */
- return SLJIT_SUCCESS;
+ FAIL_IF(make_addr_bxy(compiler, &addr, src, srcw, tmp1));
+ return push_inst(compiler, 0xe31000000036 /* pfd */ | R32A(addr.index) | R28A(addr.base) | disp_s20(addr.offset));
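+ /* every prefetch hint maps to pfd with M1 = 1 (prefetch for fetch); the level hints are not distinguished here */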
default:
- /* TODO(carenas): probably should not success by default */
return SLJIT_SUCCESS;
}
return SLJIT_SUCCESS;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_dst(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw)
{
- CHECK_REG_INDEX(check_sljit_get_register_index(reg));
- return gpr(reg);
+ sljit_gpr dst_r = link_r;
+ sljit_s32 size;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_op_dst(compiler, op, dst, dstw));
+ ADJUST_LOCAL_OFFSET(dst, dstw);
+
+ switch (op) {
+ case SLJIT_FAST_ENTER:
+ if (FAST_IS_REG(dst))
+ return push_inst(compiler, lgr(gpr(dst), link_r));
+ break;
+ case SLJIT_GET_RETURN_ADDRESS:
+ dst_r = FAST_IS_REG(dst) ? gpr(dst) : tmp0;
+
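+ /* The return address (r14) was spilled by sljit_emit_enter; reload it from the register save area above the locals. */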
+ size = GET_SAVED_REGISTERS_SIZE(compiler->scratches, compiler->saveds - SLJIT_KEPT_SAVEDS_COUNT(compiler->options), 2);
+ FAIL_IF(load_word(compiler, dst_r, SLJIT_MEM1(SLJIT_SP), compiler->local_size + size, 0));
+ break;
+ }
+
+ if (dst & SLJIT_MEM)
+ return store_word(compiler, dst_r, dst, dstw, 0);
+
+ return SLJIT_SUCCESS;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 type, sljit_s32 reg)
{
- CHECK_REG_INDEX(check_sljit_get_float_register_index(reg));
- abort();
+ CHECK_REG_INDEX(check_sljit_get_register_index(type, reg));
+
+ if (type == SLJIT_GP_REGISTER)
+ return (sljit_s32)gpr(reg);
+
+ if (type != SLJIT_FLOAT_REGISTER)
+ return -1;
+
+ return (sljit_s32)freg_map[reg];
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler,
- void *instruction, sljit_s32 size)
+ void *instruction, sljit_u32 size)
{
sljit_ins ins = 0;
@@ -2711,38 +3217,369 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *c
/* Floating point operators */
/* --------------------------------------------------------------------- */
+#define FLOAT_LOAD 0
+#define FLOAT_STORE 1
+
+static sljit_s32 float_mem(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 reg,
+ sljit_s32 mem, sljit_sw memw)
+{
+ struct addr addr;
+ sljit_ins ins;
+
+ SLJIT_ASSERT(mem & SLJIT_MEM);
+
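+ /* Pick the short RX forms (12-bit unsigned displacement) when an index register is involved or the offset fits; otherwise the RXY long-displacement forms cover signed 20-bit offsets. */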
+ if ((mem & OFFS_REG_MASK) || is_u12(memw) || !is_s20(memw)) {
+ FAIL_IF(make_addr_bx(compiler, &addr, mem, memw, tmp1));
+
+ if (op & FLOAT_STORE)
+ ins = (op & SLJIT_32) ? 0x70000000 /* ste */ : 0x60000000 /* std */;
+ else
+ ins = (op & SLJIT_32) ? 0x78000000 /* le */ : 0x68000000 /* ld */;
+
+ return push_inst(compiler, ins | F20(reg) | R16A(addr.index) | R12A(addr.base) | (sljit_ins)addr.offset);
+ }
+
+ FAIL_IF(make_addr_bxy(compiler, &addr, mem, memw, tmp1));
+
+ if (op & FLOAT_STORE)
+ ins = (op & SLJIT_32) ? 0xed0000000066 /* stey */ : 0xed0000000067 /* stdy */;
+ else
+ ins = (op & SLJIT_32) ? 0xed0000000064 /* ley */ : 0xed0000000065 /* ldy */;
+
+ return push_inst(compiler, ins | F36(reg) | R32A(addr.index) | R28A(addr.base) | disp_s20(addr.offset));
+}
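
Note: float_mem chooses between two encodings: the short RX forms (ste/std, le/ld) carry an unsigned 12-bit displacement, while the long RXY forms (stey/stdy, ley/ldy) carry a signed 20-bit one, hence the is_u12(memw) || !is_s20(memw) guard above. A standalone sketch of those range checks (function names are illustrative):

    /* Sketch, not part of the patch: the displacement ranges behind
     * is_u12()/is_s20(). Offsets outside both ranges are materialized
     * into a temporary register by make_addr_bx(). */
    #include <stdbool.h>

    static bool fits_u12(long d) { return d >= 0 && d <= 0xfff; }
    static bool fits_s20(long d) { return d >= -0x80000 && d <= 0x7ffff; }

    static const char *pick_form(long d)
    {
        if (fits_u12(d))
            return "RX form (e.g. std)";
        if (fits_s20(d))
            return "RXY form (e.g. stdy)";
        return "materialize offset in a temp register";
    }
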
+
+static sljit_s32 emit_float(struct sljit_compiler *compiler, sljit_ins ins_r, sljit_ins ins,
+ sljit_s32 reg,
+ sljit_s32 src, sljit_sw srcw)
+{
+ struct addr addr;
+
+ if (!(src & SLJIT_MEM))
+ return push_inst(compiler, ins_r | F4(reg) | F0(src));
+
+ FAIL_IF(make_addr_bx(compiler, &addr, src, srcw, tmp1));
+ return push_inst(compiler, ins | F36(reg) | R32A(addr.index) | R28A(addr.base) | ((sljit_ins)addr.offset << 16));
+}
+
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 src, sljit_sw srcw)
+{
+ sljit_ins dst_r = FAST_IS_REG(dst) ? gpr(dst) : tmp0;
+ sljit_ins ins;
+
+ if (src & SLJIT_MEM) {
+ FAIL_IF(float_mem(compiler, FLOAT_LOAD | (op & SLJIT_32), TMP_FREG1, src, srcw));
+ src = TMP_FREG1;
+ }
+
+ /* The M3 rounding-mode field is set to 5 (round toward zero). */
+ if (GET_OPCODE(op) == SLJIT_CONV_SW_FROM_F64)
+ ins = (op & SLJIT_32) ? 0xb3a85000 /* cgebr */ : 0xb3a95000 /* cgdbr */;
+ else
+ ins = (op & SLJIT_32) ? 0xb3985000 /* cfebr */ : 0xb3995000 /* cfdbr */;
+
+ FAIL_IF(push_inst(compiler, ins | R4A(dst_r) | F0(src)));
+
+ if (dst & SLJIT_MEM)
+ return store_word(compiler, dst_r, dst, dstw, GET_OPCODE(op) >= SLJIT_CONV_S32_FROM_F64);
+
+ return SLJIT_SUCCESS;
+}
+
+static sljit_s32 sljit_emit_fop1_conv_f64_from_w(struct sljit_compiler *compiler, sljit_ins ins,
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 src, sljit_sw srcw)
+{
+ sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
+
+ if (src == SLJIT_IMM) {
+ FAIL_IF(push_load_imm_inst(compiler, tmp0, srcw));
+ src = (sljit_s32)tmp0;
+ }
+ else if (src & SLJIT_MEM) {
+ FAIL_IF(load_word(compiler, tmp0, src, srcw, ins & 0x100000));
+ src = (sljit_s32)tmp0;
+ }
+
+ FAIL_IF(push_inst(compiler, ins | F4(dst_r) | R0(src)));
+
+ if (dst & SLJIT_MEM)
+ return float_mem(compiler, FLOAT_STORE | ((ins & 0x10000) ? 0 : SLJIT_32), TMP_FREG1, dst, dstw);
+
+ return SLJIT_SUCCESS;
+}
+
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 src, sljit_sw srcw)
+{
+ sljit_ins ins;
+
+ if (src == SLJIT_IMM && GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32)
+ srcw = (sljit_s32)srcw;
+
+ if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_SW)
+ ins = (op & SLJIT_32) ? 0xb3a40000 /* cegbr */ : 0xb3a50000 /* cdgbr */;
+ else
+ ins = (op & SLJIT_32) ? 0xb3940000 /* cefbr */ : 0xb3950000 /* cdfbr */;
+
+ return sljit_emit_fop1_conv_f64_from_w(compiler, ins, dst, dstw, src, srcw);
+}
+
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_uw(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 src, sljit_sw srcw)
+{
+ sljit_ins ins;
+
+ if (src == SLJIT_IMM && GET_OPCODE(op) == SLJIT_CONV_F64_FROM_U32)
+ srcw = (sljit_u32)srcw;
+
+ if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_UW)
+ ins = (op & SLJIT_32) ? 0xb3a00000 /* celgbr */ : 0xb3a10000 /* cdlgbr */;
+ else
+ ins = (op & SLJIT_32) ? 0xb3900000 /* celfbr */ : 0xb3910000 /* cdlfbr */;
+
+ return sljit_emit_fop1_conv_f64_from_w(compiler, ins, dst, dstw, src, srcw);
+}
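
Note: the signed and unsigned int-to-float helpers differ only in the opcode family they hand down to sljit_emit_fop1_conv_f64_from_w. The selection, restated as one lookup table with the mnemonics copied from the comments above (the table form itself is illustrative):

    /* Sketch: opcode choice as a 3-way lookup instead of nested ifs. */
    static const char *conv_mnemonic(int is_unsigned, int from_32bit_int, int to_f32)
    {
        static const char *const table[2][2][2] = {
            { { "cdgbr",  "cegbr"  }, { "cdfbr",  "cefbr"  } }, /* signed   */
            { { "cdlgbr", "celgbr" }, { "cdlfbr", "celfbr" } }  /* unsigned */
        };
        return table[!!is_unsigned][!!from_32bit_int][!!to_f32];
    }
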
+
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2, sljit_sw src2w)
+{
+ sljit_ins ins_r, ins;
+
+ if (src1 & SLJIT_MEM) {
+ FAIL_IF(float_mem(compiler, FLOAT_LOAD | (op & SLJIT_32), TMP_FREG1, src1, src1w));
+ src1 = TMP_FREG1;
+ }
+
+ if (op & SLJIT_32) {
+ ins_r = 0xb3090000 /* cebr */;
+ ins = 0xed0000000009 /* ceb */;
+ } else {
+ ins_r = 0xb3190000 /* cdbr */;
+ ins = 0xed0000000019 /* cdb */;
+ }
+
+ return emit_float(compiler, ins_r, ins, src1, src2, src2w);
+}
+
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
{
+ sljit_s32 dst_r;
+ sljit_ins ins;
+
CHECK_ERROR();
- abort();
+
+ SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw);
+
+ dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
+
+ if (op == SLJIT_CONV_F64_FROM_F32)
+ FAIL_IF(emit_float(compiler, 0xb3040000 /* ldebr */, 0xed0000000004 /* ldeb */, dst_r, src, srcw));
+ else {
+ if (src & SLJIT_MEM) {
+ FAIL_IF(float_mem(compiler, FLOAT_LOAD | (op == SLJIT_CONV_F32_FROM_F64 ? 0 : (op & SLJIT_32)), dst_r, src, srcw));
+ src = dst_r;
+ }
+
+ switch (GET_OPCODE(op)) {
+ case SLJIT_MOV_F64:
+ if (FAST_IS_REG(dst)) {
+ if (dst == src)
+ return SLJIT_SUCCESS;
+
+ ins = (op & SLJIT_32) ? 0x3800 /* ler */ : 0x2800 /* ldr */;
+ break;
+ }
+ return float_mem(compiler, FLOAT_STORE | (op & SLJIT_32), src, dst, dstw);
+ case SLJIT_CONV_F64_FROM_F32:
+ /* Reached only as SLJIT_CONV_F32_FROM_F64; the F64-from-F32 direction was handled above. */
+ ins = 0xb3440000 /* ledbr */;
+ break;
+ case SLJIT_NEG_F64:
+ ins = (op & SLJIT_32) ? 0xb3030000 /* lcebr */ : 0xb3130000 /* lcdbr */;
+ break;
+ default:
+ SLJIT_ASSERT(GET_OPCODE(op) == SLJIT_ABS_F64);
+ ins = (op & SLJIT_32) ? 0xb3000000 /* lpebr */ : 0xb3100000 /* lpdbr */;
+ break;
+ }
+
+ FAIL_IF(push_inst(compiler, ins | F4(dst_r) | F0(src)));
+ }
+
+ if (!(dst & SLJIT_MEM))
+ return SLJIT_SUCCESS;
+
+ SLJIT_ASSERT(dst_r == TMP_FREG1);
+
+ return float_mem(compiler, FLOAT_STORE | (op & SLJIT_32), TMP_FREG1, dst, dstw);
}
+#define FLOAT_MOV(op, dst_r, src_r) \
+ (((op & SLJIT_32) ? 0x3800 /* ler */ : 0x2800 /* ldr */) | F4(dst_r) | F0(src_r))
+
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w)
{
+ sljit_s32 dst_r = TMP_FREG1;
+ sljit_ins ins_r, ins;
+
CHECK_ERROR();
- abort();
+ CHECK(check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w));
+ ADJUST_LOCAL_OFFSET(dst, dstw);
+ ADJUST_LOCAL_OFFSET(src1, src1w);
+ ADJUST_LOCAL_OFFSET(src2, src2w);
+
+ do {
+ if (FAST_IS_REG(dst)) {
+ dst_r = dst;
+
+ if (dst == src1)
+ break;
+
+ if (dst == src2) {
+ if (GET_OPCODE(op) == SLJIT_ADD_F64 || GET_OPCODE(op) == SLJIT_MUL_F64) {
+ src2 = src1;
+ src2w = src1w;
+ src1 = dst;
+ break;
+ }
+
+ FAIL_IF(push_inst(compiler, FLOAT_MOV(op, TMP_FREG1, src2)));
+ src2 = TMP_FREG1;
+ }
+ }
+
+ if (src1 & SLJIT_MEM)
+ FAIL_IF(float_mem(compiler, FLOAT_LOAD | (op & SLJIT_32), dst_r, src1, src1w));
+ else
+ FAIL_IF(push_inst(compiler, FLOAT_MOV(op, dst_r, src1)));
+ } while (0);
+
+ switch (GET_OPCODE(op)) {
+ case SLJIT_ADD_F64:
+ ins_r = (op & SLJIT_32) ? 0xb30a0000 /* aebr */ : 0xb31a0000 /* adbr */;
+ ins = (op & SLJIT_32) ? 0xed000000000a /* aeb */ : 0xed000000001a /* adb */;
+ break;
+ case SLJIT_SUB_F64:
+ ins_r = (op & SLJIT_32) ? 0xb30b0000 /* sebr */ : 0xb31b0000 /* sdbr */;
+ ins = (op & SLJIT_32) ? 0xed000000000b /* seb */ : 0xed000000001b /* sdb */;
+ break;
+ case SLJIT_MUL_F64:
+ ins_r = (op & SLJIT_32) ? 0xb3170000 /* meebr */ : 0xb31c0000 /* mdbr */;
+ ins = (op & SLJIT_32) ? 0xed0000000017 /* meeb */ : 0xed000000001c /* mdb */;
+ break;
+ default:
+ SLJIT_ASSERT(GET_OPCODE(op) == SLJIT_DIV_F64);
+ ins_r = (op & SLJIT_32) ? 0xb30d0000 /* debr */ : 0xb31d0000 /* ddbr */;
+ ins = (op & SLJIT_32) ? 0xed000000000d /* deb */ : 0xed000000001d /* ddb */;
+ break;
+ }
+
+ FAIL_IF(emit_float(compiler, ins_r, ins, dst_r, src2, src2w));
+
+ if (dst & SLJIT_MEM)
+ return float_mem(compiler, FLOAT_STORE | (op & SLJIT_32), TMP_FREG1, dst, dstw);
+
+ SLJIT_ASSERT(dst_r != TMP_FREG1);
+ return SLJIT_SUCCESS;
}
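
Note: when the destination aliases src2, the code swaps the operands for SLJIT_ADD_F64 and SLJIT_MUL_F64 but copies src2 aside into TMP_FREG1 for SUB/DIV, because only the former two are commutative. The invariant in miniature (names illustrative):

    /* Sketch: operand swap is only legal for commutative float ops. */
    enum fop { FADD, FSUB, FMUL, FDIV };

    static int is_commutative(enum fop op)
    {
        return op == FADD || op == FMUL; /* a+b == b+a, a*b == b*a */
    }
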
-/* --------------------------------------------------------------------- */
-/* Other instructions */
-/* --------------------------------------------------------------------- */
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2r(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst_freg,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2, sljit_sw src2w)
+{
+ sljit_s32 reg;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fop2r(compiler, op, dst_freg, src1, src1w, src2, src2w));
+ ADJUST_LOCAL_OFFSET(src1, src1w);
+ ADJUST_LOCAL_OFFSET(src2, src2w);
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
+ if (src2 & SLJIT_MEM) {
+ FAIL_IF(float_mem(compiler, FLOAT_LOAD | (op & SLJIT_32), TMP_FREG1, src2, src2w));
+ src2 = TMP_FREG1;
+ }
+
+ if (src1 & SLJIT_MEM) {
+ reg = (dst_freg == src2) ? TMP_FREG1 : dst_freg;
+ FAIL_IF(float_mem(compiler, FLOAT_LOAD | (op & SLJIT_32), reg, src1, src1w));
+ src1 = reg;
+ }
+
+ return push_inst(compiler, 0xb3720000 /* cpsdr */ | F12(src2) | F4(dst_freg) | F0(src1));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fset32(struct sljit_compiler *compiler,
+ sljit_s32 freg, sljit_f32 value)
{
+ union {
+ sljit_s32 imm;
+ sljit_f32 value;
+ } u;
+
CHECK_ERROR();
- CHECK(check_sljit_emit_fast_enter(compiler, dst, dstw));
- ADJUST_LOCAL_OFFSET(dst, dstw);
+ CHECK(check_sljit_emit_fset32(compiler, freg, value));
+
+ u.value = value;
+
+ FAIL_IF(push_load_imm_inst(compiler, tmp1, (sljit_sw)(((sljit_uw)u.imm << 32))));
+ return push_inst(compiler, 0xb3c10000 /* ldgr */ | F4(freg) | R0A(tmp1));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fset64(struct sljit_compiler *compiler,
+ sljit_s32 freg, sljit_f64 value)
+{
+ union {
+ sljit_sw imm;
+ sljit_f64 value;
+ } u;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fset64(compiler, freg, value));
- if (FAST_IS_REG(dst))
- return push_inst(compiler, lgr(gpr(dst), fast_link_r));
+ u.value = value;
- /* memory */
- return store_word(compiler, fast_link_r, dst, dstw, 0);
+ FAIL_IF(push_load_imm_inst(compiler, tmp1, (sljit_sw)u.imm));
+ return push_inst(compiler, 0xb3c10000 /* ldgr */ | F4(freg) | R0A(tmp1));
+}
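
Note: both fset helpers materialize the constant's bit pattern in a general register and transfer it with ldgr; the << 32 in fset32 reflects that the FPR holds a single-precision value in its upper half. The bit reinterpretation the two unions above perform, as a self-contained sketch:

    /* Sketch: type-punning the constant to its bit pattern via a union,
     * as the fset32/fset64 bodies above do. */
    #include <stdint.h>

    static uint64_t f32_bits_for_fpr(float value)
    {
        union { uint32_t bits; float value; } u;
        u.value = value;
        /* the upper half of the 64-bit FPR holds the f32 value */
        return (uint64_t)u.bits << 32;
    }

    static uint64_t f64_bits_for_fpr(double value)
    {
        union { uint64_t bits; double value; } u;
        u.value = value;
        return u.bits;
    }
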
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fcopy(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 freg, sljit_s32 reg)
+{
+ sljit_gpr gen_r;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fcopy(compiler, op, freg, reg));
+
+ gen_r = gpr(reg);
+
+ if (GET_OPCODE(op) == SLJIT_COPY_TO_F64) {
+ if (op & SLJIT_32) {
+ FAIL_IF(push_inst(compiler, 0xeb000000000d /* sllg */ | R36A(tmp0) | R32A(gen_r) | (32 << 16)));
+ gen_r = tmp0;
+ }
+
+ return push_inst(compiler, 0xb3c10000 /* ldgr */ | F4(freg) | R0A(gen_r));
+ }
+
+ FAIL_IF(push_inst(compiler, 0xb3cd0000 /* lgdr */ | R4A(gen_r) | F0(freg)));
+
+ if (!(op & SLJIT_32))
+ return SLJIT_SUCCESS;
+
+ return push_inst(compiler, 0xeb000000000c /* srlg */ | R36A(gen_r) | R32A(gen_r) | (32 << 16));
}
/* --------------------------------------------------------------------- */
@@ -2767,14 +3604,14 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compi
SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_s32 type)
{
+ struct sljit_jump *jump;
sljit_u8 mask = ((type & 0xff) < SLJIT_JUMP) ? get_cc(compiler, type & 0xff) : 0xf;
CHECK_ERROR_PTR();
CHECK_PTR(check_sljit_emit_jump(compiler, type));
/* record jump */
- struct sljit_jump *jump = (struct sljit_jump *)
- ensure_abuf(compiler, sizeof(struct sljit_jump));
+ jump = (struct sljit_jump *)ensure_abuf(compiler, sizeof(struct sljit_jump));
PTR_FAIL_IF(!jump);
set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP);
jump->addr = compiler->size;
@@ -2782,7 +3619,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile
/* emit jump instruction */
type &= 0xff;
if (type >= SLJIT_FAST_CALL)
- PTR_FAIL_IF(push_inst(compiler, brasl(type == SLJIT_FAST_CALL ? fast_link_r : link_r, 0)));
+ PTR_FAIL_IF(push_inst(compiler, brasl(link_r, 0)));
else
PTR_FAIL_IF(push_inst(compiler, brcl(mask, 0)));
@@ -2792,14 +3629,16 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile
SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type,
sljit_s32 arg_types)
{
+ SLJIT_UNUSED_ARG(arg_types);
CHECK_ERROR_PTR();
CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types));
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
- || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->skip_checks = 1;
-#endif
+ if (type & SLJIT_CALL_RETURN) {
+ PTR_FAIL_IF(emit_stack_frame_release(compiler, r14));
+ type = SLJIT_JUMP | (type & SLJIT_REWRITABLE_JUMP);
+ }
+ SLJIT_SKIP_CHECKS(compiler);
return sljit_emit_jump(compiler, type);
}
@@ -2809,18 +3648,19 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi
CHECK_ERROR();
CHECK(check_sljit_emit_ijump(compiler, type, src, srcw));
- ADJUST_LOCAL_OFFSET(src, srcw);
- if (src & SLJIT_IMM) {
+ if (src == SLJIT_IMM) {
SLJIT_ASSERT(!(srcw & 1)); /* target address must be even */
FAIL_IF(push_load_imm_inst(compiler, src_r, srcw));
}
- else if (src & SLJIT_MEM)
+ else if (src & SLJIT_MEM) {
+ ADJUST_LOCAL_OFFSET(src, srcw);
FAIL_IF(load_word(compiler, src_r, src, srcw, 0 /* 64-bit */));
+ }
/* emit jump instruction */
if (type >= SLJIT_FAST_CALL)
- return push_inst(compiler, basr(type == SLJIT_FAST_CALL ? fast_link_r : link_r, src_r));
+ return push_inst(compiler, basr(link_r, src_r));
return push_inst(compiler, br(src_r));
}
@@ -2829,14 +3669,32 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compi
sljit_s32 arg_types,
sljit_s32 src, sljit_sw srcw)
{
+ SLJIT_UNUSED_ARG(arg_types);
+
CHECK_ERROR();
CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw));
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
- || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->skip_checks = 1;
-#endif
+ SLJIT_ASSERT(gpr(TMP_REG2) == tmp1);
+
+ if (src & SLJIT_MEM) {
+ ADJUST_LOCAL_OFFSET(src, srcw);
+ FAIL_IF(load_word(compiler, tmp1, src, srcw, 0 /* 64-bit */));
+ src = TMP_REG2;
+ srcw = 0;
+ }
+ if (type & SLJIT_CALL_RETURN) {
+ if (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) {
+ FAIL_IF(push_inst(compiler, lgr(tmp1, gpr(src))));
+ src = TMP_REG2;
+ srcw = 0;
+ }
+
+ FAIL_IF(emit_stack_frame_release(compiler, r14));
+ type = SLJIT_JUMP;
+ }
+
+ SLJIT_SKIP_CHECKS(compiler);
return sljit_emit_ijump(compiler, type, src, srcw);
}
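
Note: with SLJIT_CALL_RETURN both call emitters degrade into tail calls: the frame is released first and the call becomes a plain jump, and an indirect target sitting in a saved register is rescued into tmp1 before the restore clobbers it. A rough sketch of that decision (the emitted pseudo-mnemonics are placeholders, not the exact sequence):

    /* Sketch only: control flow of the tail-call transformation. */
    #include <stdio.h>

    #define CALL_RETURN 0x1

    static void emit(const char *ins) { printf("%s\n", ins); }

    static void emit_call_like(int type, int target_in_saved_reg)
    {
        if (type & CALL_RETURN) {                /* tail position */
            if (target_in_saved_reg)
                emit("lgr  tmp1, target");       /* restore clobbers it */
            emit("...release stack frame...");   /* saved regs restored */
            emit("br   target");                 /* jump, no return addr */
            return;
        }
        emit("basr r14, target");                /* ordinary call */
    }
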
@@ -2844,13 +3702,13 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
sljit_s32 dst, sljit_sw dstw,
sljit_s32 type)
{
- sljit_u8 mask = get_cc(compiler, type & 0xff);
+ sljit_gpr dst_r = FAST_IS_REG(dst) ? gpr(dst & REG_MASK) : tmp0;
+ sljit_gpr loc_r = tmp1;
+ sljit_u8 mask = get_cc(compiler, type);
CHECK_ERROR();
CHECK(check_sljit_emit_op_flags(compiler, op, dst, dstw, type));
- sljit_gpr dst_r = FAST_IS_REG(dst) ? gpr(dst & REG_MASK) : tmp0;
- sljit_gpr loc_r = tmp1;
switch (GET_OPCODE(op)) {
case SLJIT_AND:
case SLJIT_OR:
@@ -2859,11 +3717,13 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
/* dst is also source operand */
if (dst & SLJIT_MEM)
- FAIL_IF(load_word(compiler, dst_r, dst, dstw, op & SLJIT_I32_OP));
+ FAIL_IF(load_word(compiler, dst_r, dst, dstw, op & SLJIT_32));
break;
+ case SLJIT_MOV32:
+ op |= SLJIT_32;
+ /* fallthrough */
case SLJIT_MOV:
- case (SLJIT_MOV32 & ~SLJIT_I32_OP):
/* can write straight into destination */
loc_r = dst_r;
break;
@@ -2876,7 +3736,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
if (have_lscond2()) {
FAIL_IF(push_load_imm_inst(compiler, loc_r, 0));
FAIL_IF(push_inst(compiler,
- WHEN2(op & SLJIT_I32_OP, lochi, locghi)));
+ WHEN2(op & SLJIT_32, lochi, locghi)));
} else {
/* TODO(mundaym): no load/store-on-condition 2 facility (ipm? branch-and-set?) */
abort();
@@ -2888,51 +3748,700 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
#define LEVAL(i) i(dst_r, loc_r)
case SLJIT_AND:
FAIL_IF(push_inst(compiler,
- WHEN2(op & SLJIT_I32_OP, nr, ngr)));
+ WHEN2(op & SLJIT_32, nr, ngr)));
break;
case SLJIT_OR:
FAIL_IF(push_inst(compiler,
- WHEN2(op & SLJIT_I32_OP, or, ogr)));
+ WHEN2(op & SLJIT_32, or, ogr)));
break;
case SLJIT_XOR:
FAIL_IF(push_inst(compiler,
- WHEN2(op & SLJIT_I32_OP, xr, xgr)));
+ WHEN2(op & SLJIT_32, xr, xgr)));
break;
#undef LEVAL
}
/* store result to memory if required */
if (dst & SLJIT_MEM)
- return store_word(compiler, dst_r, dst, dstw, op & SLJIT_I32_OP);
+ return store_word(compiler, dst_r, dst, dstw, (op & SLJIT_32));
return SLJIT_SUCCESS;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compiler, sljit_s32 type,
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_select(struct sljit_compiler *compiler, sljit_s32 type,
sljit_s32 dst_reg,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2_reg)
+{
+ sljit_ins mask;
+ sljit_gpr src_r;
+ sljit_gpr dst_r = gpr(dst_reg);
+ sljit_ins ins;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_select(compiler, type, dst_reg, src1, src1w, src2_reg));
+
+ ADJUST_LOCAL_OFFSET(src1, src1w);
+
+ if (dst_reg != src2_reg) {
+ if (src1 == dst_reg) {
+ src1 = src2_reg;
+ src1w = 0;
+ type ^= 0x1;
+ } else {
+ if (ADDRESSING_DEPENDS_ON(src1, dst_reg)) {
+ FAIL_IF(load_word(compiler, dst_r, src1, src1w, type & SLJIT_32));
+ src1 = src2_reg;
+ src1w = 0;
+ type ^= 0x1;
+ } else
+ FAIL_IF(push_inst(compiler, ((type & SLJIT_32) ? 0x1800 /* lr */ : 0xb9040000 /* lgr */) | R4A(dst_r) | R0A(gpr(src2_reg))));
+ }
+ }
+
+ mask = get_cc(compiler, type & ~SLJIT_32);
+
+ if (src1 & SLJIT_MEM) {
+ if (src1 & OFFS_REG_MASK) {
+ src_r = gpr(OFFS_REG(src1));
+
+ if (src1w != 0) {
+ FAIL_IF(push_inst(compiler, 0xeb000000000d /* sllg */ | R36A(tmp1) | R32A(src_r) | ((sljit_ins)(src1w & 0x3) << 16)));
+ src_r = tmp1;
+ }
+
+ FAIL_IF(push_inst(compiler, 0xb9e80000 /* agrk */ | R12A(src_r) | R4A(tmp1) | R0A(gpr(src1 & REG_MASK))));
+ src_r = tmp1;
+ src1w = 0;
+ } else if (!is_s20(src1w)) {
+ FAIL_IF(push_load_imm_inst(compiler, tmp1, src1w));
+
+ if (src1 & REG_MASK)
+ FAIL_IF(push_inst(compiler, 0xb9e80000 /* agrk */ | R12A(tmp1) | R4A(tmp1) | R0A(gpr(src1 & REG_MASK))));
+
+ src_r = tmp1;
+ src1w = 0;
+ } else
+ src_r = gpr(src1 & REG_MASK);
+
+ ins = (type & SLJIT_32) ? 0xeb00000000f2 /* loc */ : 0xeb00000000e2 /* locg */;
+ return push_inst(compiler, ins | R36A(dst_r) | (mask << 32) | R28A(src_r) | disp_s20((sljit_s32)src1w));
+ }
+
+ if (src1 == SLJIT_IMM) {
+ if (type & SLJIT_32)
+ src1w = (sljit_s32)src1w;
+
+ if (have_lscond2() && is_s16(src1w)) {
+ ins = (type & SLJIT_32) ? 0xec0000000042 /* lochi */ : 0xec0000000046 /* locghi */;
+ return push_inst(compiler, ins | R36A(dst_r) | (mask << 32) | (sljit_ins)(src1w & 0xffff) << 16);
+ }
+
+ FAIL_IF(push_load_imm_inst(compiler, tmp0, src1w));
+ src_r = tmp0;
+ } else
+ src_r = gpr(src1);
+
+ ins = (type & SLJIT_32) ? 0xb9f20000 /* locr */ : 0xb9e20000 /* locgr */;
+ return push_inst(compiler, ins | (mask << 12) | R4A(dst_r) | R0A(src_r));
+}
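
Note: the operand swap in sljit_emit_select flips the condition with type ^= 0x1, which assumes SLJIT encodes every condition and its inverse as an adjacent pair. A sketch under that assumption (constant values as in sljitLir.h, e.g. SLJIT_EQUAL/SLJIT_NOT_EQUAL):

    /* Sketch: inverting an SLJIT condition by flipping bit 0. */
    enum cond { EQUAL = 0, NOT_EQUAL = 1, LESS = 2, GREATER_EQUAL = 3 };

    static enum cond invert(enum cond c)
    {
        return (enum cond)(c ^ 0x1); /* EQUAL <-> NOT_EQUAL, etc. */
    }
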
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fselect(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 dst_freg,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2_freg)
+{
+ sljit_ins ins;
+ struct sljit_label *label;
+ struct sljit_jump *jump;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fselect(compiler, type, dst_freg, src1, src1w, src2_freg));
+
+ ADJUST_LOCAL_OFFSET(src1, src1w);
+
+ if (dst_freg != src2_freg) {
+ if (dst_freg == src1) {
+ src1 = src2_freg;
+ src1w = 0;
+ type ^= 0x1;
+ } else {
+ ins = (type & SLJIT_32) ? 0x3800 /* ler */ : 0x2800 /* ldr */;
+ FAIL_IF(push_inst(compiler, ins | F4(dst_freg) | F0(src2_freg)));
+ }
+ }
+
+ SLJIT_SKIP_CHECKS(compiler);
+ jump = sljit_emit_jump(compiler, (type & ~SLJIT_32) ^ 0x1);
+ FAIL_IF(!jump);
+
+ if (!(src1 & SLJIT_MEM)) {
+ ins = (type & SLJIT_32) ? 0x3800 /* ler */ : 0x2800 /* ldr */;
+ FAIL_IF(push_inst(compiler, ins | F4(dst_freg) | F0(src1)));
+ } else
+ FAIL_IF(float_mem(compiler, FLOAT_LOAD | (type & SLJIT_32), dst_freg, src1, src1w));
+
+ SLJIT_SKIP_CHECKS(compiler);
+ label = sljit_emit_label(compiler);
+ FAIL_IF(!label);
+
+ sljit_set_label(jump, label);
+ return SLJIT_SUCCESS;
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 reg,
+ sljit_s32 mem, sljit_sw memw)
+{
+ sljit_ins ins, reg1, reg2, base, offs = 0;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw));
+
+ if (!(reg & REG_PAIR_MASK))
+ return sljit_emit_mem_unaligned(compiler, type, reg, mem, memw);
+
+ ADJUST_LOCAL_OFFSET(mem, memw);
+
+ base = gpr(mem & REG_MASK);
+ reg1 = gpr(REG_PAIR_FIRST(reg));
+ reg2 = gpr(REG_PAIR_SECOND(reg));
+
+ if (mem & OFFS_REG_MASK) {
+ memw &= 0x3;
+ offs = gpr(OFFS_REG(mem));
+
+ if (memw != 0) {
+ FAIL_IF(push_inst(compiler, 0xeb000000000d /* sllg */ | R36A(tmp1) | R32A(offs) | ((sljit_ins)memw << 16)));
+ offs = tmp1;
+ } else if (!(type & SLJIT_MEM_STORE) && (base == reg1 || base == reg2) && (offs == reg1 || offs == reg2)) {
+ FAIL_IF(push_inst(compiler, 0xb9f80000 | R12A(tmp1) | R4A(base) | R0A(offs)));
+ base = tmp1;
+ offs = 0;
+ }
+
+ memw = 0;
+ } else if (memw < -0x80000 || memw > 0x7ffff - ((reg2 == reg1 + 1) ? 0 : SSIZE_OF(sw))) {
+ FAIL_IF(push_load_imm_inst(compiler, tmp1, memw));
+
+ if (base == 0)
+ base = tmp1;
+ else
+ offs = tmp1;
+
+ memw = 0;
+ }
+
+ if (offs == 0 && reg2 == (reg1 + 1)) {
+ ins = (type & SLJIT_MEM_STORE) ? 0xeb0000000024 /* stmg */ : 0xeb0000000004 /* lmg */;
+ return push_inst(compiler, ins | R36A(reg1) | R32A(reg2) | R28A(base) | disp_s20((sljit_s32)memw));
+ }
+
+ ins = ((type & SLJIT_MEM_STORE) ? 0xe30000000024 /* stg */ : 0xe30000000004 /* lg */) | R32A(offs) | R28A(base);
+
+ if (!(type & SLJIT_MEM_STORE) && base == reg1) {
+ FAIL_IF(push_inst(compiler, ins | R36A(reg2) | disp_s20((sljit_s32)memw + SSIZE_OF(sw))));
+ return push_inst(compiler, ins | R36A(reg1) | disp_s20((sljit_s32)memw));
+ }
+
+ FAIL_IF(push_inst(compiler, ins | R36A(reg1) | disp_s20((sljit_s32)memw)));
+ return push_inst(compiler, ins | R36A(reg2) | disp_s20((sljit_s32)memw + SSIZE_OF(sw)));
+}
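
Note: register pairs take a single lmg/stmg only when the two registers are consecutive and no index register is involved; otherwise two lg/stg are emitted, ordered so that a base register that is also the first destination is loaded last. The hazard in plain C (illustrative, element-indexed):

    /* Sketch: the base value must survive until the second load, so
     * when base == reg1, reg1 is written last. */
    #include <stdint.h>

    static void load_pair(uint64_t regs[], int reg1, int reg2, int base, long off)
    {
        const uint64_t *mem = (const uint64_t *)(uintptr_t)regs[base];

        if (base == reg1) {
            regs[reg2] = mem[off + 1]; /* second word first */
            regs[reg1] = mem[off];     /* clobber the base last */
        } else {
            regs[reg1] = mem[off];
            regs[reg2] = mem[off + 1];
        }
    }
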
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_mov(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 srcdst, sljit_sw srcdstw)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_s32 alignment = SLJIT_SIMD_GET_ELEM2_SIZE(type);
+ struct addr addr;
+ sljit_ins ins;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_mov(compiler, type, freg, srcdst, srcdstw));
+
+ ADJUST_LOCAL_OFFSET(srcdst, srcdstw);
+
+ if (reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if ((type & SLJIT_SIMD_FLOAT) && (elem_size < 2 || elem_size > 3))
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ if (!(srcdst & SLJIT_MEM)) {
+ if (type & SLJIT_SIMD_STORE)
+ ins = F36(srcdst) | F32(freg);
+ else
+ ins = F36(freg) | F32(srcdst);
+
+ return push_inst(compiler, 0xe70000000056 /* vlr */ | ins);
+ }
+
+ FAIL_IF(make_addr_bx(compiler, &addr, srcdst, srcdstw, tmp1));
+ ins = F36(freg) | R32A(addr.index) | R28A(addr.base) | disp_s20(addr.offset);
+
+ if (alignment >= 4)
+ ins |= 4 << 12;
+ else if (alignment == 3)
+ ins |= 3 << 12;
+
+ return push_inst(compiler, ((type & SLJIT_SIMD_STORE) ? 0xe7000000000e /* vst */ : 0xe70000000006 /* vl */) | ins);
+}
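
Note: the vl/vst encodings accept an alignment hint in bits 12-15, filled here from the SLJIT_SIMD elem2 field (4 << 12 for 16-byte, 3 << 12 for 8-byte alignment). The hint selection as a sketch:

    /* Sketch: mapping a log2 alignment onto the hint field used by the
     * vl/vst encodings above (0 = no alignment promise). */
    static unsigned alignment_hint(int log2_align)
    {
        if (log2_align >= 4)
            return 4u << 12; /* at least 16-byte aligned */
        if (log2_align == 3)
            return 3u << 12; /* 8-byte aligned */
        return 0;
    }
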
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_replicate(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
sljit_s32 src, sljit_sw srcw)
{
- sljit_u8 mask = get_cc(compiler, type & 0xff);
- sljit_gpr dst_r = gpr(dst_reg & ~SLJIT_I32_OP);
- sljit_gpr src_r = FAST_IS_REG(src) ? gpr(src) : tmp0;
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ struct addr addr;
+ sljit_gpr reg;
+ sljit_sw sign_ext;
CHECK_ERROR();
- CHECK(check_sljit_emit_cmov(compiler, type, dst_reg, src, srcw));
+ CHECK(check_sljit_emit_simd_replicate(compiler, type, freg, src, srcw));
- if (src & SLJIT_IMM) {
- /* TODO(mundaym): fast path with lscond2 */
- FAIL_IF(push_load_imm_inst(compiler, src_r, srcw));
+ ADJUST_LOCAL_OFFSET(src, srcw);
+
+ if (reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if ((type & SLJIT_SIMD_FLOAT) && elem_size < 2)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ if (src & SLJIT_MEM) {
+ FAIL_IF(make_addr_bx(compiler, &addr, src, srcw, tmp1));
+ return push_inst(compiler, 0xe70000000005 /* vlrep */ | F36(freg)
+ | R32A(addr.index) | R28A(addr.base) | disp_s20(addr.offset) | ((sljit_ins)elem_size << 12));
}
- #define LEVAL(i) i(dst_r, src_r, mask)
- if (have_lscond1())
- return push_inst(compiler,
- WHEN2(dst_reg & SLJIT_I32_OP, locr, locgr));
+ if (type & SLJIT_SIMD_FLOAT) {
+ if (src == SLJIT_IMM)
+ return push_inst(compiler, 0xe70000000044 /* vgbm */ | F36(freg));
- #undef LEVAL
+ return push_inst(compiler, 0xe7000000004d /* vrep */ | F36(freg) | F32(src) | ((sljit_ins)elem_size << 12));
+ }
+
+ if (src == SLJIT_IMM) {
+ sign_ext = 0x10000;
+
+ switch (elem_size) {
+ case 0:
+ srcw &= 0xff;
+ sign_ext = (sljit_s8)srcw;
+ break;
+ case 1:
+ srcw &= 0xffff;
+ sign_ext = (sljit_s16)srcw;
+ break;
+ case 2:
+ if ((sljit_s32)srcw == (sljit_s16)srcw) {
+ srcw &= 0xffff;
+ sign_ext = (sljit_s16)srcw;
+ } else
+ srcw &= 0xffffffff;
+ break;
+ default:
+ if (srcw == (sljit_s16)srcw) {
+ srcw &= 0xffff;
+ sign_ext = (sljit_s16)srcw;
+ }
+ break;
+ }
+
+ if (sign_ext != 0x10000) {
+ if (sign_ext == 0 || sign_ext == -1)
+ return push_inst(compiler, 0xe70000000044 /* vgbm */ | F36(freg)
+ | (sign_ext == 0 ? 0 : ((sljit_ins)0xffff << 16)));
+
+ return push_inst(compiler, 0xe70000000045 /* vrepi */ | F36(freg)
+ | ((sljit_ins)srcw << 16) | ((sljit_ins)elem_size << 12));
+ }
+
+ push_load_imm_inst(compiler, tmp0, srcw);
+ reg = tmp0;
+ } else
+ reg = gpr(src);
+
+ FAIL_IF(push_inst(compiler, 0xe70000000022 /* vlvg */ | F36(freg) | R32A(reg) | ((sljit_ins)elem_size << 12)));
+ return push_inst(compiler, 0xe7000000004d /* vrep */ | F36(freg) | F32(freg) | ((sljit_ins)elem_size << 12));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_lane_mov(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg, sljit_s32 lane_index,
+ sljit_s32 srcdst, sljit_sw srcdstw)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ struct addr addr;
+ sljit_gpr reg;
+ sljit_ins ins = 0;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_lane_mov(compiler, type, freg, lane_index, srcdst, srcdstw));
+
+ ADJUST_LOCAL_OFFSET(srcdst, srcdstw);
+
+ if (reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if ((type & SLJIT_SIMD_FLOAT) && elem_size < 2)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ if (srcdst & SLJIT_MEM) {
+ FAIL_IF(make_addr_bx(compiler, &addr, srcdst, srcdstw, tmp1));
+ ins = F36(freg) | R32A(addr.index) | R28A(addr.base) | disp_s20(addr.offset);
+ }
+
+ if (type & SLJIT_SIMD_LANE_ZERO) {
+ if ((srcdst & SLJIT_MEM) && lane_index == ((1 << (3 - elem_size)) - 1))
+ return push_inst(compiler, 0xe70000000004 /* vllez */ | ins | ((sljit_ins)elem_size << 12));
+
+ if ((type & SLJIT_SIMD_FLOAT) && freg == srcdst) {
+ FAIL_IF(push_inst(compiler, 0xe70000000056 /* vlr */ | F36(TMP_FREG1) | F32(freg)));
+ srcdst = TMP_FREG1;
+ srcdstw = 0;
+ }
+
+ FAIL_IF(push_inst(compiler, 0xe70000000044 /* vgbm */ | F36(freg)));
+ }
+
+ if (srcdst & SLJIT_MEM) {
+ switch (elem_size) {
+ case 0:
+ ins |= 0xe70000000000 /* vleb */;
+ break;
+ case 1:
+ ins |= 0xe70000000001 /* vleh */;
+ break;
+ case 2:
+ ins |= 0xe70000000003 /* vlef */;
+ break;
+ default:
+ ins |= 0xe70000000002 /* vleg */;
+ break;
+ }
+
+ /* Convert to vsteb - vsteg */
+ if (type & SLJIT_SIMD_STORE)
+ ins |= 0x8;
+
+ return push_inst(compiler, ins | ((sljit_ins)lane_index << 12));
+ }
+
+ if (type & SLJIT_SIMD_FLOAT) {
+ if (type & SLJIT_SIMD_STORE)
+ return push_inst(compiler, 0xe7000000004d /* vrep */ | F36(srcdst) | F32(freg) | ((sljit_ins)lane_index << 16) | ((sljit_ins)elem_size << 12));
+
+ if (elem_size == 3) {
+ if (lane_index == 0)
+ ins = F32(srcdst) | F28(freg) | (1 << 12);
+ else
+ ins = F32(freg) | F28(srcdst);
+
+ return push_inst(compiler, 0xe70000000084 /* vpdi */ | F36(freg) | ins);
+ }
+
+ FAIL_IF(push_inst(compiler, 0xe70000000021 /* vlgv */ | R36A(tmp0) | F32(srcdst) | ((sljit_ins)2 << 12)));
+ return push_inst(compiler, 0xe70000000022 /* vlvg */ | F36(freg) | R32A(tmp0) | ((sljit_ins)lane_index << 16) | ((sljit_ins)2 << 12));
+ }
+
+ if (srcdst == SLJIT_IMM) {
+ switch (elem_size) {
+ case 0:
+ ins = 0xe70000000040 /* vleib */;
+ srcdstw &= 0xff;
+ break;
+ case 1:
+ ins = 0xe70000000041 /* vleih */;
+ srcdstw &= 0xffff;
+ break;
+ case 2:
+ if ((sljit_s32)srcdstw == (sljit_s16)srcdstw) {
+ srcdstw &= 0xffff;
+ ins = 0xe70000000043 /* vleif */;
+ } else
+ srcdstw &= 0xffffffff;
+ break;
+ default:
+ if (srcdstw == (sljit_s16)srcdstw) {
+ srcdstw &= 0xffff;
+ ins = 0xe70000000042 /* vleig */;
+ }
+ break;
+ }
+
+ if (ins != 0)
+ return push_inst(compiler, ins | F36(freg) | ((sljit_ins)srcdstw << 16) | ((sljit_ins)lane_index << 12));
+
+ push_load_imm_inst(compiler, tmp0, srcdstw);
+ reg = tmp0;
+ } else
+ reg = gpr(srcdst);
+
+ ins = ((sljit_ins)lane_index << 16) | ((sljit_ins)elem_size << 12);
+
+ if (!(type & SLJIT_SIMD_STORE))
+ return push_inst(compiler, 0xe70000000022 /* vlvg */ | F36(freg) | R32A(reg) | ins);
+
+ FAIL_IF(push_inst(compiler, 0xe70000000021 /* vlgv */ | R36A(reg) | F32(freg) | ins));
+
+ if (!(type & SLJIT_SIMD_LANE_SIGNED) || elem_size >= 3)
+ return SLJIT_SUCCESS;
+
+ switch (elem_size) {
+ case 0:
+ ins = 0xb9060000 /* lgbr */;
+ break;
+ case 1:
+ ins = 0xb9070000 /* lghr */;
+ break;
+ default:
+ ins = 0xb9140000 /* lgfr */;
+ break;
+ }
+
+ return push_inst(compiler, ins | R4A(reg) | R0A(reg));
+}
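
Note: vlgv zero-fills the general register, so a SLJIT_SIMD_LANE_SIGNED read appends lgbr/lghr/lgfr to sign-extend 8/16/32-bit lanes (64-bit lanes need nothing). The same two steps in scalar C:

    /* Sketch: what the vlgv + lgbr pair computes for a signed 8-bit lane. */
    #include <stdint.h>

    static int64_t read_lane_s8(const uint8_t lanes[16], int index)
    {
        uint64_t zero_extended = lanes[index]; /* vlgv zero-fills */
        return (int8_t)zero_extended;          /* lgbr sign-extends */
    }
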
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_lane_replicate(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 src, sljit_s32 src_lane_index)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_lane_replicate(compiler, type, freg, src, src_lane_index));
+
+ if (reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if ((type & SLJIT_SIMD_FLOAT) && elem_size < 2)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ return push_inst(compiler, 0xe7000000004d /* vrep */ | F36(freg) | F32(src)
+ | ((sljit_ins)src_lane_index << 16) | ((sljit_ins)elem_size << 12));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_extend(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 src, sljit_sw srcw)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_s32 elem2_size = SLJIT_SIMD_GET_ELEM2_SIZE(type);
+ struct addr addr;
+ sljit_ins ins;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_extend(compiler, type, freg, src, srcw));
+
+ ADJUST_LOCAL_OFFSET(src, srcw);
+
+ if (reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if ((type & SLJIT_SIMD_FLOAT) && elem_size < 2)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ if (src & SLJIT_MEM) {
+ FAIL_IF(make_addr_bx(compiler, &addr, src, srcw, tmp1));
+ ins = F36(freg) | R32A(addr.index) | R28A(addr.base) | disp_s20(addr.offset);
+
+ switch (elem2_size - elem_size) {
+ case 1:
+ ins |= 0xe70000000002 /* vleg */;
+ break;
+ case 2:
+ ins |= 0xe70000000003 /* vlef */;
+ break;
+ default:
+ ins |= 0xe70000000001 /* vleh */;
+ break;
+ }
+
+ FAIL_IF(push_inst(compiler, ins));
+ src = freg;
+ }
+
+ if (type & SLJIT_SIMD_FLOAT) {
+ FAIL_IF(push_inst(compiler, 0xe700000000d5 /* vuplh */ | F36(freg) | F32(src) | (2 << 12)));
+ FAIL_IF(push_inst(compiler, 0xe70000000030 /* vesl */ | F36(freg) | F32(freg) | (32 << 16) | (3 << 12)));
+ return push_inst(compiler, 0xe700000000c4 /* vfll */ | F36(freg) | F32(freg) | (2 << 12));
+ }
+
+ ins = ((type & SLJIT_SIMD_EXTEND_SIGNED) ? 0xe700000000d7 /* vuph */ : 0xe700000000d5 /* vuplh */) | F36(freg);
+
+ do {
+ FAIL_IF(push_inst(compiler, ins | F32(src) | ((sljit_ins)elem_size << 12)));
+ src = freg;
+ } while (++elem_size < elem2_size);
+
+ return SLJIT_SUCCESS;
+}
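
Note: integer widening repeats vuph/vuplh, each pass doubling the element width, so the do/while above runs elem2_size - elem_size times. The same counting in scalar form:

    /* Sketch: element sizes are log2 of bytes, so each unpack pass is
     * one increment; 8-bit -> 32-bit lanes takes two passes. */
    static int unpack_passes(int elem_size, int elem2_size)
    {
        int passes = 0;
        while (elem_size < elem2_size) {
            elem_size++; /* one vuph/vuplh per increment */
            passes++;
        }
        return passes;
    }
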
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_sign(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 dst, sljit_sw dstw)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_gpr dst_r;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_sign(compiler, type, freg, dst, dstw));
+
+ ADJUST_LOCAL_OFFSET(dst, dstw);
+
+ if (reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if ((type & SLJIT_SIMD_FLOAT) && elem_size < 2)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ switch (elem_size) {
+ case 0:
+ push_load_imm_inst(compiler, tmp0, (sljit_sw)0x4048505860687078);
+ push_load_imm_inst(compiler, tmp1, (sljit_sw)0x0008101820283038);
+ FAIL_IF(push_inst(compiler, 0xe70000000062 /* vlvgp */ | F36(TMP_FREG1) | R32A(tmp1) | R28A(tmp0)));
+ break;
+ case 1:
+ push_load_imm_inst(compiler, tmp0, (sljit_sw)0x0010203040506070);
+ break;
+ case 2:
+ push_load_imm_inst(compiler, tmp0, (sljit_sw)0x8080808000204060);
+ break;
+ default:
+ push_load_imm_inst(compiler, tmp0, (sljit_sw)0x8080808080800040);
+ break;
+ }
+
+ if (elem_size != 0)
+ FAIL_IF(push_inst(compiler, 0xe70000000022 /* vlvg */ | F36(TMP_FREG1) | R32A(tmp0) | (1 << 16) | (3 << 12)));
+
+ FAIL_IF(push_inst(compiler, 0xe70000000085 /* vbperm */ | F36(TMP_FREG1) | F32(freg) | F28(TMP_FREG1)));
+
+ dst_r = FAST_IS_REG(dst) ? gpr(dst) : tmp0;
+ FAIL_IF(push_inst(compiler, 0xe70000000021 /* vlgv */ | R36A(dst_r) | F32(TMP_FREG1)
+ | (elem_size == 0 ? ((3 << 16) | (1 << 12)) : (7 << 16))));
+
+ if (dst_r == tmp0)
+ return store_word(compiler, tmp0, dst, dstw, type & SLJIT_32);
+
+ return SLJIT_SUCCESS;
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_op2(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 dst_freg, sljit_s32 src1_freg, sljit_s32 src2_freg)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_ins ins = 0;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_op2(compiler, type, dst_freg, src1_freg, src2_freg));
+
+ if (reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if ((type & SLJIT_SIMD_FLOAT) && (elem_size < 2 || elem_size > 3))
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ switch (SLJIT_SIMD_GET_OPCODE(type)) {
+ case SLJIT_SIMD_OP2_AND:
+ ins = 0xe70000000068 /* vn */;
+ break;
+ case SLJIT_SIMD_OP2_OR:
+ ins = 0xe7000000006a /* vo */;
+ break;
+ case SLJIT_SIMD_OP2_XOR:
+ ins = 0xe7000000006d /* vx */;
+ break;
+ }
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ return push_inst(compiler, ins | F36(dst_freg) | F32(src1_freg) | F28(src2_freg));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_atomic_load(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst_reg,
+ sljit_s32 mem_reg)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_atomic_load(compiler, op, dst_reg, mem_reg));
+
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_op1(compiler, op, dst_reg, 0, SLJIT_MEM1(mem_reg), 0);
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_atomic_store(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 src_reg,
+ sljit_s32 mem_reg,
+ sljit_s32 temp_reg)
+{
+ sljit_ins mask;
+ sljit_gpr tmp_r = gpr(temp_reg);
+ sljit_gpr mem_r = gpr(mem_reg);
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_atomic_store(compiler, op, src_reg, mem_reg, temp_reg));
+
+ switch (GET_OPCODE(op)) {
+ case SLJIT_MOV32:
+ case SLJIT_MOV_U32:
+ return push_inst(compiler, 0xba000000 /* cs */ | R20A(tmp_r) | R16A(gpr(src_reg)) | R12A(mem_r));
+ case SLJIT_MOV_U8:
+ mask = 0xff;
+ break;
+ case SLJIT_MOV_U16:
+ mask = 0xffff;
+ break;
+ default:
+ return push_inst(compiler, 0xeb0000000030 /* csg */ | R36A(tmp_r) | R32A(gpr(src_reg)) | R28A(mem_r));
+ }
+
+ /* tmp0 = (src_reg ^ tmp_r) & mask */
+ FAIL_IF(push_inst(compiler, 0xa50f0000 /* llill */ | R20A(tmp1) | mask));
+ FAIL_IF(push_inst(compiler, 0xb9e70000 /* xgrk */ | R4A(tmp0) | R0A(gpr(src_reg)) | R12A(tmp_r)));
+ FAIL_IF(push_inst(compiler, 0xa7090000 /* lghi */ | R20A(tmp_r) | 0xfffc));
+ FAIL_IF(push_inst(compiler, 0xb9800000 /* ngr */ | R4A(tmp0) | R0A(tmp1)));
+
+ /* tmp0 = tmp0 << (((mem_r ^ 0x3) & 0x3) << 3) */
+ FAIL_IF(push_inst(compiler, 0xa50f0000 /* llill */ | R20A(tmp1) | (sljit_ins)((mask == 0xff) ? 0x18 : 0x10)));
+ FAIL_IF(push_inst(compiler, 0xb9800000 /* ngr */ | R4A(tmp_r) | R0A(mem_r)));
+ FAIL_IF(push_inst(compiler, 0xec0000000057 /* rxsbg */ | R36A(tmp1) | R32A(mem_r) | (59 << 24) | (60 << 16) | (3 << 8)));
+ FAIL_IF(push_inst(compiler, 0xeb000000000d /* sllg */ | R36A(tmp0) | R32A(tmp0) | R28A(tmp1)));
+
+ /* Already computed: tmp_r = mem_r & ~0x3 */
- /* TODO(mundaym): implement */
- return SLJIT_ERR_UNSUPPORTED;
+ FAIL_IF(push_inst(compiler, 0x58000000 /* l */ | R20A(tmp1) | R12A(tmp_r)));
+ FAIL_IF(push_inst(compiler, 0x1700 /* x */ | R4A(tmp0) | R0A(tmp1)));
+ return push_inst(compiler, 0xba000000 /* cs */ | R20A(tmp1) | R16A(tmp0) | R12A(tmp_r));
}
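
Note: cs/csg provide compare-and-swap only at word/doubleword width, so byte and halfword stores are emulated by splicing the new value into the containing word and retrying. The shape of that loop, as a hedged C11 sketch (byte indexing abstracted away; the real code above derives the shift from the big-endian address bits):

    /* Sketch: sub-word atomic store built from a word-wide CAS. */
    #include <stdatomic.h>
    #include <stdint.h>

    static void atomic_store_u8(_Atomic uint32_t *word, unsigned byte_index,
        uint8_t value)
    {
        uint32_t shift = byte_index * 8;
        uint32_t mask = (uint32_t)0xff << shift;
        uint32_t old = atomic_load(word), desired;

        do {
            desired = (old & ~mask) | ((uint32_t)value << shift);
        } while (!atomic_compare_exchange_weak(word, &old, desired));
    }
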
/* --------------------------------------------------------------------- */
@@ -2991,7 +4500,7 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_ta
SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset)
{
- sljit_set_jump_addr(addr, new_constant, executable_offset);
+ sljit_set_jump_addr(addr, (sljit_uw)new_constant, executable_offset);
}
SLJIT_API_FUNC_ATTRIBUTE struct sljit_put_label *sljit_emit_put_label(
diff --git a/src/3rdparty/pcre2/src/sljit/sljitNativeSPARC_32.c b/src/3rdparty/pcre2/src/sljit/sljitNativeSPARC_32.c
index 28886405af..218992b355 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitNativeSPARC_32.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitNativeSPARC_32.c
@@ -35,16 +35,13 @@ static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 dst,
#define ARG2(flags, src2) ((flags & SRC2_IMM) ? IMM(src2) : S2(src2))
-static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags,
+static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_u32 flags,
sljit_s32 dst, sljit_s32 src1, sljit_sw src2)
{
SLJIT_COMPILE_ASSERT(ICC_IS_SET == SET_FLAGS, icc_is_set_and_set_flags_must_be_the_same);
switch (op) {
case SLJIT_MOV:
- case SLJIT_MOV_U32:
- case SLJIT_MOV_S32:
- case SLJIT_MOV_P:
SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
if (dst != src2)
return push_inst(compiler, OR | D(dst) | S1(0) | S2(src2), DR(dst));
@@ -59,8 +56,7 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
FAIL_IF(push_inst(compiler, SLL | D(dst) | S1(src2) | IMM(24), DR(dst)));
return push_inst(compiler, SRA | D(dst) | S1(dst) | IMM(24), DR(dst));
}
- else if (dst != src2)
- SLJIT_UNREACHABLE();
+ SLJIT_ASSERT(dst == src2);
return SLJIT_SUCCESS;
case SLJIT_MOV_U16:
@@ -70,13 +66,12 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
FAIL_IF(push_inst(compiler, SLL | D(dst) | S1(src2) | IMM(16), DR(dst)));
return push_inst(compiler, (op == SLJIT_MOV_S16 ? SRA : SRL) | D(dst) | S1(dst) | IMM(16), DR(dst));
}
- else if (dst != src2)
- SLJIT_UNREACHABLE();
+ SLJIT_ASSERT(dst == src2);
return SLJIT_SUCCESS;
case SLJIT_NOT:
SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
- return push_inst(compiler, XNOR | (flags & SET_FLAGS) | D(dst) | S1(0) | S2(src2), DR(dst) | (flags & SET_FLAGS));
+ return push_inst(compiler, XNOR | (flags & SET_FLAGS) | D(dst) | S1(0) | S2(src2), DRF(dst, flags));
case SLJIT_CLZ:
SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
@@ -89,22 +84,24 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
/* Loop. */
FAIL_IF(push_inst(compiler, SUB | SET_FLAGS | D(0) | S1(TMP_REG1) | S2(0), SET_FLAGS));
FAIL_IF(push_inst(compiler, SLL | D(TMP_REG1) | S1(TMP_REG1) | IMM(1), DR(TMP_REG1)));
- FAIL_IF(push_inst(compiler, BICC | DA(0xe) | (-2 & DISP_MASK), UNMOVABLE_INS));
+ FAIL_IF(push_inst(compiler, BICC | DA(0xe) | ((sljit_ins)-2 & DISP_MASK), UNMOVABLE_INS));
return push_inst(compiler, ADD | D(dst) | S1(dst) | IMM(1), UNMOVABLE_INS);
case SLJIT_ADD:
- compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD_SUB;
- return push_inst(compiler, ADD | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst) | (flags & SET_FLAGS));
+ compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD;
+ return push_inst(compiler, ADD | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DRF(dst, flags));
case SLJIT_ADDC:
- return push_inst(compiler, ADDC | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst) | (flags & SET_FLAGS));
+ compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD;
+ return push_inst(compiler, ADDC | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DRF(dst, flags));
case SLJIT_SUB:
- compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD_SUB;
- return push_inst(compiler, SUB | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst) | (flags & SET_FLAGS));
+ compiler->status_flags_state = SLJIT_CURRENT_FLAGS_SUB;
+ return push_inst(compiler, SUB | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DRF(dst, flags));
case SLJIT_SUBC:
- return push_inst(compiler, SUBC | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst) | (flags & SET_FLAGS));
+ compiler->status_flags_state = SLJIT_CURRENT_FLAGS_SUB;
+ return push_inst(compiler, SUBC | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DRF(dst, flags));
case SLJIT_MUL:
compiler->status_flags_state = 0;
@@ -116,13 +113,13 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
return push_inst(compiler, SUB | SET_FLAGS | D(0) | S1(TMP_REG1) | S2(TMP_LINK), MOVABLE_INS | SET_FLAGS);
case SLJIT_AND:
- return push_inst(compiler, AND | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst) | (flags & SET_FLAGS));
+ return push_inst(compiler, AND | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DRF(dst, flags));
case SLJIT_OR:
- return push_inst(compiler, OR | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst) | (flags & SET_FLAGS));
+ return push_inst(compiler, OR | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DRF(dst, flags));
case SLJIT_XOR:
- return push_inst(compiler, XOR | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst) | (flags & SET_FLAGS));
+ return push_inst(compiler, XOR | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DRF(dst, flags));
case SLJIT_SHL:
FAIL_IF(push_inst(compiler, SLL | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst)));
@@ -147,7 +144,7 @@ static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_t
sljit_s32 word_reg_index = 8;
sljit_s32 float_arg_index = 1;
sljit_s32 double_arg_count = 0;
- sljit_s32 float_offset = (16 + 6) * sizeof(sljit_sw);
+ sljit_u32 float_offset = (16 + 6) * sizeof(sljit_sw);
sljit_s32 types = 0;
sljit_s32 reg = 0;
sljit_s32 move_to_tmp2 = 0;
@@ -155,18 +152,12 @@ static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_t
if (src)
reg = reg_map[*src & REG_MASK];
- arg_types >>= SLJIT_DEF_SHIFT;
+ arg_types >>= SLJIT_ARG_SHIFT;
while (arg_types) {
- types = (types << SLJIT_DEF_SHIFT) | (arg_types & SLJIT_DEF_MASK);
+ types = (types << SLJIT_ARG_SHIFT) | (arg_types & SLJIT_ARG_MASK);
- switch (arg_types & SLJIT_DEF_MASK) {
- case SLJIT_ARG_TYPE_F32:
- float_arg_index++;
- if (reg_index == reg)
- move_to_tmp2 = 1;
- reg_index++;
- break;
+ switch (arg_types & SLJIT_ARG_MASK) {
case SLJIT_ARG_TYPE_F64:
float_arg_index++;
double_arg_count++;
@@ -174,36 +165,37 @@ static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_t
move_to_tmp2 = 1;
reg_index += 2;
break;
+ case SLJIT_ARG_TYPE_F32:
+ float_arg_index++;
+ if (reg_index == reg)
+ move_to_tmp2 = 1;
+ reg_index++;
+ break;
default:
- if (reg_index != word_reg_index && reg_index < 14 && reg_index == reg)
+ if (reg_index != word_reg_index && reg_index == reg)
move_to_tmp2 = 1;
reg_index++;
word_reg_index++;
break;
}
- if (move_to_tmp2) {
- move_to_tmp2 = 0;
- if (reg < 14)
- FAIL_IF(push_inst(compiler, OR | D(TMP_REG1) | S1(0) | S2A(reg), DR(TMP_REG1)));
- *src = TMP_REG1;
- }
+ arg_types >>= SLJIT_ARG_SHIFT;
+ }
- arg_types >>= SLJIT_DEF_SHIFT;
+ if (move_to_tmp2) {
+ if (reg < 14)
+ FAIL_IF(push_inst(compiler, OR | D(TMP_REG1) | S1(0) | S2A(reg), DR(TMP_REG1)));
+ *src = TMP_REG1;
}
arg_types = types;
while (arg_types) {
- switch (arg_types & SLJIT_DEF_MASK) {
- case SLJIT_ARG_TYPE_F32:
- float_arg_index--;
- FAIL_IF(push_inst(compiler, STF | FD(float_arg_index) | S1(SLJIT_SP) | IMM(float_offset), MOVABLE_INS));
- float_offset -= sizeof(sljit_f64);
- break;
+ switch (arg_types & SLJIT_ARG_MASK) {
case SLJIT_ARG_TYPE_F64:
float_arg_index--;
if (float_arg_index == 4 && double_arg_count == 4) {
+ /* The address is not doubleword aligned, so two instructions are required to store the double. */
FAIL_IF(push_inst(compiler, STF | FD(float_arg_index) | S1(SLJIT_SP) | IMM((16 + 7) * sizeof(sljit_sw)), MOVABLE_INS));
FAIL_IF(push_inst(compiler, STF | FD(float_arg_index) | (1 << 25) | S1(SLJIT_SP) | IMM((16 + 8) * sizeof(sljit_sw)), MOVABLE_INS));
}
@@ -211,36 +203,41 @@ static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_t
FAIL_IF(push_inst(compiler, STDF | FD(float_arg_index) | S1(SLJIT_SP) | IMM(float_offset), MOVABLE_INS));
float_offset -= sizeof(sljit_f64);
break;
+ case SLJIT_ARG_TYPE_F32:
+ float_arg_index--;
+ FAIL_IF(push_inst(compiler, STF | FD(float_arg_index) | S1(SLJIT_SP) | IMM(float_offset), MOVABLE_INS));
+ float_offset -= sizeof(sljit_f64);
+ break;
default:
break;
}
- arg_types >>= SLJIT_DEF_SHIFT;
+ arg_types >>= SLJIT_ARG_SHIFT;
}
float_offset = (16 + 6) * sizeof(sljit_sw);
while (types) {
- switch (types & SLJIT_DEF_MASK) {
- case SLJIT_ARG_TYPE_F32:
- reg_index--;
- if (reg_index < 14)
- FAIL_IF(push_inst(compiler, LDUW | DA(reg_index) | S1(SLJIT_SP) | IMM(float_offset), reg_index));
- float_offset -= sizeof(sljit_f64);
- break;
+ switch (types & SLJIT_ARG_MASK) {
case SLJIT_ARG_TYPE_F64:
reg_index -= 2;
if (reg_index < 14) {
if ((reg_index & 0x1) != 0) {
FAIL_IF(push_inst(compiler, LDUW | DA(reg_index) | S1(SLJIT_SP) | IMM(float_offset), reg_index));
- if (reg_index < 13)
+ if (reg_index < 8 + 6 - 1)
FAIL_IF(push_inst(compiler, LDUW | DA(reg_index + 1) | S1(SLJIT_SP) | IMM(float_offset + sizeof(sljit_sw)), reg_index + 1));
}
- else
+ else
FAIL_IF(push_inst(compiler, LDD | DA(reg_index) | S1(SLJIT_SP) | IMM(float_offset), reg_index));
}
float_offset -= sizeof(sljit_f64);
break;
+ case SLJIT_ARG_TYPE_F32:
+ reg_index--;
+ if (reg_index < 8 + 6)
+ FAIL_IF(push_inst(compiler, LDUW | DA(reg_index) | S1(SLJIT_SP) | IMM(float_offset), reg_index));
+ float_offset -= sizeof(sljit_f64);
+ break;
default:
reg_index--;
word_reg_index--;
@@ -254,7 +251,7 @@ static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_t
break;
}
- types >>= SLJIT_DEF_SHIFT;
+ types >>= SLJIT_ARG_SHIFT;
}
return SLJIT_SUCCESS;
@@ -282,5 +279,5 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_ta
SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset)
{
- sljit_set_jump_addr(addr, new_constant, executable_offset);
+ sljit_set_jump_addr(addr, (sljit_uw)new_constant, executable_offset);
}
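
Note: on 32-bit SPARC a constant is materialized as a sethi/or pair, and sljit_set_jump_addr / sljit_set_const patch exactly those two fields — see the (addr >> 10) & 0x3fffff and addr & 0x3ff updates in sljit_generate_code below. The split itself:

    /* Sketch: the sethi/or immediate split patched by set_jump_addr. */
    #include <stdint.h>

    static void split_imm32(uint32_t value, uint32_t *hi22, uint32_t *lo10)
    {
        *hi22 = (value >> 10) & 0x3fffff; /* sethi field */
        *lo10 = value & 0x3ff;            /* or immediate field */
    }
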
diff --git a/src/3rdparty/pcre2/src/sljit/sljitNativeSPARC_common.c b/src/3rdparty/pcre2/src/sljit/sljitNativeSPARC_common.c
index e833f09d7a..c8d19e16c6 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitNativeSPARC_common.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitNativeSPARC_common.c
@@ -98,36 +98,37 @@ static void sparc_cache_flush(sljit_ins *from, sljit_ins *to)
#define TMP_FREG2 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 2)
static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 6] = {
- 0, 8, 9, 10, 11, 29, 28, 27, 23, 22, 21, 20, 19, 18, 17, 16, 26, 25, 24, 14, 1, 12, 13, 15
+ 0, 8, 9, 10, 11, 23, 22, 21, 20, 19, 18, 17, 16, 29, 28, 27, 26, 25, 24, 14, 1, 12, 13, 15
};
static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
- 0, 0, 2, 4, 6, 8, 10, 12, 14
+ 0, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
};
/* --------------------------------------------------------------------- */
/* Instruction forms */
/* --------------------------------------------------------------------- */
-#define D(d) (reg_map[d] << 25)
-#define FD(d) (freg_map[d] << 25)
-#define FDN(d) ((freg_map[d] | 0x1) << 25)
-#define DA(d) ((d) << 25)
-#define S1(s1) (reg_map[s1] << 14)
-#define FS1(s1) (freg_map[s1] << 14)
-#define S1A(s1) ((s1) << 14)
-#define S2(s2) (reg_map[s2])
-#define FS2(s2) (freg_map[s2])
-#define FS2N(s2) (freg_map[s2] | 0x1)
-#define S2A(s2) (s2)
+#define D(d) ((sljit_ins)reg_map[d] << 25)
+#define FD(d) ((sljit_ins)freg_map[d] << 25)
+#define FDN(d) (((sljit_ins)freg_map[d] | 0x1) << 25)
+#define DA(d) ((sljit_ins)(d) << 25)
+#define S1(s1) ((sljit_ins)reg_map[s1] << 14)
+#define FS1(s1) ((sljit_ins)freg_map[s1] << 14)
+#define S1A(s1) ((sljit_ins)(s1) << 14)
+#define S2(s2) ((sljit_ins)reg_map[s2])
+#define FS2(s2) ((sljit_ins)freg_map[s2])
+#define FS2N(s2) ((sljit_ins)freg_map[s2] | 0x1)
+#define S2A(s2) ((sljit_ins)(s2))
#define IMM_ARG 0x2000
-#define DOP(op) ((op) << 5)
-#define IMM(imm) (((imm) & 0x1fff) | IMM_ARG)
+#define DOP(op) ((sljit_ins)(op) << 5)
+#define IMM(imm) (((sljit_ins)(imm) & 0x1fff) | IMM_ARG)
#define DR(dr) (reg_map[dr])
-#define OPC1(opcode) ((opcode) << 30)
-#define OPC2(opcode) ((opcode) << 22)
-#define OPC3(opcode) ((opcode) << 19)
+#define DRF(dr, flags) ((sljit_s32)(reg_map[dr] | ((flags) & SET_FLAGS)))
+#define OPC1(opcode) ((sljit_ins)(opcode) << 30)
+#define OPC2(opcode) ((sljit_ins)(opcode) << 22)
+#define OPC3(opcode) ((sljit_ins)(opcode) << 19)
#define SET_FLAGS OPC3(0x10)
#define ADD (OPC1(0x2) | OPC3(0x00))
@@ -156,6 +157,8 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
#define FSUBS (OPC1(0x2) | OPC3(0x34) | DOP(0x45))
#define JMPL (OPC1(0x2) | OPC3(0x38))
#define LDD (OPC1(0x3) | OPC3(0x03))
+#define LDDF (OPC1(0x3) | OPC3(0x23))
+#define LDF (OPC1(0x3) | OPC3(0x20))
#define LDUW (OPC1(0x3) | OPC3(0x00))
#define NOP (OPC1(0x0) | OPC2(0x04))
#define OR (OPC1(0x2) | OPC3(0x02))
@@ -170,6 +173,7 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
#define SRAX (OPC1(0x2) | OPC3(0x27) | (1 << 12))
#define SRL (OPC1(0x2) | OPC3(0x26))
#define SRLX (OPC1(0x2) | OPC3(0x26) | (1 << 12))
+#define STD (OPC1(0x3) | OPC3(0x07))
#define STDF (OPC1(0x3) | OPC3(0x27))
#define STF (OPC1(0x3) | OPC3(0x24))
#define STW (OPC1(0x3) | OPC3(0x04))
@@ -183,7 +187,7 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
#define MAX_DISP (0x1fffff)
#define MIN_DISP (-0x200000)
-#define DISP_MASK (0x3fffff)
+#define DISP_MASK ((sljit_ins)0x3fffff)
#define BICC (OPC1(0x0) | OPC2(0x2))
#define FBFCC (OPC1(0x0) | OPC2(0x6))
@@ -274,7 +278,7 @@ static SLJIT_INLINE sljit_ins* detect_jump_type(struct sljit_jump *jump, sljit_i
}
}
- diff += sizeof(sljit_ins);
+ diff += SSIZE_OF(ins);
if (diff <= MAX_DISP && diff >= MIN_DISP) {
jump->flags |= PATCH_B;
@@ -300,7 +304,7 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
sljit_uw word_count;
sljit_uw next_addr;
sljit_sw executable_offset;
- sljit_uw addr;
+ sljit_sw addr;
struct sljit_label *label;
struct sljit_jump *jump;
@@ -340,7 +344,7 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
if (label && label->size == word_count) {
/* Just recording the address. */
label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
- label->size = code_ptr - code;
+ label->size = (sljit_uw)(code_ptr - code);
label = label->next;
}
if (jump && jump->addr == word_count) {
@@ -373,7 +377,7 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
if (label && label->size == word_count) {
label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
- label->size = code_ptr - code;
+ label->size = (sljit_uw)(code_ptr - code);
label = label->next;
}
@@ -386,27 +390,27 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
jump = compiler->jumps;
while (jump) {
do {
- addr = (jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target;
+ addr = (sljit_sw)((jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target);
buf_ptr = (sljit_ins *)jump->addr;
if (jump->flags & PATCH_CALL) {
- addr = (sljit_sw)(addr - (sljit_uw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset)) >> 2;
- SLJIT_ASSERT((sljit_sw)addr <= 0x1fffffff && (sljit_sw)addr >= -0x20000000);
- buf_ptr[0] = CALL | (addr & 0x3fffffff);
+ addr = (addr - (sljit_sw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset)) >> 2;
+ SLJIT_ASSERT(addr <= 0x1fffffff && addr >= -0x20000000);
+ buf_ptr[0] = CALL | ((sljit_ins)addr & 0x3fffffff);
break;
}
if (jump->flags & PATCH_B) {
- addr = (sljit_sw)(addr - (sljit_uw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset)) >> 2;
- SLJIT_ASSERT((sljit_sw)addr <= MAX_DISP && (sljit_sw)addr >= MIN_DISP);
- buf_ptr[0] = (buf_ptr[0] & ~DISP_MASK) | (addr & DISP_MASK);
+ addr = (addr - (sljit_sw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset)) >> 2;
+ SLJIT_ASSERT(addr <= MAX_DISP && addr >= MIN_DISP);
+ buf_ptr[0] = (buf_ptr[0] & ~DISP_MASK) | ((sljit_ins)addr & DISP_MASK);
break;
}
/* Set the fields of immediate loads. */
#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
SLJIT_ASSERT(((buf_ptr[0] & 0xc1cfffff) == 0x01000000) && ((buf_ptr[1] & 0xc1f83fff) == 0x80102000));
- buf_ptr[0] |= (addr >> 10) & 0x3fffff;
- buf_ptr[1] |= addr & 0x3ff;
+ buf_ptr[0] |= (sljit_ins)(addr >> 10) & 0x3fffff;
+ buf_ptr[1] |= (sljit_ins)addr & 0x3ff;
#else
#error "Implementation required"
#endif
@@ -416,7 +420,7 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
put_label = compiler->put_labels;
while (put_label) {
- addr = put_label->label->addr;
+ addr = (sljit_sw)put_label->label->addr;
buf_ptr = (sljit_ins *)put_label->addr;
#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
@@ -431,7 +435,7 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
compiler->error = SLJIT_ERR_COMPILED;
compiler->executable_offset = executable_offset;
- compiler->executable_size = (code_ptr - code) * sizeof(sljit_ins);
+ compiler->executable_size = (sljit_uw)(code_ptr - code) * sizeof(sljit_ins);
code = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(code, executable_offset);
code_ptr = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
@@ -487,13 +491,14 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
#define ALT_KEEP_CACHE 0x00040
#define CUMULATIVE_OP 0x00080
#define IMM_OP 0x00100
-#define SRC2_IMM 0x00200
+#define MOVE_OP 0x00200
+#define SRC2_IMM 0x00400
-#define REG_DEST 0x00400
-#define REG2_SOURCE 0x00800
-#define SLOW_SRC1 0x01000
-#define SLOW_SRC2 0x02000
-#define SLOW_DEST 0x04000
+#define REG_DEST 0x00800
+#define REG2_SOURCE 0x01000
+#define SLOW_SRC1 0x02000
+#define SLOW_SRC2 0x04000
+#define SLOW_DEST 0x08000
/* SET_FLAGS (0x10 << 19) also belongs here! */
@@ -507,6 +512,10 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
+ sljit_s32 reg_index, types, tmp;
+ sljit_u32 float_offset, args_offset;
+ sljit_s32 saved_arg_index, scratch_arg_index, float_arg_index;
+
CHECK_ERROR();
CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
@@ -514,7 +523,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
local_size = (local_size + SLJIT_LOCALS_OFFSET + 7) & ~0x7;
compiler->local_size = local_size;
- if (local_size <= SIMM_MAX) {
+ if (local_size <= -SIMM_MIN) {
FAIL_IF(push_inst(compiler, SAVE | D(SLJIT_SP) | S1(SLJIT_SP) | IMM(-local_size), UNMOVABLE_INS));
}
else {
@@ -522,7 +531,88 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
FAIL_IF(push_inst(compiler, SAVE | D(SLJIT_SP) | S1(SLJIT_SP) | S2(TMP_REG1), UNMOVABLE_INS));
}
- /* Arguments are in their appropriate registers. */
+ arg_types >>= SLJIT_ARG_SHIFT;
+
+ types = arg_types;
+ float_offset = 16 * sizeof(sljit_sw);
+ reg_index = 24;
+
+ while (types && reg_index < 24 + 6) {
+ switch (types & SLJIT_ARG_MASK) {
+ case SLJIT_ARG_TYPE_F64:
+ if (reg_index & 0x1) {
+ FAIL_IF(push_inst(compiler, STW | DA(reg_index) | S1(SLJIT_SP) | IMM(float_offset), MOVABLE_INS));
+ if (reg_index >= 24 + 6 - 1)
+ break;
+ FAIL_IF(push_inst(compiler, STW | DA(reg_index + 1) | S1(SLJIT_SP) | IMM(float_offset + sizeof(sljit_sw)), MOVABLE_INS));
+ } else
+ FAIL_IF(push_inst(compiler, STD | DA(reg_index) | S1(SLJIT_SP) | IMM(float_offset), MOVABLE_INS));
+
+ float_offset += sizeof(sljit_f64);
+ reg_index++;
+ break;
+ case SLJIT_ARG_TYPE_F32:
+ FAIL_IF(push_inst(compiler, STW | DA(reg_index) | S1(SLJIT_SP) | IMM(float_offset), MOVABLE_INS));
+ float_offset += sizeof(sljit_f64);
+ break;
+ }
+
+ reg_index++;
+ types >>= SLJIT_ARG_SHIFT;
+ }
+
+ args_offset = (16 + 1 + 6) * sizeof(sljit_sw);
+ float_offset = 16 * sizeof(sljit_sw);
+ reg_index = 24;
+ saved_arg_index = 24;
+ scratch_arg_index = 8 - 1;
+ float_arg_index = 1;
+
+ while (arg_types) {
+ switch (arg_types & SLJIT_ARG_MASK) {
+ case SLJIT_ARG_TYPE_F64:
+ if (reg_index < 24 + 6 - 1) {
+ FAIL_IF(push_inst(compiler, LDDF | FD(float_arg_index) | S1(SLJIT_SP) | IMM(float_offset), MOVABLE_INS));
+ } else if (reg_index < 24 + 6) {
+ FAIL_IF(push_inst(compiler, LDF | FD(float_arg_index) | S1(SLJIT_SP) | IMM(float_offset), MOVABLE_INS));
+ FAIL_IF(push_inst(compiler, LDF | FD(float_arg_index) | (1 << 25) | S1A(30) | IMM(args_offset), MOVABLE_INS));
+ } else {
+ FAIL_IF(push_inst(compiler, LDF | FD(float_arg_index) | S1A(30) | IMM(args_offset), MOVABLE_INS));
+ FAIL_IF(push_inst(compiler, LDF | FD(float_arg_index) | (1 << 25) | S1A(30) | IMM(args_offset + sizeof(sljit_sw)), MOVABLE_INS));
+ }
+
+ float_arg_index++;
+ float_offset += sizeof(sljit_f64);
+ reg_index++;
+ break;
+ case SLJIT_ARG_TYPE_F32:
+ if (reg_index < 24 + 6)
+ FAIL_IF(push_inst(compiler, LDF | FD(float_arg_index) | S1(SLJIT_SP) | IMM(float_offset), MOVABLE_INS));
+ else
+ FAIL_IF(push_inst(compiler, LDF | FD(float_arg_index) | S1A(30) | IMM(args_offset), MOVABLE_INS));
+ float_arg_index++;
+ float_offset += sizeof(sljit_f64);
+ break;
+ default:
+ scratch_arg_index++;
+
+ if (!(arg_types & SLJIT_ARG_TYPE_SCRATCH_REG)) {
+ tmp = saved_arg_index++;
+ if (tmp == reg_index)
+ break;
+ } else
+ tmp = scratch_arg_index;
+
+ if (reg_index < 24 + 6)
+ FAIL_IF(push_inst(compiler, OR | DA(tmp) | S1(0) | S2A(reg_index), tmp));
+ else
+ FAIL_IF(push_inst(compiler, LDUW | DA(tmp) | S1A(30) | IMM(args_offset), tmp));
+ break;
+ }
+
+ reg_index++;
+ arg_types >>= SLJIT_ARG_SHIFT;
+ }
return SLJIT_SUCCESS;
}
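
The hunk above implements the SPARC V8 argument convention: the first six word arguments arrive in the input registers %i0..%i5 (encoded here as registers 24..29), and further words live in the caller's frame starting past the 16-word register save area, the hidden struct-return slot and the six register-argument home slots, which is exactly the (16 + 1 + 6) * sizeof(sljit_sw) used for args_offset. A minimal standalone sketch of that classification (illustrative only, assuming every argument is word sized):

#include <stdio.h>

int main(void)
{
	const int num_reg_args = 6;
	const int args_offset = (16 + 1 + 6) * 4; /* save area + sret + home slots = 92 */
	int arg;

	for (arg = 0; arg < 9; arg++) {
		if (arg < num_reg_args)
			printf("arg %d -> %%i%d (encoded as reg %d)\n", arg, arg, 24 + arg);
		else
			printf("arg %d -> [%%fp + %d]\n", arg, args_offset + (arg - num_reg_args) * 4);
	}
	return 0;
}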
@@ -539,12 +629,21 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *comp
return SLJIT_SUCCESS;
}
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler *compiler)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_return_void(compiler));
+
+ FAIL_IF(push_inst(compiler, JMPL | D(0) | S1A(31) | IMM(8), UNMOVABLE_INS));
+ return push_inst(compiler, RESTORE | D(SLJIT_R0) | S1(SLJIT_R0) | S2(0), UNMOVABLE_INS);
+}
+
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw)
{
CHECK_ERROR();
CHECK(check_sljit_emit_return(compiler, op, src, srcw));
- if (op != SLJIT_MOV || !FAST_IS_REG(src)) {
+ if (TYPE_CAST_NEEDED(op) || !FAST_IS_REG(src)) {
FAIL_IF(emit_mov_before_return(compiler, op, src, srcw));
src = SLJIT_R0;
}
@@ -591,7 +690,7 @@ static const sljit_ins data_transfer_insts[16 + 4] = {
#undef ARCH_32_64
/* Can perform an operation using at most 1 instruction. */
-static sljit_s32 getput_arg_fast(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw)
+static sljit_s32 getput_arg_fast(struct sljit_compiler *compiler, sljit_u32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw)
{
SLJIT_ASSERT(arg & SLJIT_MEM);
@@ -632,7 +731,7 @@ static sljit_s32 can_cache(sljit_s32 arg, sljit_sw argw, sljit_s32 next_arg, slj
}
/* Emit the necessary instructions. See can_cache above. */
-static sljit_s32 getput_arg(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw, sljit_s32 next_arg, sljit_sw next_argw)
+static sljit_s32 getput_arg(struct sljit_compiler *compiler, sljit_u32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw, sljit_s32 next_arg, sljit_sw next_argw)
{
sljit_s32 base, arg2, delay_slot;
sljit_ins dest;
@@ -660,7 +759,7 @@ static sljit_s32 getput_arg(struct sljit_compiler *compiler, sljit_s32 flags, sl
arg2 = reg;
else /* It must be a mov operation, so tmp1 must be free to use. */
arg2 = TMP_REG1;
- FAIL_IF(push_inst(compiler, SLL_W | D(arg2) | S1(OFFS_REG(arg)) | IMM_ARG | argw, DR(arg2)));
+ FAIL_IF(push_inst(compiler, SLL_W | D(arg2) | S1(OFFS_REG(arg)) | IMM_ARG | (sljit_ins)argw, DR(arg2)));
}
}
else {
@@ -692,7 +791,7 @@ static sljit_s32 getput_arg(struct sljit_compiler *compiler, sljit_s32 flags, sl
return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | dest | S1(base) | S2(arg2), delay_slot);
}
-static SLJIT_INLINE sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw)
+static SLJIT_INLINE sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_u32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw)
{
if (getput_arg_fast(compiler, flags, reg, arg, argw))
return compiler->error;
@@ -701,14 +800,14 @@ static SLJIT_INLINE sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit
return getput_arg(compiler, flags, reg, arg, argw, 0, 0);
}
-static SLJIT_INLINE sljit_s32 emit_op_mem2(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg1, sljit_sw arg1w, sljit_s32 arg2, sljit_sw arg2w)
+static SLJIT_INLINE sljit_s32 emit_op_mem2(struct sljit_compiler *compiler, sljit_u32 flags, sljit_s32 reg, sljit_s32 arg1, sljit_sw arg1w, sljit_s32 arg2, sljit_sw arg2w)
{
if (getput_arg_fast(compiler, flags, reg, arg1, arg1w))
return compiler->error;
return getput_arg(compiler, flags, reg, arg1, arg1w, arg2, arg2w);
}
-static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags,
+static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_u32 flags,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w)
@@ -727,11 +826,11 @@ static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s3
compiler->cache_argw = 0;
}
- if (dst != SLJIT_UNUSED) {
+ if (dst != TMP_REG2) {
if (FAST_IS_REG(dst)) {
dst_r = dst;
flags |= REG_DEST;
- if (op >= SLJIT_MOV && op <= SLJIT_MOV_P)
+ if (flags & MOVE_OP)
sugg_src2_r = dst_r;
}
else if ((dst & SLJIT_MEM) && !getput_arg_fast(compiler, flags | ARG_TEST, TMP_REG1, dst, dstw))
@@ -782,7 +881,7 @@ static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s3
if (FAST_IS_REG(src2)) {
src2_r = src2;
flags |= REG2_SOURCE;
- if (!(flags & REG_DEST) && op >= SLJIT_MOV && op <= SLJIT_MOV_P)
+ if ((flags & (REG_DEST | MOVE_OP)) == MOVE_OP)
dst_r = src2_r;
}
else if (src2 & SLJIT_IMM) {
@@ -793,8 +892,12 @@ static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s3
}
else {
src2_r = 0;
- if ((op >= SLJIT_MOV && op <= SLJIT_MOV_P) && (dst & SLJIT_MEM))
- dst_r = 0;
+ if (flags & MOVE_OP) {
+ if (dst & SLJIT_MEM)
+ dst_r = 0;
+ else
+ op = SLJIT_MOV;
+ }
}
}
}
@@ -888,7 +991,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
{
- sljit_s32 flags = HAS_FLAGS(op) ? SET_FLAGS : 0;
+ sljit_u32 flags = HAS_FLAGS(op) ? SET_FLAGS : 0;
CHECK_ERROR();
CHECK(check_sljit_emit_op1(compiler, op, dst, dstw, src, srcw));
@@ -898,33 +1001,29 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
op = GET_OPCODE(op);
switch (op) {
case SLJIT_MOV:
- case SLJIT_MOV_P:
- return emit_op(compiler, SLJIT_MOV, flags | WORD_DATA, dst, dstw, TMP_REG1, 0, src, srcw);
-
+#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
case SLJIT_MOV_U32:
- return emit_op(compiler, SLJIT_MOV_U32, flags | INT_DATA, dst, dstw, TMP_REG1, 0, src, srcw);
-
case SLJIT_MOV_S32:
- return emit_op(compiler, SLJIT_MOV_S32, flags | INT_DATA | SIGNED_DATA, dst, dstw, TMP_REG1, 0, src, srcw);
+ case SLJIT_MOV32:
+#endif
+ case SLJIT_MOV_P:
+ return emit_op(compiler, SLJIT_MOV, flags | WORD_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, srcw);
case SLJIT_MOV_U8:
- return emit_op(compiler, SLJIT_MOV_U8, flags | BYTE_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u8)srcw : srcw);
+ return emit_op(compiler, SLJIT_MOV_U8, flags | BYTE_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u8)srcw : srcw);
case SLJIT_MOV_S8:
- return emit_op(compiler, SLJIT_MOV_S8, flags | BYTE_DATA | SIGNED_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s8)srcw : srcw);
+ return emit_op(compiler, SLJIT_MOV_S8, flags | BYTE_DATA | SIGNED_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s8)srcw : srcw);
case SLJIT_MOV_U16:
- return emit_op(compiler, SLJIT_MOV_U16, flags | HALF_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u16)srcw : srcw);
+ return emit_op(compiler, SLJIT_MOV_U16, flags | HALF_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u16)srcw : srcw);
case SLJIT_MOV_S16:
- return emit_op(compiler, SLJIT_MOV_S16, flags | HALF_DATA | SIGNED_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s16)srcw : srcw);
+ return emit_op(compiler, SLJIT_MOV_S16, flags | HALF_DATA | SIGNED_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s16)srcw : srcw);
case SLJIT_NOT:
case SLJIT_CLZ:
return emit_op(compiler, op, flags, dst, dstw, TMP_REG1, 0, src, srcw);
-
- case SLJIT_NEG:
- return emit_op(compiler, SLJIT_SUB, flags | IMM_OP, dst, dstw, SLJIT_IMM, 0, src, srcw);
}
return SLJIT_SUCCESS;
@@ -935,17 +1034,14 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w)
{
- sljit_s32 flags = HAS_FLAGS(op) ? SET_FLAGS : 0;
+ sljit_u32 flags = HAS_FLAGS(op) ? SET_FLAGS : 0;
CHECK_ERROR();
- CHECK(check_sljit_emit_op2(compiler, op, dst, dstw, src1, src1w, src2, src2w));
+ CHECK(check_sljit_emit_op2(compiler, op, 0, dst, dstw, src1, src1w, src2, src2w));
ADJUST_LOCAL_OFFSET(dst, dstw);
ADJUST_LOCAL_OFFSET(src1, src1w);
ADJUST_LOCAL_OFFSET(src2, src2w);
- if (dst == SLJIT_UNUSED && !HAS_FLAGS(op))
- return SLJIT_SUCCESS;
-
op = GET_OPCODE(op);
switch (op) {
case SLJIT_ADD:
@@ -975,6 +1071,20 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
return SLJIT_SUCCESS;
}
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2u(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2, sljit_sw src2w)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_op2(compiler, op, 1, 0, 0, src1, src1w, src2, src2w));
+
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
+ || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ compiler->skip_checks = 1;
+#endif
+ return sljit_emit_op2(compiler, op, TMP_REG2, 0, src1, src1w, src2, src2w);
+}
+
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 src, sljit_sw srcw)
{
@@ -1015,7 +1125,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg)
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler,
- void *instruction, sljit_s32 size)
+ void *instruction, sljit_u32 size)
{
CHECK_ERROR();
CHECK(check_sljit_emit_op_custom(compiler, instruction, size));
@@ -1027,8 +1137,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *c
/* Floating point operators */
/* --------------------------------------------------------------------- */
-#define FLOAT_DATA(op) (DOUBLE_DATA | ((op & SLJIT_F32_OP) >> 7))
-#define SELECT_FOP(op, single, double) ((op & SLJIT_F32_OP) ? single : double)
+#define FLOAT_DATA(op) ((sljit_ins)DOUBLE_DATA | (((sljit_ins)(op) & SLJIT_32) >> 7))
+#define SELECT_FOP(op, single, double) ((op & SLJIT_32) ? single : double)
#define FLOAT_TMP_MEM_OFFSET (22 * sizeof(sljit_sw))
static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op,
@@ -1108,11 +1218,11 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
compiler->cache_arg = 0;
compiler->cache_argw = 0;
- SLJIT_COMPILE_ASSERT((SLJIT_F32_OP == 0x100) && !(DOUBLE_DATA & 0x2), float_transfer_bit_error);
+ SLJIT_COMPILE_ASSERT((SLJIT_32 == 0x100) && !(DOUBLE_DATA & 0x2), float_transfer_bit_error);
SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw);
if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32)
- op ^= SLJIT_F32_OP;
+ op ^= SLJIT_32;
dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
@@ -1126,7 +1236,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
if (src != dst_r) {
if (dst_r != TMP_FREG1) {
FAIL_IF(push_inst(compiler, FMOVS | FD(dst_r) | FS2(src), MOVABLE_INS));
- if (!(op & SLJIT_F32_OP))
+ if (!(op & SLJIT_32))
FAIL_IF(push_inst(compiler, FMOVS | FDN(dst_r) | FS2N(src), MOVABLE_INS));
}
else
@@ -1135,17 +1245,17 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
break;
case SLJIT_NEG_F64:
FAIL_IF(push_inst(compiler, FNEGS | FD(dst_r) | FS2(src), MOVABLE_INS));
- if (dst_r != src && !(op & SLJIT_F32_OP))
+ if (dst_r != src && !(op & SLJIT_32))
FAIL_IF(push_inst(compiler, FMOVS | FDN(dst_r) | FS2N(src), MOVABLE_INS));
break;
case SLJIT_ABS_F64:
FAIL_IF(push_inst(compiler, FABSS | FD(dst_r) | FS2(src), MOVABLE_INS));
- if (dst_r != src && !(op & SLJIT_F32_OP))
+ if (dst_r != src && !(op & SLJIT_32))
FAIL_IF(push_inst(compiler, FMOVS | FDN(dst_r) | FS2N(src), MOVABLE_INS));
break;
case SLJIT_CONV_F64_FROM_F32:
FAIL_IF(push_inst(compiler, SELECT_FOP(op, FSTOD, FDTOS) | FD(dst_r) | FS2(src), MOVABLE_INS));
- op ^= SLJIT_F32_OP;
+ op ^= SLJIT_32;
break;
}
@@ -1288,10 +1398,12 @@ static sljit_ins get_cc(struct sljit_compiler *compiler, sljit_s32 type)
case SLJIT_LESS:
case SLJIT_GREATER_F64: /* Unordered. */
+ case SLJIT_CARRY:
return DA(0x5);
case SLJIT_GREATER_EQUAL:
case SLJIT_LESS_EQUAL_F64:
+ case SLJIT_NOT_CARRY:
return DA(0xd);
case SLJIT_GREATER:
@@ -1315,15 +1427,17 @@ static sljit_ins get_cc(struct sljit_compiler *compiler, sljit_s32 type)
return DA(0x2);
case SLJIT_OVERFLOW:
- if (!(compiler->status_flags_state & SLJIT_CURRENT_FLAGS_ADD_SUB))
+ if (!(compiler->status_flags_state & (SLJIT_CURRENT_FLAGS_ADD | SLJIT_CURRENT_FLAGS_SUB)))
return DA(0x9);
+ /* fallthrough */
case SLJIT_UNORDERED_F64:
return DA(0x7);
case SLJIT_NOT_OVERFLOW:
- if (!(compiler->status_flags_state & SLJIT_CURRENT_FLAGS_ADD_SUB))
+ if (!(compiler->status_flags_state & (SLJIT_CURRENT_FLAGS_ADD | SLJIT_CURRENT_FLAGS_SUB)))
return DA(0x1);
+ /* fallthrough */
case SLJIT_ORDERED_F64:
return DA(0xf);
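
The DA() values in get_cc are SPARC Bicc condition fields for the integer condition codes. When the last flag-producing operation was not an add/sub, the JIT has materialized the overflow state so that the zero flag reflects it, which is why the fallback cases use BNE/BE rather than BVS/BVC. A reference sketch of the encodings relied on above:

/* SPARC V8 Bicc "cond" field values used by get_cc (reference sketch). */
enum sparc_bicc_cond {
	BICC_BE  = 0x1, /* equal          -> SLJIT_NOT_OVERFLOW fallback */
	BICC_BCS = 0x5, /* carry set      -> SLJIT_LESS, SLJIT_CARRY */
	BICC_BVS = 0x7, /* overflow set   -> SLJIT_OVERFLOW after add/sub */
	BICC_BNE = 0x9, /* not equal      -> SLJIT_OVERFLOW fallback */
	BICC_BCC = 0xd, /* carry clear    -> SLJIT_GREATER_EQUAL, SLJIT_NOT_CARRY */
	BICC_BVC = 0xf  /* overflow clear -> SLJIT_NOT_OVERFLOW after add/sub */
};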
@@ -1412,7 +1526,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi
jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
FAIL_IF(!jump);
set_jump(jump, compiler, JUMP_ADDR);
- jump->u.target = srcw;
+ jump->u.target = (sljit_uw)srcw;
if ((compiler->delay_slot & DST_INS_MASK) != UNMOVABLE_INS)
jump->flags |= IS_MOVABLE;
@@ -1460,7 +1574,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
sljit_s32 dst, sljit_sw dstw,
sljit_s32 type)
{
- sljit_s32 reg, flags = HAS_FLAGS(op) ? SET_FLAGS : 0;
+ sljit_s32 reg;
+ sljit_u32 flags = HAS_FLAGS(op) ? SET_FLAGS : 0;
CHECK_ERROR();
CHECK(check_sljit_emit_op_flags(compiler, op, dst, dstw, type));
diff --git a/src/3rdparty/pcre2/src/sljit/sljitNativeX86_32.c b/src/3rdparty/pcre2/src/sljit/sljitNativeX86_32.c
index 79a7e8bba5..ba4a1ebbc2 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitNativeX86_32.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitNativeX86_32.c
@@ -26,6 +26,10 @@
/* x86 32-bit arch dependent functions. */
+/* --------------------------------------------------------------------- */
+/* Operators */
+/* --------------------------------------------------------------------- */
+
static sljit_s32 emit_do_imm(struct sljit_compiler *compiler, sljit_u8 opcode, sljit_sw imm)
{
sljit_u8 *inst;
@@ -38,314 +42,8 @@ static sljit_s32 emit_do_imm(struct sljit_compiler *compiler, sljit_u8 opcode, s
return SLJIT_SUCCESS;
}
-static sljit_u8* generate_far_jump_code(struct sljit_jump *jump, sljit_u8 *code_ptr, sljit_sw executable_offset)
-{
- sljit_s32 type = jump->flags >> TYPE_SHIFT;
-
- if (type == SLJIT_JUMP) {
- *code_ptr++ = JMP_i32;
- jump->addr++;
- }
- else if (type >= SLJIT_FAST_CALL) {
- *code_ptr++ = CALL_i32;
- jump->addr++;
- }
- else {
- *code_ptr++ = GROUP_0F;
- *code_ptr++ = get_jump_code(type);
- jump->addr += 2;
- }
-
- if (jump->flags & JUMP_LABEL)
- jump->flags |= PATCH_MW;
- else
- sljit_unaligned_store_sw(code_ptr, jump->u.target - (jump->addr + 4) - (sljit_uw)executable_offset);
- code_ptr += 4;
-
- return code_ptr;
-}
-
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler,
- sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
- sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
-{
- sljit_s32 args, size;
- sljit_u8 *inst;
-
- CHECK_ERROR();
- CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
- set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
-
- /* Emit ENDBR32 at function entry if needed. */
- FAIL_IF(emit_endbranch(compiler));
-
- args = get_arg_count(arg_types);
- compiler->args = args;
-
- /* [esp+0] for saving temporaries and function calls. */
- compiler->stack_tmp_size = 2 * sizeof(sljit_sw);
-
-#if !(defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
- if (scratches > 3)
- compiler->stack_tmp_size = 3 * sizeof(sljit_sw);
-#endif
-
- compiler->saveds_offset = compiler->stack_tmp_size;
- if (scratches > 3)
- compiler->saveds_offset += ((scratches > (3 + 6)) ? 6 : (scratches - 3)) * sizeof(sljit_sw);
-
- compiler->locals_offset = compiler->saveds_offset;
-
- if (saveds > 3)
- compiler->locals_offset += (saveds - 3) * sizeof(sljit_sw);
-
- if (options & SLJIT_F64_ALIGNMENT)
- compiler->locals_offset = (compiler->locals_offset + sizeof(sljit_f64) - 1) & ~(sizeof(sljit_f64) - 1);
-
- size = 1 + (scratches > 9 ? (scratches - 9) : 0) + (saveds <= 3 ? saveds : 3);
-#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
- size += (args > 0 ? (args * 2) : 0) + (args > 2 ? 2 : 0);
-#else
- size += (args > 0 ? (2 + args * 3) : 0);
-#endif
- inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
- FAIL_IF(!inst);
-
- INC_SIZE(size);
- PUSH_REG(reg_map[TMP_REG1]);
-#if !(defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
- if (args > 0) {
- *inst++ = MOV_r_rm;
- *inst++ = MOD_REG | (reg_map[TMP_REG1] << 3) | 0x4 /* esp */;
- }
-#endif
- if (saveds > 2 || scratches > 9)
- PUSH_REG(reg_map[SLJIT_S2]);
- if (saveds > 1 || scratches > 10)
- PUSH_REG(reg_map[SLJIT_S1]);
- if (saveds > 0 || scratches > 11)
- PUSH_REG(reg_map[SLJIT_S0]);
-
-#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
- if (args > 0) {
- inst[0] = MOV_r_rm;
- inst[1] = MOD_REG | (reg_map[SLJIT_S0] << 3) | reg_map[SLJIT_R2];
- inst += 2;
- }
- if (args > 1) {
- inst[0] = MOV_r_rm;
- inst[1] = MOD_REG | (reg_map[SLJIT_S1] << 3) | reg_map[SLJIT_R1];
- inst += 2;
- }
- if (args > 2) {
- inst[0] = MOV_r_rm;
- inst[1] = MOD_DISP8 | (reg_map[SLJIT_S2] << 3) | 0x4 /* esp */;
- inst[2] = 0x24;
- inst[3] = sizeof(sljit_sw) * (3 + 2); /* saveds >= 3 as well. */
- }
-#else
- if (args > 0) {
- inst[0] = MOV_r_rm;
- inst[1] = MOD_DISP8 | (reg_map[SLJIT_S0] << 3) | reg_map[TMP_REG1];
- inst[2] = sizeof(sljit_sw) * 2;
- inst += 3;
- }
- if (args > 1) {
- inst[0] = MOV_r_rm;
- inst[1] = MOD_DISP8 | (reg_map[SLJIT_S1] << 3) | reg_map[TMP_REG1];
- inst[2] = sizeof(sljit_sw) * 3;
- inst += 3;
- }
- if (args > 2) {
- inst[0] = MOV_r_rm;
- inst[1] = MOD_DISP8 | (reg_map[SLJIT_S2] << 3) | reg_map[TMP_REG1];
- inst[2] = sizeof(sljit_sw) * 4;
- }
-#endif
-
- SLJIT_ASSERT(SLJIT_LOCALS_OFFSET > 0);
-
-#if defined(__APPLE__)
- /* Ignore pushed registers and SLJIT_LOCALS_OFFSET when computing the aligned local size. */
- saveds = (2 + (scratches > 9 ? (scratches - 9) : 0) + (saveds <= 3 ? saveds : 3)) * sizeof(sljit_uw);
- local_size = ((SLJIT_LOCALS_OFFSET + saveds + local_size + 15) & ~15) - saveds;
-#else
- if (options & SLJIT_F64_ALIGNMENT)
- local_size = SLJIT_LOCALS_OFFSET + ((local_size + sizeof(sljit_f64) - 1) & ~(sizeof(sljit_f64) - 1));
- else
- local_size = SLJIT_LOCALS_OFFSET + ((local_size + sizeof(sljit_sw) - 1) & ~(sizeof(sljit_sw) - 1));
-#endif
-
- compiler->local_size = local_size;
-
-#ifdef _WIN32
- if (local_size > 0) {
- if (local_size <= 4 * 4096) {
- if (local_size > 4096)
- EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -4096);
- if (local_size > 2 * 4096)
- EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -4096 * 2);
- if (local_size > 3 * 4096)
- EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -4096 * 3);
- }
- else {
- EMIT_MOV(compiler, SLJIT_R0, 0, SLJIT_SP, 0);
- EMIT_MOV(compiler, SLJIT_R1, 0, SLJIT_IMM, (local_size - 1) >> 12);
-
- SLJIT_ASSERT (reg_map[SLJIT_R0] == 0);
-
- EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_R0), -4096);
- FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
- SLJIT_R0, 0, SLJIT_R0, 0, SLJIT_IMM, 4096));
- FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
- SLJIT_R1, 0, SLJIT_R1, 0, SLJIT_IMM, 1));
-
- inst = (sljit_u8*)ensure_buf(compiler, 1 + 2);
- FAIL_IF(!inst);
-
- INC_SIZE(2);
- inst[0] = JNE_i8;
- inst[1] = (sljit_s8) -16;
- }
-
- EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -local_size);
- }
-#endif
-
- SLJIT_ASSERT(local_size > 0);
-
-#if !defined(__APPLE__)
- if (options & SLJIT_F64_ALIGNMENT) {
- EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_SP, 0);
-
-	/* Some space might be allocated during sljit_grow_stack() above on WIN32. */
- FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
- SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, local_size + sizeof(sljit_sw)));
-
-#if defined _WIN32 && !(defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
- if (compiler->local_size > 1024)
- FAIL_IF(emit_cum_binary(compiler, BINARY_OPCODE(ADD),
- TMP_REG1, 0, TMP_REG1, 0, SLJIT_IMM, sizeof(sljit_sw)));
-#endif
-
- inst = (sljit_u8*)ensure_buf(compiler, 1 + 6);
- FAIL_IF(!inst);
-
- INC_SIZE(6);
- inst[0] = GROUP_BINARY_81;
- inst[1] = MOD_REG | AND | reg_map[SLJIT_SP];
- sljit_unaligned_store_sw(inst + 2, ~(sizeof(sljit_f64) - 1));
-
- /* The real local size must be used. */
- return emit_mov(compiler, SLJIT_MEM1(SLJIT_SP), compiler->local_size, TMP_REG1, 0);
- }
-#endif
- return emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
- SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, local_size);
-}
-
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler,
- sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
- sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
-{
- CHECK_ERROR();
- CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
- set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
-
- compiler->args = get_arg_count(arg_types);
-
- /* [esp+0] for saving temporaries and function calls. */
- compiler->stack_tmp_size = 2 * sizeof(sljit_sw);
-
-#if !(defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
- if (scratches > 3)
- compiler->stack_tmp_size = 3 * sizeof(sljit_sw);
-#endif
-
- compiler->saveds_offset = compiler->stack_tmp_size;
- if (scratches > 3)
- compiler->saveds_offset += ((scratches > (3 + 6)) ? 6 : (scratches - 3)) * sizeof(sljit_sw);
-
- compiler->locals_offset = compiler->saveds_offset;
-
- if (saveds > 3)
- compiler->locals_offset += (saveds - 3) * sizeof(sljit_sw);
-
- if (options & SLJIT_F64_ALIGNMENT)
- compiler->locals_offset = (compiler->locals_offset + sizeof(sljit_f64) - 1) & ~(sizeof(sljit_f64) - 1);
-
-#if defined(__APPLE__)
- saveds = (2 + (scratches > 9 ? (scratches - 9) : 0) + (saveds <= 3 ? saveds : 3)) * sizeof(sljit_uw);
- compiler->local_size = ((SLJIT_LOCALS_OFFSET + saveds + local_size + 15) & ~15) - saveds;
-#else
- if (options & SLJIT_F64_ALIGNMENT)
- compiler->local_size = SLJIT_LOCALS_OFFSET + ((local_size + sizeof(sljit_f64) - 1) & ~(sizeof(sljit_f64) - 1));
- else
- compiler->local_size = SLJIT_LOCALS_OFFSET + ((local_size + sizeof(sljit_sw) - 1) & ~(sizeof(sljit_sw) - 1));
-#endif
- return SLJIT_SUCCESS;
-}
-
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw)
-{
- sljit_s32 size;
- sljit_u8 *inst;
-
- CHECK_ERROR();
- CHECK(check_sljit_emit_return(compiler, op, src, srcw));
- SLJIT_ASSERT(compiler->args >= 0);
-
- FAIL_IF(emit_mov_before_return(compiler, op, src, srcw));
-
- SLJIT_ASSERT(compiler->local_size > 0);
-
-#if !defined(__APPLE__)
- if (compiler->options & SLJIT_F64_ALIGNMENT)
- EMIT_MOV(compiler, SLJIT_SP, 0, SLJIT_MEM1(SLJIT_SP), compiler->local_size)
- else
- FAIL_IF(emit_cum_binary(compiler, BINARY_OPCODE(ADD),
- SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, compiler->local_size));
-#else
- FAIL_IF(emit_cum_binary(compiler, BINARY_OPCODE(ADD),
- SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, compiler->local_size));
-#endif
-
- size = 2 + (compiler->scratches > 9 ? (compiler->scratches - 9) : 0) +
- (compiler->saveds <= 3 ? compiler->saveds : 3);
-#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
- if (compiler->args > 2)
- size += 2;
-#endif
- inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
- FAIL_IF(!inst);
-
- INC_SIZE(size);
-
- if (compiler->saveds > 0 || compiler->scratches > 11)
- POP_REG(reg_map[SLJIT_S0]);
- if (compiler->saveds > 1 || compiler->scratches > 10)
- POP_REG(reg_map[SLJIT_S1]);
- if (compiler->saveds > 2 || compiler->scratches > 9)
- POP_REG(reg_map[SLJIT_S2]);
- POP_REG(reg_map[TMP_REG1]);
-#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
- if (compiler->args > 2)
- RET_I16(sizeof(sljit_sw));
- else
- RET();
-#else
- RET();
-#endif
-
- return SLJIT_SUCCESS;
-}
-
-/* --------------------------------------------------------------------- */
-/* Operators */
-/* --------------------------------------------------------------------- */
-
/* Size contains the flags as well. */
-static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_s32 size,
+static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_uw size,
/* The register or immediate operand. */
sljit_s32 a, sljit_sw imma,
/* The general operand (not immediate). */
@@ -353,8 +51,9 @@ static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_s32
{
sljit_u8 *inst;
sljit_u8 *buf_ptr;
- sljit_s32 flags = size & ~0xf;
- sljit_s32 inst_size;
+ sljit_u8 reg_map_b;
+ sljit_uw flags = size;
+ sljit_uw inst_size;
/* Both cannot be switched on. */
SLJIT_ASSERT((flags & (EX86_BIN_INS | EX86_SHIFT_INS)) != (EX86_BIN_INS | EX86_SHIFT_INS));
@@ -363,54 +62,57 @@ static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_s32
/* Both size flags cannot be switched on. */
SLJIT_ASSERT((flags & (EX86_BYTE_ARG | EX86_HALF_ARG)) != (EX86_BYTE_ARG | EX86_HALF_ARG));
/* SSE2 and immediate is not possible. */
- SLJIT_ASSERT(!(a & SLJIT_IMM) || !(flags & EX86_SSE2));
- SLJIT_ASSERT((flags & (EX86_PREF_F2 | EX86_PREF_F3)) != (EX86_PREF_F2 | EX86_PREF_F3)
- && (flags & (EX86_PREF_F2 | EX86_PREF_66)) != (EX86_PREF_F2 | EX86_PREF_66)
- && (flags & (EX86_PREF_F3 | EX86_PREF_66)) != (EX86_PREF_F3 | EX86_PREF_66));
- /* We don't support (%ebp). */
- SLJIT_ASSERT(!(b & SLJIT_MEM) || immb || reg_map[b & REG_MASK] != 5);
+ SLJIT_ASSERT(a != SLJIT_IMM || !(flags & EX86_SSE2));
+ SLJIT_ASSERT(((flags & (EX86_PREF_F2 | EX86_PREF_F3 | EX86_PREF_66))
+ & ((flags & (EX86_PREF_F2 | EX86_PREF_F3 | EX86_PREF_66)) - 1)) == 0);
+ SLJIT_ASSERT((flags & (EX86_VEX_EXT | EX86_REX)) != EX86_VEX_EXT);
size &= 0xf;
- inst_size = size;
+ /* The mod r/m byte is always present. */
+ inst_size = size + 1;
- if (flags & (EX86_PREF_F2 | EX86_PREF_F3))
- inst_size++;
- if (flags & EX86_PREF_66)
+ if (flags & (EX86_PREF_F2 | EX86_PREF_F3 | EX86_PREF_66))
inst_size++;
/* Calculate size of b. */
- inst_size += 1; /* mod r/m byte. */
if (b & SLJIT_MEM) {
- if ((b & REG_MASK) == SLJIT_UNUSED)
+ if (!(b & REG_MASK))
inst_size += sizeof(sljit_sw);
- else if (immb != 0 && !(b & OFFS_REG_MASK)) {
- /* Immediate operand. */
- if (immb <= 127 && immb >= -128)
- inst_size += sizeof(sljit_s8);
- else
- inst_size += sizeof(sljit_sw);
- }
+ else {
+ if (immb != 0 && !(b & OFFS_REG_MASK)) {
+ /* Immediate operand. */
+ if (immb <= 127 && immb >= -128)
+ inst_size += sizeof(sljit_s8);
+ else
+ inst_size += sizeof(sljit_sw);
+ } else if (reg_map[b & REG_MASK] == 5) {
+ /* Swap registers if possible. */
+ if ((b & OFFS_REG_MASK) && (immb & 0x3) == 0 && reg_map[OFFS_REG(b)] != 5)
+ b = SLJIT_MEM | OFFS_REG(b) | TO_OFFS_REG(b & REG_MASK);
+ else
+ inst_size += sizeof(sljit_s8);
+ }
- if ((b & REG_MASK) == SLJIT_SP && !(b & OFFS_REG_MASK))
- b |= TO_OFFS_REG(SLJIT_SP);
+ if (reg_map[b & REG_MASK] == 4 && !(b & OFFS_REG_MASK))
+ b |= TO_OFFS_REG(SLJIT_SP);
- if ((b & OFFS_REG_MASK) != SLJIT_UNUSED)
- inst_size += 1; /* SIB byte. */
+ if (b & OFFS_REG_MASK)
+ inst_size += 1; /* SIB byte. */
+ }
}
/* Calculate size of a. */
- if (a & SLJIT_IMM) {
+ if (a == SLJIT_IMM) {
if (flags & EX86_BIN_INS) {
if (imma <= 127 && imma >= -128) {
inst_size += 1;
flags |= EX86_BYTE_ARG;
} else
inst_size += 4;
- }
- else if (flags & EX86_SHIFT_INS) {
- imma &= 0x1f;
+ } else if (flags & EX86_SHIFT_INS) {
+ SLJIT_ASSERT(imma <= 0x1f);
if (imma != 1) {
- inst_size ++;
+ inst_size++;
flags |= EX86_BYTE_ARG;
}
} else if (flags & EX86_BYTE_ARG)
@@ -419,8 +121,7 @@ static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_s32
inst_size += sizeof(short);
else
inst_size += sizeof(sljit_sw);
- }
- else
+ } else
SLJIT_ASSERT(!(flags & EX86_SHIFT_INS) || a == SLJIT_PREF_SHIFT_REG);
inst = (sljit_u8*)ensure_buf(compiler, 1 + inst_size);
@@ -430,27 +131,26 @@ static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_s32
INC_SIZE(inst_size);
if (flags & EX86_PREF_F2)
*inst++ = 0xf2;
- if (flags & EX86_PREF_F3)
+ else if (flags & EX86_PREF_F3)
*inst++ = 0xf3;
- if (flags & EX86_PREF_66)
+ else if (flags & EX86_PREF_66)
*inst++ = 0x66;
buf_ptr = inst + size;
/* Encode mod/rm byte. */
if (!(flags & EX86_SHIFT_INS)) {
- if ((flags & EX86_BIN_INS) && (a & SLJIT_IMM))
+ if ((flags & EX86_BIN_INS) && a == SLJIT_IMM)
*inst = (flags & EX86_BYTE_ARG) ? GROUP_BINARY_83 : GROUP_BINARY_81;
- if (a & SLJIT_IMM)
+ if (a == SLJIT_IMM)
*buf_ptr = 0;
else if (!(flags & EX86_SSE2_OP1))
- *buf_ptr = reg_map[a] << 3;
+ *buf_ptr = U8(reg_map[a] << 3);
else
- *buf_ptr = a << 3;
- }
- else {
- if (a & SLJIT_IMM) {
+ *buf_ptr = U8(freg_map[a] << 3);
+ } else {
+ if (a == SLJIT_IMM) {
if (imma == 1)
*inst = GROUP_SHIFT_1;
else
@@ -460,241 +160,597 @@ static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_s32
*buf_ptr = 0;
}
- if (!(b & SLJIT_MEM))
- *buf_ptr++ |= MOD_REG + ((!(flags & EX86_SSE2_OP2)) ? reg_map[b] : b);
- else if ((b & REG_MASK) != SLJIT_UNUSED) {
- if ((b & OFFS_REG_MASK) == SLJIT_UNUSED || (b & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_SP)) {
- if (immb != 0) {
+ if (!(b & SLJIT_MEM)) {
+ *buf_ptr = U8(*buf_ptr | MOD_REG | (!(flags & EX86_SSE2_OP2) ? reg_map[b] : freg_map[b]));
+ buf_ptr++;
+ } else if (b & REG_MASK) {
+ reg_map_b = reg_map[b & REG_MASK];
+
+ if (!(b & OFFS_REG_MASK) || (b & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_SP)) {
+ if (immb != 0 || reg_map_b == 5) {
if (immb <= 127 && immb >= -128)
*buf_ptr |= 0x40;
else
*buf_ptr |= 0x80;
}
- if ((b & OFFS_REG_MASK) == SLJIT_UNUSED)
- *buf_ptr++ |= reg_map[b & REG_MASK];
+ if (!(b & OFFS_REG_MASK))
+ *buf_ptr++ |= reg_map_b;
else {
- *buf_ptr++ |= 0x04;
- *buf_ptr++ = reg_map[b & REG_MASK] | (reg_map[OFFS_REG(b)] << 3);
+ buf_ptr[0] |= 0x04;
+ buf_ptr[1] = U8(reg_map_b | (reg_map[OFFS_REG(b)] << 3));
+ buf_ptr += 2;
}
- if (immb != 0) {
+ if (immb != 0 || reg_map_b == 5) {
if (immb <= 127 && immb >= -128)
- *buf_ptr++ = immb; /* 8 bit displacement. */
+ *buf_ptr++ = U8(immb); /* 8 bit displacement. */
else {
sljit_unaligned_store_sw(buf_ptr, immb); /* 32 bit displacement. */
buf_ptr += sizeof(sljit_sw);
}
}
+ } else {
+ if (reg_map_b == 5)
+ *buf_ptr |= 0x40;
+
+ buf_ptr[0] |= 0x04;
+ buf_ptr[1] = U8(reg_map_b | (reg_map[OFFS_REG(b)] << 3) | (immb << 6));
+ buf_ptr += 2;
+
+ if (reg_map_b == 5)
+ *buf_ptr++ = 0;
}
- else {
- *buf_ptr++ |= 0x04;
- *buf_ptr++ = reg_map[b & REG_MASK] | (reg_map[OFFS_REG(b)] << 3) | (immb << 6);
- }
- }
- else {
+ } else {
*buf_ptr++ |= 0x05;
sljit_unaligned_store_sw(buf_ptr, immb); /* 32 bit displacement. */
buf_ptr += sizeof(sljit_sw);
}
- if (a & SLJIT_IMM) {
+ if (a == SLJIT_IMM) {
if (flags & EX86_BYTE_ARG)
- *buf_ptr = imma;
+ *buf_ptr = U8(imma);
else if (flags & EX86_HALF_ARG)
- sljit_unaligned_store_s16(buf_ptr, imma);
+ sljit_unaligned_store_s16(buf_ptr, (sljit_s16)imma);
else if (!(flags & EX86_SHIFT_INS))
sljit_unaligned_store_sw(buf_ptr, imma);
}
- return !(flags & EX86_SHIFT_INS) ? inst : (inst + 1);
+ return inst;
}
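
The byte assembled in *buf_ptr above is the x86 ModRM byte, optionally followed by a SIB byte and a displacement. Two encoding holes drive the new reg_map_b == 5 checks: rm = 4 always means "SIB byte follows", and a base of 5 (ebp) cannot be expressed with mod = 00, so it is promoted to mod = 01 with an explicit zero disp8. A field-packing sketch (not the patch's API):

#include <stdint.h>

/* mod:2 | reg:3 | rm:3 */
static uint8_t modrm(uint8_t mod, uint8_t reg, uint8_t rm)
{
	return (uint8_t)((mod << 6) | ((reg & 7) << 3) | (rm & 7));
}

/* scale:2 | index:3 | base:3 */
static uint8_t sib(uint8_t scale, uint8_t index, uint8_t base)
{
	return (uint8_t)((scale << 6) | ((index & 7) << 3) | (base & 7));
}

/* Example: [ebp] is modrm(1, reg, 5) followed by a zero disp8, because
   mod = 00 with rm = 101 instead selects an absolute disp32 address. */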
-/* --------------------------------------------------------------------- */
-/* Call / return instructions */
-/* --------------------------------------------------------------------- */
+static sljit_s32 emit_vex_instruction(struct sljit_compiler *compiler, sljit_uw op,
+ /* The first and second register operand. */
+ sljit_s32 a, sljit_s32 v,
+ /* The general operand (not immediate). */
+ sljit_s32 b, sljit_sw immb)
+{
+ sljit_u8 *inst;
+ sljit_u8 vex = 0;
+ sljit_u8 vex_m = 0;
+ sljit_uw size;
-#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
+ SLJIT_ASSERT(((op & (EX86_PREF_F2 | EX86_PREF_F3 | EX86_PREF_66))
+ & ((op & (EX86_PREF_F2 | EX86_PREF_F3 | EX86_PREF_66)) - 1)) == 0);
-static sljit_s32 c_fast_call_get_stack_size(sljit_s32 arg_types, sljit_s32 *word_arg_count_ptr)
-{
- sljit_s32 stack_size = 0;
- sljit_s32 word_arg_count = 0;
+ if (op & VEX_OP_0F38)
+ vex_m = 0x2;
+ else if (op & VEX_OP_0F3A)
+ vex_m = 0x3;
- arg_types >>= SLJIT_DEF_SHIFT;
+ if (op & VEX_W) {
+ if (vex_m == 0)
+ vex_m = 0x1;
- while (arg_types) {
- switch (arg_types & SLJIT_DEF_MASK) {
- case SLJIT_ARG_TYPE_F32:
- stack_size += sizeof(sljit_f32);
- break;
- case SLJIT_ARG_TYPE_F64:
- stack_size += sizeof(sljit_f64);
- break;
- default:
- word_arg_count++;
- if (word_arg_count > 2)
- stack_size += sizeof(sljit_sw);
- break;
- }
+ vex |= 0x80;
+ }
+
+ if (op & EX86_PREF_66)
+ vex |= 0x1;
+ else if (op & EX86_PREF_F2)
+ vex |= 0x3;
+ else if (op & EX86_PREF_F3)
+ vex |= 0x2;
+
+ op &= ~(EX86_PREF_66 | EX86_PREF_F2 | EX86_PREF_F3);
+
+ if (op & VEX_256)
+ vex |= 0x4;
+
+ vex = U8(vex | ((((op & VEX_SSE2_OPV) ? freg_map[v] : reg_map[v]) ^ 0xf) << 3));
+
+ size = op & ~(sljit_uw)0xff;
+ size |= (vex_m == 0) ? 3 : 4;
- arg_types >>= SLJIT_DEF_SHIFT;
+ inst = emit_x86_instruction(compiler, size, a, 0, b, immb);
+ FAIL_IF(!inst);
+
+ if (vex_m == 0) {
+ inst[0] = 0xc5;
+ inst[1] = U8(vex | 0x80);
+ inst[2] = U8(op);
+ return SLJIT_SUCCESS;
}
- if (word_arg_count_ptr)
- *word_arg_count_ptr = word_arg_count;
+ inst[0] = 0xc4;
+ inst[1] = U8(vex_m | 0xe0);
+ inst[2] = vex;
+ inst[3] = U8(op);
+ return SLJIT_SUCCESS;
+}
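
emit_vex_instruction chooses between the two VEX prefix forms: the 2-byte 0xC5 form, usable only for the 0F opcode map with VEX.W clear, and the general 3-byte 0xC4 form. The fields follow this layout (a sketch; pp and m-mmmm values as defined by the x86 VEX specification):

/* 2-byte VEX: C5 | R' vvvv L pp
   3-byte VEX: C4 | R' X' B' m-mmmm | W vvvv L pp
   pp: 1 = 66, 2 = F3, 3 = F2; m-mmmm: 1 = 0F, 2 = 0F38, 3 = 0F3A.
   vvvv names the extra source register and is stored inverted, hence the
   (reg ^ 0xf) << 3 above; L (0x4) selects 256-bit width. */
#include <stdint.h>

static uint8_t vex2_byte1(uint8_t vvvv, int l, uint8_t pp)
{
	/* 0x80 keeps R' set, i.e. no register extension, which is always
	   the case on 32-bit x86. */
	return (uint8_t)(0x80 | (((vvvv ^ 0xf) & 0xf) << 3) | (l ? 0x4 : 0) | pp);
}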
+
+/* --------------------------------------------------------------------- */
+/* Enter / return */
+/* --------------------------------------------------------------------- */
+
+static sljit_u8* generate_far_jump_code(struct sljit_jump *jump, sljit_u8 *code_ptr, sljit_sw executable_offset)
+{
+ sljit_uw type = jump->flags >> TYPE_SHIFT;
+
+ if (type == SLJIT_JUMP) {
+ *code_ptr++ = JMP_i32;
+ jump->addr++;
+ }
+ else if (type >= SLJIT_FAST_CALL) {
+ *code_ptr++ = CALL_i32;
+ jump->addr++;
+ }
+ else {
+ *code_ptr++ = GROUP_0F;
+ *code_ptr++ = get_jump_code(type);
+ jump->addr += 2;
+ }
- return stack_size;
+ if (jump->flags & JUMP_LABEL)
+ jump->flags |= PATCH_MW;
+ else
+ sljit_unaligned_store_sw(code_ptr, (sljit_sw)(jump->u.target - (jump->addr + 4) - (sljit_uw)executable_offset));
+ code_ptr += 4;
+
+ return code_ptr;
}
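
For targets known at generation time, the stored word is a self-relative displacement: the operand of JMP/CALL rel32 is measured from the end of the instruction, hence the (jump->addr + 4) term once jump->addr has been advanced past the opcode bytes. A worked sketch with made-up addresses:

#include <stdio.h>

int main(void)
{
	unsigned long target = 0x08049000ul;  /* hypothetical destination */
	unsigned long disp_at = 0x08048100ul; /* address of the rel32 field */
	long rel32 = (long)(target - (disp_at + 4));

	printf("E9 disp=%08lx -> %#lx\n", (unsigned long)rel32 & 0xfffffffful, target);
	return 0;
}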
-static sljit_s32 c_fast_call_with_args(struct sljit_compiler *compiler,
- sljit_s32 arg_types, sljit_s32 stack_size, sljit_s32 word_arg_count, sljit_s32 swap_args)
+#define ENTER_TMP_TO_R4 0x00001
+#define ENTER_TMP_TO_S 0x00002
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler,
+ sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
+ sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
+ sljit_s32 word_arg_count, saved_arg_count, float_arg_count;
+ sljit_s32 size, args_size, types, status;
+ sljit_s32 kept_saveds_count = SLJIT_KEPT_SAVEDS_COUNT(options);
sljit_u8 *inst;
- sljit_s32 float_arg_count;
+#ifdef _WIN32
+ sljit_s32 r2_offset = -1;
+#endif
- if (stack_size == sizeof(sljit_sw) && word_arg_count == 3) {
- inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
- FAIL_IF(!inst);
- INC_SIZE(1);
- PUSH_REG(reg_map[SLJIT_R2]);
- }
- else if (stack_size > 0) {
- if (word_arg_count >= 4)
- EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), compiler->saveds_offset - sizeof(sljit_sw));
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
+ set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
- FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
- SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, stack_size));
+ /* Emit ENDBR32 at function entry if needed. */
+ FAIL_IF(emit_endbranch(compiler));
+
+ SLJIT_COMPILE_ASSERT(SLJIT_FR0 == 1, float_register_index_start);
+
+ arg_types >>= SLJIT_ARG_SHIFT;
+ word_arg_count = 0;
+ status = 0;
+
+ if (options & SLJIT_ENTER_REG_ARG) {
+ args_size = 3 * SSIZE_OF(sw);
- stack_size = 0;
- arg_types >>= SLJIT_DEF_SHIFT;
- word_arg_count = 0;
- float_arg_count = 0;
while (arg_types) {
- switch (arg_types & SLJIT_DEF_MASK) {
- case SLJIT_ARG_TYPE_F32:
+ if ((arg_types & SLJIT_ARG_MASK) < SLJIT_ARG_TYPE_F64) {
+ word_arg_count++;
+ if (word_arg_count >= 4)
+ status |= ENTER_TMP_TO_R4;
+ }
+
+ arg_types >>= SLJIT_ARG_SHIFT;
+ }
+
+ compiler->args_size = 0;
+ } else {
+ types = arg_types;
+ saved_arg_count = 0;
+ float_arg_count = 0;
+ args_size = SSIZE_OF(sw);
+ while (types) {
+ switch (types & SLJIT_ARG_MASK) {
+ case SLJIT_ARG_TYPE_F64:
float_arg_count++;
- FAIL_IF(emit_sse2_store(compiler, 1, SLJIT_MEM1(SLJIT_SP), stack_size, float_arg_count));
- stack_size += sizeof(sljit_f32);
+ FAIL_IF(emit_sse2_load(compiler, 0, float_arg_count, SLJIT_MEM1(SLJIT_SP), args_size));
+ args_size += SSIZE_OF(f64);
break;
- case SLJIT_ARG_TYPE_F64:
+ case SLJIT_ARG_TYPE_F32:
float_arg_count++;
- FAIL_IF(emit_sse2_store(compiler, 0, SLJIT_MEM1(SLJIT_SP), stack_size, float_arg_count));
- stack_size += sizeof(sljit_f64);
+ FAIL_IF(emit_sse2_load(compiler, 1, float_arg_count, SLJIT_MEM1(SLJIT_SP), args_size));
+ args_size += SSIZE_OF(f32);
break;
default:
word_arg_count++;
- if (word_arg_count == 3) {
- EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), stack_size, SLJIT_R2, 0);
- stack_size += sizeof(sljit_sw);
- }
- else if (word_arg_count == 4) {
- EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), stack_size, TMP_REG1, 0);
- stack_size += sizeof(sljit_sw);
+
+ if (!(types & SLJIT_ARG_TYPE_SCRATCH_REG))
+ saved_arg_count++;
+
+ if (word_arg_count == 4) {
+ if (types & SLJIT_ARG_TYPE_SCRATCH_REG) {
+ status |= ENTER_TMP_TO_R4;
+ arg_types &= ~(SLJIT_ARG_FULL_MASK << 3 * SLJIT_ARG_SHIFT);
+ } else if (saved_arg_count == 4) {
+ status |= ENTER_TMP_TO_S;
+ arg_types &= ~(SLJIT_ARG_FULL_MASK << 3 * SLJIT_ARG_SHIFT);
+ }
}
+
+ args_size += SSIZE_OF(sw);
break;
}
+ types >>= SLJIT_ARG_SHIFT;
+ }
+
+ args_size -= SSIZE_OF(sw);
+ compiler->args_size = args_size;
+ }
- arg_types >>= SLJIT_DEF_SHIFT;
+ size = (scratches > 9 ? (scratches - 9) : 0) + (saveds <= 3 ? saveds : 3) - kept_saveds_count;
+ if (!(options & SLJIT_ENTER_REG_ARG))
+ size++;
+
+ if (size != 0) {
+ inst = (sljit_u8*)ensure_buf(compiler, (sljit_uw)(size + 1));
+ FAIL_IF(!inst);
+
+ INC_SIZE((sljit_uw)size);
+
+ if (!(options & SLJIT_ENTER_REG_ARG))
+ PUSH_REG(reg_map[TMP_REG1]);
+
+ if ((saveds > 2 && kept_saveds_count <= 2) || scratches > 9)
+ PUSH_REG(reg_map[SLJIT_S2]);
+ if ((saveds > 1 && kept_saveds_count <= 1) || scratches > 10)
+ PUSH_REG(reg_map[SLJIT_S1]);
+ if ((saveds > 0 && kept_saveds_count == 0) || scratches > 11)
+ PUSH_REG(reg_map[SLJIT_S0]);
+
+ size *= SSIZE_OF(sw);
+ }
+
+ if (status & (ENTER_TMP_TO_R4 | ENTER_TMP_TO_S))
+ EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), args_size + size);
+
+ size += SSIZE_OF(sw);
+
+ local_size = ((SLJIT_LOCALS_OFFSET_BASE + local_size + size + 0xf) & ~0xf) - size;
+ compiler->local_size = local_size;
+
+ word_arg_count = 0;
+ saved_arg_count = 0;
+ args_size = size;
+ while (arg_types) {
+ switch (arg_types & SLJIT_ARG_MASK) {
+ case SLJIT_ARG_TYPE_F64:
+ args_size += SSIZE_OF(f64);
+ break;
+ case SLJIT_ARG_TYPE_F32:
+ args_size += SSIZE_OF(f32);
+ break;
+ default:
+ word_arg_count++;
+ SLJIT_ASSERT(word_arg_count <= 3 || (word_arg_count == 4 && !(status & (ENTER_TMP_TO_R4 | ENTER_TMP_TO_S))));
+
+ if (arg_types & SLJIT_ARG_TYPE_SCRATCH_REG) {
+#ifdef _WIN32
+ if (word_arg_count == 3 && local_size > 4 * 4096)
+ r2_offset = local_size + args_size;
+ else
+#endif
+ EMIT_MOV(compiler, word_arg_count, 0, SLJIT_MEM1(SLJIT_SP), args_size);
+
+ } else {
+ EMIT_MOV(compiler, SLJIT_S0 - saved_arg_count, 0, SLJIT_MEM1(SLJIT_SP), args_size);
+ saved_arg_count++;
+ }
+
+ args_size += SSIZE_OF(sw);
+ break;
}
+ arg_types >>= SLJIT_ARG_SHIFT;
}
- if (word_arg_count > 0) {
- if (swap_args) {
- inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
- FAIL_IF(!inst);
- INC_SIZE(1);
+ SLJIT_ASSERT(SLJIT_LOCALS_OFFSET > 0);
- *inst++ = XCHG_EAX_r | reg_map[SLJIT_R2];
+#ifdef _WIN32
+ SLJIT_ASSERT(r2_offset == -1 || local_size > 4 * 4096);
+
+ if (local_size > 4096) {
+ if (local_size <= 4 * 4096) {
+ BINARY_IMM32(OR, 0, SLJIT_MEM1(SLJIT_SP), -4096);
+
+ if (local_size > 2 * 4096)
+ BINARY_IMM32(OR, 0, SLJIT_MEM1(SLJIT_SP), -4096 * 2);
+ if (local_size > 3 * 4096)
+ BINARY_IMM32(OR, 0, SLJIT_MEM1(SLJIT_SP), -4096 * 3);
}
else {
+ if (options & SLJIT_ENTER_REG_ARG) {
+ SLJIT_ASSERT(r2_offset == -1);
+
+ inst = (sljit_u8*)ensure_buf(compiler, (sljit_uw)(1 + 1));
+ FAIL_IF(!inst);
+ INC_SIZE(1);
+ PUSH_REG(reg_map[SLJIT_R2]);
+
+ local_size -= SSIZE_OF(sw);
+ r2_offset = local_size;
+ }
+
+ EMIT_MOV(compiler, SLJIT_R2, 0, SLJIT_IMM, local_size >> 12);
+
+ BINARY_IMM32(OR, 0, SLJIT_MEM1(SLJIT_SP), -4096);
+ BINARY_IMM32(SUB, 4096, SLJIT_SP, 0);
+
inst = (sljit_u8*)ensure_buf(compiler, 1 + 2);
FAIL_IF(!inst);
+
INC_SIZE(2);
+ inst[0] = LOOP_i8;
+ inst[1] = (sljit_u8)-16;
+ local_size &= 0xfff;
+ }
+ }
- *inst++ = MOV_r_rm;
- *inst++ = MOD_REG | (reg_map[SLJIT_R2] << 3) | reg_map[SLJIT_R0];
+ if (local_size > 0) {
+ BINARY_IMM32(OR, 0, SLJIT_MEM1(SLJIT_SP), -local_size);
+ BINARY_IMM32(SUB, local_size, SLJIT_SP, 0);
+ }
+
+ if (r2_offset != -1)
+ EMIT_MOV(compiler, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_SP), r2_offset);
+
+#else /* !_WIN32 */
+
+ SLJIT_ASSERT(local_size > 0);
+
+ BINARY_IMM32(SUB, local_size, SLJIT_SP, 0);
+
+#endif /* _WIN32 */
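
On Windows the stack is committed page by page through a guard page, so a frame larger than 4 KiB must touch every page in order before esp crosses it; the hunk above does this with OR dword [esp - 4096], 0 probes and, beyond four pages, a LOOP-driven probe loop. The same mechanism rendered in C (a sketch of the idea, not the patch's code):

/* Touch every page of a large frame top-down so each guard page is
   committed before it is addressed (the job __chkstk does on MSVC
   targets). */
static void probe_stack(volatile char *sp, unsigned long frame_size)
{
	unsigned long off;

	for (off = 4096; off <= frame_size; off += 4096)
		sp[-(long)off] |= 0; /* read-modify-write, like OR [esp-off], 0 */
}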
+
+ size = SLJIT_LOCALS_OFFSET_BASE - SSIZE_OF(sw);
+ kept_saveds_count = SLJIT_R3 - kept_saveds_count;
+
+ while (saved_arg_count > 3) {
+ EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), size, kept_saveds_count, 0);
+ kept_saveds_count++;
+ size -= SSIZE_OF(sw);
+ saved_arg_count--;
+ }
+
+ if (status & (ENTER_TMP_TO_R4 | ENTER_TMP_TO_S)) {
+ if (status & ENTER_TMP_TO_R4)
+ size = 2 * SSIZE_OF(sw);
+
+ EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), size, TMP_REG1, 0);
+ }
+
+ return SLJIT_SUCCESS;
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler,
+ sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
+ sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
+{
+ sljit_s32 args_size;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
+ set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
+
+ arg_types >>= SLJIT_ARG_SHIFT;
+ args_size = 0;
+
+ if (!(options & SLJIT_ENTER_REG_ARG)) {
+ while (arg_types) {
+ switch (arg_types & SLJIT_ARG_MASK) {
+ case SLJIT_ARG_TYPE_F64:
+ args_size += SSIZE_OF(f64);
+ break;
+ case SLJIT_ARG_TYPE_F32:
+ args_size += SSIZE_OF(f32);
+ break;
+ default:
+ args_size += SSIZE_OF(sw);
+ break;
+ }
+ arg_types >>= SLJIT_ARG_SHIFT;
}
}
+ compiler->args_size = args_size;
+
+ /* [esp+0] for saving temporaries and for function calls. */
+
+ saveds = (1 + (scratches > 9 ? (scratches - 9) : 0) + (saveds <= 3 ? saveds : 3) - SLJIT_KEPT_SAVEDS_COUNT(options)) * SSIZE_OF(sw);
+
+ /* Saving ebp. */
+ if (!(options & SLJIT_ENTER_REG_ARG))
+ saveds += SSIZE_OF(sw);
+
+ compiler->local_size = ((SLJIT_LOCALS_OFFSET_BASE + local_size + saveds + 0xf) & ~0xf) - saveds;
return SLJIT_SUCCESS;
}
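
The closing formula rounds the whole frame, pushes included, to a 16-byte boundary and then subtracts the pushes again, so the pushed registers plus the local area always total a multiple of 16. A worked example with hypothetical numbers:

#include <stdio.h>

int main(void)
{
	int base = 8;    /* hypothetical SLJIT_LOCALS_OFFSET_BASE */
	int locals = 20; /* requested local_size */
	int pushed = 16; /* four saved registers * 4 bytes */
	int rounded = ((base + locals + pushed + 0xf) & ~0xf) - pushed;

	printf("local_size = %d, frame = %d\n", rounded, rounded + pushed); /* 32, 48 */
	return 0;
}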
-#endif
+static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler, sljit_s32 is_return_to)
+{
+ sljit_s32 kept_saveds_count = SLJIT_KEPT_SAVEDS_COUNT(compiler->options);
+ sljit_s32 local_size, saveds;
+ sljit_uw size;
+ sljit_u8 *inst;
+
+ size = (sljit_uw)((compiler->scratches > 9 ? (compiler->scratches - 9) : 0) +
+ (compiler->saveds <= 3 ? compiler->saveds : 3) - kept_saveds_count);
+
+ local_size = compiler->local_size;
+
+ if (!(compiler->options & SLJIT_ENTER_REG_ARG))
+ size++;
+ else if (is_return_to && size == 0) {
+ local_size += SSIZE_OF(sw);
+ is_return_to = 0;
+ }
+
+ if (local_size > 0)
+ BINARY_IMM32(ADD, local_size, SLJIT_SP, 0);
+
+ if (size == 0)
+ return SLJIT_SUCCESS;
+
+ inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
+ FAIL_IF(!inst);
+
+ INC_SIZE(size);
-static sljit_s32 cdecl_call_get_stack_size(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *word_arg_count_ptr)
+ saveds = compiler->saveds;
+
+ if ((saveds > 0 && kept_saveds_count == 0) || compiler->scratches > 11)
+ POP_REG(reg_map[SLJIT_S0]);
+ if ((saveds > 1 && kept_saveds_count <= 1) || compiler->scratches > 10)
+ POP_REG(reg_map[SLJIT_S1]);
+ if ((saveds > 2 && kept_saveds_count <= 2) || compiler->scratches > 9)
+ POP_REG(reg_map[SLJIT_S2]);
+
+ if (!(compiler->options & SLJIT_ENTER_REG_ARG))
+ POP_REG(reg_map[TMP_REG1]);
+
+ if (is_return_to)
+ BINARY_IMM32(ADD, sizeof(sljit_sw), SLJIT_SP, 0);
+
+ return SLJIT_SUCCESS;
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler *compiler)
{
- sljit_s32 stack_size = 0;
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_return_void(compiler));
+
+ SLJIT_ASSERT(compiler->args_size >= 0);
+ SLJIT_ASSERT(compiler->local_size > 0);
+
+ FAIL_IF(emit_stack_frame_release(compiler, 0));
+
+ return emit_byte(compiler, RET_near);
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_to(struct sljit_compiler *compiler,
+ sljit_s32 src, sljit_sw srcw)
+{
+ sljit_s32 src_r;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_return_to(compiler, src, srcw));
+
+ if ((src & SLJIT_MEM) || (src > SLJIT_R2 && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options)))) {
+ ADJUST_LOCAL_OFFSET(src, srcw);
+ CHECK_EXTRA_REGS(src, srcw, (void)0);
+
+ src_r = (compiler->options & SLJIT_ENTER_REG_ARG) ? TMP_REG1 : SLJIT_R1;
+
+ EMIT_MOV(compiler, src_r, 0, src, srcw);
+ src = src_r;
+ srcw = 0;
+ }
+
+ FAIL_IF(emit_stack_frame_release(compiler, 1));
+
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_ijump(compiler, SLJIT_JUMP, src, srcw);
+}
+
+/* --------------------------------------------------------------------- */
+/* Call / return instructions */
+/* --------------------------------------------------------------------- */
+
+static sljit_s32 call_get_stack_size(sljit_s32 arg_types, sljit_s32 *word_arg_count_ptr)
+{
+ sljit_sw stack_size = 0;
sljit_s32 word_arg_count = 0;
- arg_types >>= SLJIT_DEF_SHIFT;
+ arg_types >>= SLJIT_ARG_SHIFT;
while (arg_types) {
- switch (arg_types & SLJIT_DEF_MASK) {
- case SLJIT_ARG_TYPE_F32:
- stack_size += sizeof(sljit_f32);
- break;
+ switch (arg_types & SLJIT_ARG_MASK) {
case SLJIT_ARG_TYPE_F64:
- stack_size += sizeof(sljit_f64);
+ stack_size += SSIZE_OF(f64);
+ break;
+ case SLJIT_ARG_TYPE_F32:
+ stack_size += SSIZE_OF(f32);
break;
default:
word_arg_count++;
- stack_size += sizeof(sljit_sw);
+ stack_size += SSIZE_OF(sw);
break;
}
- arg_types >>= SLJIT_DEF_SHIFT;
+ arg_types >>= SLJIT_ARG_SHIFT;
}
if (word_arg_count_ptr)
*word_arg_count_ptr = word_arg_count;
- if (stack_size <= compiler->stack_tmp_size)
+ if (stack_size <= 4 * SSIZE_OF(sw))
return 0;
-#if defined(__APPLE__)
- return ((stack_size - compiler->stack_tmp_size + 15) & ~15);
-#else
- return stack_size - compiler->stack_tmp_size;
-#endif
+ return ((stack_size - (4 * SSIZE_OF(sw)) + 0xf) & ~0xf);
}
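
The first four outgoing word slots reuse the permanent scratch area at [esp+0..15], so only the excess is allocated, rounded up to 16 bytes to preserve stack alignment across the call. For example, five word arguments give stack_size = 20, and (20 - 16 + 0xf) & ~0xf = 16 extra bytes:

#include <stdio.h>

int main(void)
{
	int stack_size = 5 * 4; /* five word-sized arguments */
	int extra = (stack_size <= 16) ? 0 : ((stack_size - 16 + 0xf) & ~0xf);

	printf("extra = %d\n", extra); /* 16 */
	return 0;
}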
-static sljit_s32 cdecl_call_with_args(struct sljit_compiler *compiler,
- sljit_s32 arg_types, sljit_s32 stack_size, sljit_s32 word_arg_count)
+static sljit_s32 call_with_args(struct sljit_compiler *compiler,
+ sljit_s32 arg_types, sljit_sw stack_size, sljit_s32 word_arg_count, sljit_s32 keep_tmp1)
{
- sljit_s32 float_arg_count = 0;
+ sljit_s32 float_arg_count = 0, arg4_reg = 0, arg_offset;
+ sljit_u8 *inst;
+
+ if (word_arg_count >= 4) {
+ arg4_reg = SLJIT_R0;
- if (word_arg_count >= 4)
- EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), compiler->saveds_offset - sizeof(sljit_sw));
+ if (!keep_tmp1) {
+ EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), 2 * SSIZE_OF(sw));
+ arg4_reg = TMP_REG1;
+ }
+ }
if (stack_size > 0)
- FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
- SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, stack_size));
+ BINARY_IMM32(SUB, stack_size, SLJIT_SP, 0);
- stack_size = 0;
+ arg_offset = 0;
word_arg_count = 0;
- arg_types >>= SLJIT_DEF_SHIFT;
+ arg_types >>= SLJIT_ARG_SHIFT;
while (arg_types) {
- switch (arg_types & SLJIT_DEF_MASK) {
- case SLJIT_ARG_TYPE_F32:
+ switch (arg_types & SLJIT_ARG_MASK) {
+ case SLJIT_ARG_TYPE_F64:
float_arg_count++;
- FAIL_IF(emit_sse2_store(compiler, 1, SLJIT_MEM1(SLJIT_SP), stack_size, float_arg_count));
- stack_size += sizeof(sljit_f32);
+ FAIL_IF(emit_sse2_store(compiler, 0, SLJIT_MEM1(SLJIT_SP), arg_offset, float_arg_count));
+ arg_offset += SSIZE_OF(f64);
break;
- case SLJIT_ARG_TYPE_F64:
+ case SLJIT_ARG_TYPE_F32:
float_arg_count++;
- FAIL_IF(emit_sse2_store(compiler, 0, SLJIT_MEM1(SLJIT_SP), stack_size, float_arg_count));
- stack_size += sizeof(sljit_f64);
+ FAIL_IF(emit_sse2_store(compiler, 1, SLJIT_MEM1(SLJIT_SP), arg_offset, float_arg_count));
+ arg_offset += SSIZE_OF(f32);
break;
default:
word_arg_count++;
- EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), stack_size, (word_arg_count >= 4) ? TMP_REG1 : word_arg_count, 0);
- stack_size += sizeof(sljit_sw);
+ EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), arg_offset, (word_arg_count >= 4) ? arg4_reg : word_arg_count, 0);
+
+ if (word_arg_count == 1 && arg4_reg == SLJIT_R0)
+ EMIT_MOV(compiler, SLJIT_R0, 0, SLJIT_MEM1(SLJIT_SP), 2 * SSIZE_OF(sw) + stack_size);
+
+ arg_offset += SSIZE_OF(sw);
break;
}
- arg_types >>= SLJIT_DEF_SHIFT;
+ arg_types >>= SLJIT_ARG_SHIFT;
}
return SLJIT_SUCCESS;
@@ -707,13 +763,12 @@ static sljit_s32 post_call_with_args(struct sljit_compiler *compiler,
sljit_s32 single;
if (stack_size > 0)
- FAIL_IF(emit_cum_binary(compiler, BINARY_OPCODE(ADD),
- SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, stack_size));
+ BINARY_IMM32(ADD, stack_size, SLJIT_SP, 0);
- if ((arg_types & SLJIT_DEF_MASK) < SLJIT_ARG_TYPE_F32)
+ if ((arg_types & SLJIT_ARG_MASK) < SLJIT_ARG_TYPE_F64)
return SLJIT_SUCCESS;
- single = ((arg_types & SLJIT_DEF_MASK) == SLJIT_ARG_TYPE_F32);
+ single = ((arg_types & SLJIT_ARG_MASK) == SLJIT_ARG_TYPE_F32);
inst = (sljit_u8*)ensure_buf(compiler, 1 + 3);
FAIL_IF(!inst);
@@ -725,42 +780,304 @@ static sljit_s32 post_call_with_args(struct sljit_compiler *compiler,
return emit_sse2_load(compiler, single, SLJIT_FR0, SLJIT_MEM1(SLJIT_SP), 0);
}
+static sljit_s32 tail_call_with_args(struct sljit_compiler *compiler,
+ sljit_s32 *extra_space, sljit_s32 arg_types,
+ sljit_s32 src, sljit_sw srcw)
+{
+ sljit_sw args_size, saved_regs_size;
+ sljit_sw types, word_arg_count, float_arg_count;
+ sljit_sw stack_size, prev_stack_size, min_size, offset;
+ sljit_sw word_arg4_offset;
+ sljit_u8 r2_offset = 0;
+ sljit_s32 kept_saveds_count = SLJIT_KEPT_SAVEDS_COUNT(compiler->options);
+ sljit_u8* inst;
+
+ ADJUST_LOCAL_OFFSET(src, srcw);
+ CHECK_EXTRA_REGS(src, srcw, (void)0);
+
+ saved_regs_size = (1 + (compiler->scratches > 9 ? (compiler->scratches - 9) : 0)
+ + (compiler->saveds <= 3 ? compiler->saveds : 3) - kept_saveds_count) * SSIZE_OF(sw);
+
+ word_arg_count = 0;
+ float_arg_count = 0;
+ arg_types >>= SLJIT_ARG_SHIFT;
+ types = 0;
+ args_size = 0;
+
+ while (arg_types != 0) {
+ types = (types << SLJIT_ARG_SHIFT) | (arg_types & SLJIT_ARG_MASK);
+
+ switch (arg_types & SLJIT_ARG_MASK) {
+ case SLJIT_ARG_TYPE_F64:
+ args_size += SSIZE_OF(f64);
+ float_arg_count++;
+ break;
+ case SLJIT_ARG_TYPE_F32:
+ args_size += SSIZE_OF(f32);
+ float_arg_count++;
+ break;
+ default:
+ word_arg_count++;
+ args_size += SSIZE_OF(sw);
+ break;
+ }
+ arg_types >>= SLJIT_ARG_SHIFT;
+ }
+
+ if (args_size <= compiler->args_size) {
+ *extra_space = 0;
+ stack_size = args_size + SSIZE_OF(sw) + saved_regs_size;
+
+ offset = stack_size + compiler->local_size;
+
+ if (src != SLJIT_IMM && src != SLJIT_R0) {
+ if (word_arg_count >= 1) {
+ EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), 0, SLJIT_R0, 0);
+ r2_offset = sizeof(sljit_sw);
+ }
+ EMIT_MOV(compiler, SLJIT_R0, 0, src, srcw);
+ }
+
+ while (types != 0) {
+ switch (types & SLJIT_ARG_MASK) {
+ case SLJIT_ARG_TYPE_F64:
+ offset -= SSIZE_OF(f64);
+ FAIL_IF(emit_sse2_store(compiler, 0, SLJIT_MEM1(SLJIT_SP), offset, float_arg_count));
+ float_arg_count--;
+ break;
+ case SLJIT_ARG_TYPE_F32:
+ offset -= SSIZE_OF(f32);
+ FAIL_IF(emit_sse2_store(compiler, 0, SLJIT_MEM1(SLJIT_SP), offset, float_arg_count));
+ float_arg_count--;
+ break;
+ default:
+ switch (word_arg_count) {
+ case 1:
+ offset -= SSIZE_OF(sw);
+ if (r2_offset != 0) {
+ EMIT_MOV(compiler, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_SP), 0);
+ EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), offset, SLJIT_R2, 0);
+ } else
+ EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), offset, SLJIT_R0, 0);
+ break;
+ case 2:
+ offset -= SSIZE_OF(sw);
+ EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), offset, SLJIT_R1, 0);
+ break;
+ case 3:
+ offset -= SSIZE_OF(sw);
+ break;
+ case 4:
+ offset -= SSIZE_OF(sw);
+ EMIT_MOV(compiler, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_SP), 2 * SSIZE_OF(sw));
+ EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), offset, SLJIT_R2, 0);
+ break;
+ }
+ word_arg_count--;
+ break;
+ }
+ types >>= SLJIT_ARG_SHIFT;
+ }
+
+ return emit_stack_frame_release(compiler, 0);
+ }
+
+ stack_size = args_size + SSIZE_OF(sw);
+
+ if (word_arg_count >= 1 && src != SLJIT_IMM && src != SLJIT_R0) {
+ r2_offset = SSIZE_OF(sw);
+ stack_size += SSIZE_OF(sw);
+ }
+
+ if (word_arg_count >= 3)
+ stack_size += SSIZE_OF(sw);
+
+ prev_stack_size = SSIZE_OF(sw) + saved_regs_size;
+ min_size = prev_stack_size + compiler->local_size;
+
+ word_arg4_offset = 2 * SSIZE_OF(sw);
+
+ if (stack_size > min_size) {
+ BINARY_IMM32(SUB, stack_size - min_size, SLJIT_SP, 0);
+ if (src == SLJIT_MEM1(SLJIT_SP))
+ srcw += stack_size - min_size;
+ word_arg4_offset += stack_size - min_size;
+ }
+ else
+ stack_size = min_size;
+
+ if (word_arg_count >= 3) {
+ EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), r2_offset, SLJIT_R2, 0);
+
+ if (word_arg_count >= 4)
+ EMIT_MOV(compiler, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_SP), word_arg4_offset);
+ }
+
+ if (src != SLJIT_IMM && src != SLJIT_R0) {
+ if (word_arg_count >= 1) {
+ SLJIT_ASSERT(r2_offset == sizeof(sljit_sw));
+ EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), 0, SLJIT_R0, 0);
+ }
+ EMIT_MOV(compiler, SLJIT_R0, 0, src, srcw);
+ }
+
+ /* Restore saved registers. */
+ offset = stack_size - 2 * SSIZE_OF(sw);
+ EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), offset);
+
+ if (compiler->saveds > 2 || compiler->scratches > 9) {
+ offset -= SSIZE_OF(sw);
+ EMIT_MOV(compiler, SLJIT_S2, 0, SLJIT_MEM1(SLJIT_SP), offset);
+ }
+ if ((compiler->saveds > 1 && kept_saveds_count <= 1) || compiler->scratches > 10) {
+ offset -= SSIZE_OF(sw);
+ EMIT_MOV(compiler, SLJIT_S1, 0, SLJIT_MEM1(SLJIT_SP), offset);
+ }
+ if ((compiler->saveds > 0 && kept_saveds_count == 0) || compiler->scratches > 11) {
+ offset -= SSIZE_OF(sw);
+ EMIT_MOV(compiler, SLJIT_S0, 0, SLJIT_MEM1(SLJIT_SP), offset);
+ }
+
+ /* Copy fourth argument and return address. */
+ offset = stack_size - SSIZE_OF(sw);
+ *extra_space = args_size;
+
+ if (word_arg_count >= 4) {
+ offset -= SSIZE_OF(sw);
+ EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), offset, SLJIT_R2, 0);
+ }
+
+ while (types != 0) {
+ switch (types & SLJIT_ARG_MASK) {
+ case SLJIT_ARG_TYPE_F64:
+ offset -= SSIZE_OF(f64);
+ FAIL_IF(emit_sse2_store(compiler, 0, SLJIT_MEM1(SLJIT_SP), offset, float_arg_count));
+ float_arg_count--;
+ break;
+ case SLJIT_ARG_TYPE_F32:
+ offset -= SSIZE_OF(f32);
+ FAIL_IF(emit_sse2_store(compiler, 0, SLJIT_MEM1(SLJIT_SP), offset, float_arg_count));
+ float_arg_count--;
+ break;
+ default:
+ switch (word_arg_count) {
+ case 1:
+ offset -= SSIZE_OF(sw);
+ if (r2_offset != 0) {
+ EMIT_MOV(compiler, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_SP), 0);
+ EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), offset, SLJIT_R2, 0);
+ } else
+ EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), offset, SLJIT_R0, 0);
+ break;
+ case 2:
+ offset -= SSIZE_OF(sw);
+ EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), offset, SLJIT_R1, 0);
+ break;
+ case 3:
+ offset -= SSIZE_OF(sw);
+ EMIT_MOV(compiler, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_SP), r2_offset);
+ EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), offset, SLJIT_R2, 0);
+ break;
+ }
+ word_arg_count--;
+ break;
+ }
+ types >>= SLJIT_ARG_SHIFT;
+ }
+
+ SLJIT_ASSERT(offset >= 0);
+
+ if (offset == 0)
+ return SLJIT_SUCCESS;
+
+ BINARY_IMM32(ADD, offset, SLJIT_SP, 0);
+ return SLJIT_SUCCESS;
+}
+
+static sljit_s32 emit_tail_call_end(struct sljit_compiler *compiler, sljit_s32 extra_space)
+{
+ /* Called when stack consumption cannot be reduced to 0. */
+ sljit_u8 *inst;
+
+ BINARY_IMM32(ADD, extra_space, SLJIT_SP, 0);
+ return emit_byte(compiler, RET_near);
+}
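
At run time the two instructions emitted here just release the leftover
argument area and return through the address already on the stack. A
byte-level sketch of the long-form encoding (BINARY_IMM32 actually picks
the shorter 83 /0 ib form when extra_space fits in a signed byte):

	#include <stdint.h>

	/* A sketch of the machine code emit_tail_call_end produces. */
	static uint8_t *emit_add_esp_ret(uint8_t *p, int32_t extra_space)
	{
		p[0] = 0x81;	/* ADD r/m32, imm32 (group 81, /0 = ADD) */
		p[1] = 0xc4;	/* mod=11, reg=000 (ADD), rm=100 (esp) */
		p[2] = (uint8_t)extra_space;
		p[3] = (uint8_t)(extra_space >> 8);
		p[4] = (uint8_t)(extra_space >> 16);
		p[5] = (uint8_t)(extra_space >> 24);
		p[6] = 0xc3;	/* near RET */
		return p + 7;
	}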
+
+static sljit_s32 tail_call_reg_arg_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types)
+{
+ sljit_s32 word_arg_count = 0;
+ sljit_s32 kept_saveds_count, offset;
+
+ arg_types >>= SLJIT_ARG_SHIFT;
+
+ while (arg_types) {
+ if ((arg_types & SLJIT_ARG_MASK) < SLJIT_ARG_TYPE_F64)
+ word_arg_count++;
+
+ arg_types >>= SLJIT_ARG_SHIFT;
+ }
+
+ if (word_arg_count < 4)
+ return SLJIT_SUCCESS;
+
+ EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), 2 * SSIZE_OF(sw));
+
+ kept_saveds_count = SLJIT_KEPT_SAVEDS_COUNT(compiler->options);
+ offset = compiler->local_size + 3 * SSIZE_OF(sw);
+
+ if ((compiler->saveds > 0 && kept_saveds_count == 0) || compiler->scratches > 11)
+ offset += SSIZE_OF(sw);
+ if ((compiler->saveds > 1 && kept_saveds_count <= 1) || compiler->scratches > 10)
+ offset += SSIZE_OF(sw);
+ if ((compiler->saveds > 2 && kept_saveds_count <= 2) || compiler->scratches > 9)
+ offset += SSIZE_OF(sw);
+
+ return emit_mov(compiler, SLJIT_MEM1(SLJIT_SP), offset, TMP_REG1, 0);
+}
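
Both tail-call helpers decode arg_types the same way: the lowest
SLJIT_ARG_SHIFT-wide field describes the return value and each following
field one argument. A minimal sketch of that walk; the constants below
are illustrative stand-ins, not the real sljit values:

	#include <stdio.h>

	#define ARG_SHIFT 4	/* assumed field width */
	#define ARG_MASK 0xf
	#define TYPE_F64 6	/* assumed: float types compare >= this */

	static int count_word_args(unsigned arg_types)
	{
		int count = 0;

		arg_types >>= ARG_SHIFT;	/* skip the return type */
		while (arg_types) {
			if ((arg_types & ARG_MASK) < TYPE_F64)
				count++;
			arg_types >>= ARG_SHIFT;
		}
		return count;
	}

	int main(void)
	{
		/* ret | arg1 | arg2 | arg3: two word args, then one f64. */
		unsigned types = ((unsigned)TYPE_F64 << (3 * ARG_SHIFT))
			| (1u << (2 * ARG_SHIFT)) | (1u << ARG_SHIFT);
		printf("%d word arguments\n", count_word_args(types));	/* 2 */
		return 0;
	}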
+
SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type,
sljit_s32 arg_types)
{
struct sljit_jump *jump;
- sljit_s32 stack_size = 0;
+ sljit_sw stack_size = 0;
sljit_s32 word_arg_count;
CHECK_ERROR_PTR();
CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types));
-#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
- if ((type & 0xff) == SLJIT_CALL) {
- stack_size = c_fast_call_get_stack_size(arg_types, &word_arg_count);
- PTR_FAIL_IF(c_fast_call_with_args(compiler, arg_types, stack_size, word_arg_count, 0));
+ if (type & SLJIT_CALL_RETURN) {
+ if ((type & 0xff) == SLJIT_CALL_REG_ARG) {
+ PTR_FAIL_IF(tail_call_reg_arg_with_args(compiler, arg_types));
+ PTR_FAIL_IF(emit_stack_frame_release(compiler, 0));
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
- || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->skip_checks = 1;
-#endif
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_jump(compiler, SLJIT_JUMP | (type & SLJIT_REWRITABLE_JUMP));
+ }
+
+ stack_size = type;
+ PTR_FAIL_IF(tail_call_with_args(compiler, &stack_size, arg_types, SLJIT_IMM, 0));
+
+ SLJIT_SKIP_CHECKS(compiler);
+
+ if (stack_size == 0)
+ return sljit_emit_jump(compiler, SLJIT_JUMP | (type & SLJIT_REWRITABLE_JUMP));
jump = sljit_emit_jump(compiler, type);
PTR_FAIL_IF(jump == NULL);
- PTR_FAIL_IF(post_call_with_args(compiler, arg_types, 0));
+ PTR_FAIL_IF(emit_tail_call_end(compiler, stack_size));
return jump;
}
-#endif
- stack_size = cdecl_call_get_stack_size(compiler, arg_types, &word_arg_count);
- PTR_FAIL_IF(cdecl_call_with_args(compiler, arg_types, stack_size, word_arg_count));
+ if ((type & 0xff) == SLJIT_CALL_REG_ARG) {
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_jump(compiler, type);
+ }
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
- || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->skip_checks = 1;
-#endif
+ stack_size = call_get_stack_size(arg_types, &word_arg_count);
+ PTR_FAIL_IF(call_with_args(compiler, arg_types, stack_size, word_arg_count, 0));
+ SLJIT_SKIP_CHECKS(compiler);
jump = sljit_emit_jump(compiler, type);
PTR_FAIL_IF(jump == NULL);
@@ -772,96 +1089,115 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compi
sljit_s32 arg_types,
sljit_s32 src, sljit_sw srcw)
{
- sljit_s32 stack_size = 0;
+ sljit_sw stack_size = 0;
sljit_s32 word_arg_count;
-#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
- sljit_s32 swap_args;
-#endif
CHECK_ERROR();
CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw));
-#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
- SLJIT_ASSERT(reg_map[SLJIT_R0] == 0 && reg_map[SLJIT_R2] == 1 && SLJIT_R0 == 1 && SLJIT_R2 == 3);
+ if (type & SLJIT_CALL_RETURN) {
+ if ((type & 0xff) == SLJIT_CALL_REG_ARG) {
+ FAIL_IF(tail_call_reg_arg_with_args(compiler, arg_types));
- if ((type & 0xff) == SLJIT_CALL) {
- stack_size = c_fast_call_get_stack_size(arg_types, &word_arg_count);
- swap_args = 0;
+ if ((src & SLJIT_MEM) || (src > SLJIT_R2 && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options)))) {
+ ADJUST_LOCAL_OFFSET(src, srcw);
+ CHECK_EXTRA_REGS(src, srcw, (void)0);
- if (word_arg_count > 0) {
- if ((src & REG_MASK) == SLJIT_R2 || OFFS_REG(src) == SLJIT_R2) {
- swap_args = 1;
- if (((src & REG_MASK) | 0x2) == SLJIT_R2)
- src ^= 0x2;
- if ((OFFS_REG(src) | 0x2) == SLJIT_R2)
- src ^= TO_OFFS_REG(0x2);
+ EMIT_MOV(compiler, TMP_REG1, 0, src, srcw);
+ src = TMP_REG1;
+ srcw = 0;
}
+
+ FAIL_IF(emit_stack_frame_release(compiler, 0));
+
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_ijump(compiler, SLJIT_JUMP, src, srcw);
+ }
+
+ stack_size = type;
+ FAIL_IF(tail_call_with_args(compiler, &stack_size, arg_types, src, srcw));
+
+ if (src != SLJIT_IMM) {
+ src = SLJIT_R0;
+ srcw = 0;
}
- FAIL_IF(c_fast_call_with_args(compiler, arg_types, stack_size, word_arg_count, swap_args));
+ SLJIT_SKIP_CHECKS(compiler);
- compiler->saveds_offset += stack_size;
- compiler->locals_offset += stack_size;
+ if (stack_size == 0)
+ return sljit_emit_ijump(compiler, SLJIT_JUMP, src, srcw);
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
- || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->skip_checks = 1;
-#endif
FAIL_IF(sljit_emit_ijump(compiler, type, src, srcw));
+ return emit_tail_call_end(compiler, stack_size);
+ }
- compiler->saveds_offset -= stack_size;
- compiler->locals_offset -= stack_size;
+ if ((type & 0xff) == SLJIT_CALL_REG_ARG) {
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_ijump(compiler, type, src, srcw);
+ }
- return post_call_with_args(compiler, arg_types, 0);
+ ADJUST_LOCAL_OFFSET(src, srcw);
+ CHECK_EXTRA_REGS(src, srcw, (void)0);
+
+ if (src & SLJIT_MEM) {
+ EMIT_MOV(compiler, TMP_REG1, 0, src, srcw);
+ src = TMP_REG1;
+ srcw = 0;
}
-#endif
- stack_size = cdecl_call_get_stack_size(compiler, arg_types, &word_arg_count);
- FAIL_IF(cdecl_call_with_args(compiler, arg_types, stack_size, word_arg_count));
+ stack_size = call_get_stack_size(arg_types, &word_arg_count);
+ FAIL_IF(call_with_args(compiler, arg_types, stack_size, word_arg_count, src == TMP_REG1));
- compiler->saveds_offset += stack_size;
- compiler->locals_offset += stack_size;
+ if (stack_size > 0 && src == SLJIT_MEM1(SLJIT_SP))
+ srcw += stack_size;
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
- || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->skip_checks = 1;
-#endif
+ SLJIT_SKIP_CHECKS(compiler);
FAIL_IF(sljit_emit_ijump(compiler, type, src, srcw));
- compiler->saveds_offset -= stack_size;
- compiler->locals_offset -= stack_size;
-
return post_call_with_args(compiler, arg_types, stack_size);
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
+static SLJIT_INLINE sljit_s32 emit_fmov_before_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw)
{
- sljit_u8 *inst;
+ sljit_u8* inst;
- CHECK_ERROR();
- CHECK(check_sljit_emit_fast_enter(compiler, dst, dstw));
- ADJUST_LOCAL_OFFSET(dst, dstw);
-
- CHECK_EXTRA_REGS(dst, dstw, (void)0);
+ if (compiler->options & SLJIT_ENTER_REG_ARG) {
+ if (src == SLJIT_FR0)
+ return SLJIT_SUCCESS;
- /* For UNUSED dst. Uncommon, but possible. */
- if (dst == SLJIT_UNUSED)
- dst = TMP_REG1;
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_fop1(compiler, op, SLJIT_RETURN_FREG, 0, src, srcw);
+ }
- if (FAST_IS_REG(dst)) {
- /* Unused dest is possible here. */
- inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
- FAIL_IF(!inst);
+ if (FAST_IS_REG(src)) {
+ FAIL_IF(emit_sse2_store(compiler, op & SLJIT_32, SLJIT_MEM1(SLJIT_SP), 0, src));
- INC_SIZE(1);
- POP_REG(reg_map[dst]);
- return SLJIT_SUCCESS;
+ src = SLJIT_MEM1(SLJIT_SP);
+ srcw = 0;
+ } else {
+ ADJUST_LOCAL_OFFSET(src, srcw);
}
+ inst = emit_x86_instruction(compiler, 1 | EX86_SSE2_OP1, 0, 0, src, srcw);
+ *inst = (op & SLJIT_32) ? FLDS : FLDL;
+
+ return SLJIT_SUCCESS;
+}
+
+static sljit_s32 emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
+{
+ sljit_u8 *inst;
+
+ CHECK_EXTRA_REGS(dst, dstw, (void)0);
+
+ /* Unused dest is possible here. */
+ if (FAST_IS_REG(dst))
+ return emit_byte(compiler, U8(POP_r + reg_map[dst]));
+
/* Memory. */
inst = emit_x86_instruction(compiler, 1, 0, 0, dst, dstw);
FAIL_IF(!inst);
- *inst++ = POP_rm;
+ *inst = POP_rm;
return SLJIT_SUCCESS;
}
@@ -881,8 +1217,8 @@ static sljit_s32 emit_fast_return(struct sljit_compiler *compiler, sljit_s32 src
else {
inst = emit_x86_instruction(compiler, 1, 0, 0, src, srcw);
FAIL_IF(!inst);
- *inst++ = GROUP_FF;
- *inst |= PUSH_rm;
+ inst[0] = GROUP_FF;
+ inst[1] |= PUSH_rm;
inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
FAIL_IF(!inst);
@@ -893,36 +1229,395 @@ static sljit_s32 emit_fast_return(struct sljit_compiler *compiler, sljit_s32 src
return SLJIT_SUCCESS;
}
+static sljit_s32 sljit_emit_get_return_address(struct sljit_compiler *compiler,
+ sljit_s32 dst, sljit_sw dstw)
+{
+ sljit_s32 options = compiler->options;
+ sljit_s32 saveds = compiler->saveds;
+ sljit_s32 scratches = compiler->scratches;
+
+ saveds = ((scratches > 9 ? (scratches - 9) : 0) + (saveds <= 3 ? saveds : 3) - SLJIT_KEPT_SAVEDS_COUNT(options)) * SSIZE_OF(sw);
+
+ /* Saving ebp. */
+ if (!(options & SLJIT_ENTER_REG_ARG))
+ saveds += SSIZE_OF(sw);
+
+ return emit_mov(compiler, dst, dstw, SLJIT_MEM1(SLJIT_SP), compiler->local_size + saveds);
+}
+
+/* --------------------------------------------------------------------- */
+/* Other operations */
+/* --------------------------------------------------------------------- */
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 reg,
+ sljit_s32 mem, sljit_sw memw)
+{
+ sljit_u8* inst;
+ sljit_s32 i, next, reg_idx, offset;
+ sljit_u8 regs[2];
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw));
+
+ if (!(reg & REG_PAIR_MASK))
+ return sljit_emit_mem_unaligned(compiler, type, reg, mem, memw);
+
+ ADJUST_LOCAL_OFFSET(mem, memw);
+
+ regs[0] = U8(REG_PAIR_FIRST(reg));
+ regs[1] = U8(REG_PAIR_SECOND(reg));
+
+ next = SSIZE_OF(sw);
+
+ if (!(type & SLJIT_MEM_STORE) && (regs[0] == (mem & REG_MASK) || regs[0] == OFFS_REG(mem))) {
+ if (regs[1] == (mem & REG_MASK) || regs[1] == OFFS_REG(mem)) {
+ /* Neither of them is a virtual register, so TMP_REG1 will not be used. */
+ EMIT_MOV(compiler, TMP_REG1, 0, OFFS_REG(mem), 0);
+
+ if (regs[1] == OFFS_REG(mem))
+ next = -SSIZE_OF(sw);
+
+ mem = (mem & ~OFFS_REG_MASK) | TO_OFFS_REG(TMP_REG1);
+ } else {
+ next = -SSIZE_OF(sw);
+
+ if (!(mem & OFFS_REG_MASK))
+ memw += SSIZE_OF(sw);
+ }
+ }
+
+ for (i = 0; i < 2; i++) {
+ reg_idx = next > 0 ? i : (i ^ 0x1);
+ reg = regs[reg_idx];
+
+ offset = -1;
+
+ if (reg >= SLJIT_R3 && reg <= SLJIT_S3) {
+ offset = (2 * SSIZE_OF(sw)) + ((reg) - SLJIT_R3) * SSIZE_OF(sw);
+ reg = TMP_REG1;
+
+ if (type & SLJIT_MEM_STORE)
+ EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), offset);
+ }
+
+ if ((mem & OFFS_REG_MASK) && (reg_idx == 1)) {
+ inst = (sljit_u8*)ensure_buf(compiler, (sljit_uw)(1 + 4));
+ FAIL_IF(!inst);
+
+ INC_SIZE(4);
+
+ inst[0] = (type & SLJIT_MEM_STORE) ? MOV_rm_r : MOV_r_rm;
+ inst[1] = 0x44 | U8(reg_map[reg] << 3);
+ inst[2] = U8(memw << 6) | U8(reg_map[OFFS_REG(mem)] << 3) | reg_map[mem & REG_MASK];
+ inst[3] = sizeof(sljit_sw);
+ } else if (type & SLJIT_MEM_STORE) {
+ EMIT_MOV(compiler, mem, memw, reg, 0);
+ } else {
+ EMIT_MOV(compiler, reg, 0, mem, memw);
+ }
+
+ if (!(mem & OFFS_REG_MASK))
+ memw += next;
+
+ if (!(type & SLJIT_MEM_STORE) && offset != -1)
+ EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), offset, TMP_REG1, 0);
+ }
+
+ return SLJIT_SUCCESS;
+}
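
The sign flip on next above exists because one half of a register-pair
load can overwrite the base or index register of the address, so the
overlapping half must be loaded last. The hazard, reduced to plain C:

	#include <stdint.h>

	/* regs models the register file; regs[base] holds the address.
	   A sketch only: sljit_emit_mem resolves this at compile time. */
	void load_pair_safely(intptr_t *regs, int first, int second, int base)
	{
		const intptr_t *addr = (const intptr_t *)regs[base];

		if (first == base) {	/* overlap: clobbering load goes last */
			regs[second] = addr[1];
			regs[first] = addr[0];
		} else {
			regs[first] = addr[0];
			regs[second] = addr[1];
		}
	}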
+
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_uw(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 src, sljit_sw srcw)
+{
+ sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG;
+ sljit_u8 *inst, *jump_inst1, *jump_inst2;
+ sljit_uw size1, size2;
+
+ /* The value 0x80000000 (2^31) as a double. */
+ static const sljit_f64 f64_high_bit = (sljit_f64)0x80000000ul;
+
+ CHECK_EXTRA_REGS(src, srcw, (void)0);
+
+ if (!(op & SLJIT_32)) {
+ EMIT_MOV(compiler, TMP_REG1, 0, src, srcw);
+
+ inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_IMM, 1, TMP_REG1, 0);
+ FAIL_IF(!inst);
+ inst[1] |= ROL;
+
+ inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_IMM, 1, TMP_REG1, 0);
+ FAIL_IF(!inst);
+ inst[1] |= SHR;
+
+ FAIL_IF(emit_groupf(compiler, CVTSI2SD_x_rm | EX86_PREF_F2 | EX86_SSE2_OP1, dst_r, TMP_REG1, 0));
+
+ inst = (sljit_u8*)ensure_buf(compiler, 1 + 2);
+ FAIL_IF(!inst);
+ INC_SIZE(2);
+ inst[0] = U8(get_jump_code(SLJIT_NOT_CARRY) - 0x10);
+
+ size1 = compiler->size;
+ FAIL_IF(emit_groupf(compiler, ADDSD_x_xm | EX86_PREF_F2 | EX86_SSE2, dst_r, SLJIT_MEM0(), (sljit_sw)&f64_high_bit));
+
+ inst[1] = U8(compiler->size - size1);
+
+ if (dst_r == TMP_FREG)
+ return emit_sse2_store(compiler, 0, dst, dstw, TMP_FREG);
+ return SLJIT_SUCCESS;
+ }
+
+ if (!FAST_IS_REG(src)) {
+ EMIT_MOV(compiler, TMP_REG1, 0, src, srcw);
+ src = TMP_REG1;
+ }
+
+ BINARY_IMM32(CMP, 0, src, 0);
+
+ inst = (sljit_u8*)ensure_buf(compiler, 1 + 2);
+ FAIL_IF(!inst);
+ INC_SIZE(2);
+ inst[0] = JL_i8;
+ jump_inst1 = inst;
+
+ size1 = compiler->size;
+
+ FAIL_IF(emit_groupf(compiler, CVTSI2SD_x_rm | EX86_SELECT_F2_F3(op) | EX86_SSE2_OP1, dst_r, src, 0));
+
+ inst = (sljit_u8*)ensure_buf(compiler, 1 + 2);
+ FAIL_IF(!inst);
+ INC_SIZE(2);
+ inst[0] = JMP_i8;
+ jump_inst2 = inst;
+
+ size2 = compiler->size;
+
+ jump_inst1[1] = U8(size2 - size1);
+
+ if (src != TMP_REG1)
+ EMIT_MOV(compiler, TMP_REG1, 0, src, 0);
+
+ inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_IMM, 1, TMP_REG1, 0);
+ FAIL_IF(!inst);
+ inst[1] |= SHR;
+
+ inst = (sljit_u8*)ensure_buf(compiler, 1 + 2);
+ FAIL_IF(!inst);
+ INC_SIZE(2);
+ inst[0] = JNC_i8;
+ jump_inst1 = inst;
+
+ size1 = compiler->size;
+
+ BINARY_IMM32(OR, 1, TMP_REG1, 0);
+ jump_inst1[1] = U8(compiler->size - size1);
+
+ FAIL_IF(emit_groupf(compiler, CVTSI2SD_x_rm | EX86_SELECT_F2_F3(op) | EX86_SSE2_OP1, dst_r, TMP_REG1, 0));
+ FAIL_IF(emit_groupf(compiler, ADDSD_x_xm | EX86_SELECT_F2_F3(op) | EX86_SSE2, dst_r, dst_r, 0));
+
+ jump_inst2[1] = U8(compiler->size - size2);
+
+ if (dst_r == TMP_FREG)
+ return emit_sse2_store(compiler, op & SLJIT_32, dst, dstw, TMP_FREG);
+ return SLJIT_SUCCESS;
+}
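
Both branches are classic unsigned-to-float tricks, sketched below in
scalar C. That the first branch serves the f64 destination and the
second the f32 destination is inferred from the opcodes used:

	#include <stdint.h>

	/* ROL/SHR + conditional ADDSD path: convert the low 31 bits as a
	   signed value, then add 2^31 back when the top bit was set. */
	double u32_to_f64(uint32_t x)
	{
		double d = (double)(int32_t)(x & 0x7fffffffu);

		if (x & 0x80000000u)
			d += 2147483648.0;	/* 0x80000000 as a double */
		return d;
	}

	/* SHR + OR 1 + add-to-itself path: halve with round-to-odd,
	   convert, then double the result. Only needed when the input
	   looks negative to a signed conversion. */
	float u32_to_f32(uint32_t x)
	{
		float f;

		if ((int32_t)x >= 0)
			return (float)(int32_t)x;
		f = (float)(int32_t)((x >> 1) | (x & 1));
		return f + f;
	}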
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fset32(struct sljit_compiler *compiler,
+ sljit_s32 freg, sljit_f32 value)
+{
+ sljit_u8 *inst;
+ union {
+ sljit_s32 imm;
+ sljit_f32 value;
+ } u;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fset32(compiler, freg, value));
+
+ u.value = value;
+
+ if (u.imm != 0)
+ EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, u.imm);
+
+ inst = (sljit_u8*)ensure_buf(compiler, 1 + 4);
+ FAIL_IF(!inst);
+ INC_SIZE(4);
+
+ inst[0] = GROUP_66;
+ inst[1] = GROUP_0F;
+
+ if (u.imm == 0) {
+ inst[2] = PXOR_x_xm;
+ inst[3] = U8(freg | (freg << 3) | MOD_REG);
+ } else {
+ inst[2] = MOVD_x_rm;
+ inst[3] = U8(reg_map[TMP_REG1] | (freg << 3) | MOD_REG);
+ }
+
+ return SLJIT_SUCCESS;
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fset64(struct sljit_compiler *compiler,
+ sljit_s32 freg, sljit_f64 value)
+{
+ sljit_u8 *inst;
+ sljit_s32 tmp_freg = freg;
+ union {
+ sljit_s32 imm[2];
+ sljit_f64 value;
+ } u;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fset64(compiler, freg, value));
+
+ u.value = value;
+
+ if (u.imm[0] == 0) {
+ if (u.imm[1] == 0)
+ return emit_groupf(compiler, PXOR_x_xm | EX86_PREF_66 | EX86_SSE2, freg, freg, 0);
+
+ EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, u.imm[1]);
+ } else
+ EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, u.imm[0]);
+
+ FAIL_IF(emit_groupf(compiler, MOVD_x_rm | EX86_PREF_66 | EX86_SSE2_OP1, freg, TMP_REG1, 0));
+
+ if (u.imm[1] == 0)
+ return SLJIT_SUCCESS;
+
+ if (u.imm[0] == 0) {
+ inst = (sljit_u8*)ensure_buf(compiler, 1 + 4);
+ FAIL_IF(!inst);
+ INC_SIZE(4);
+
+ inst[0] = GROUP_0F;
+ inst[1] = SHUFPS_x_xm;
+ inst[2] = U8(MOD_REG | (freg << 3) | freg);
+ inst[3] = 0x51;
+ return SLJIT_SUCCESS;
+ }
+
+ if (u.imm[0] != u.imm[1]) {
+ SLJIT_ASSERT(u.imm[1] != 0 && cpu_feature_list != 0);
+
+ EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, u.imm[1]);
+
+ if (cpu_feature_list & CPU_FEATURE_SSE41) {
+ FAIL_IF(emit_groupf_ext(compiler, PINSRD_x_rm_i8 | EX86_PREF_66 | VEX_OP_0F3A | EX86_SSE2_OP1, freg, TMP_REG1, 0));
+ return emit_byte(compiler, 1);
+ }
+
+ FAIL_IF(emit_groupf(compiler, MOVD_x_rm | EX86_PREF_66 | EX86_SSE2_OP1, TMP_FREG, TMP_REG1, 0));
+ tmp_freg = TMP_FREG;
+ }
+
+ inst = (sljit_u8*)ensure_buf(compiler, 1 + 3);
+ FAIL_IF(!inst);
+ INC_SIZE(3);
+
+ inst[0] = GROUP_0F;
+ inst[1] = UNPCKLPS_x_xm;
+ inst[2] = U8(MOD_REG | (freg << 3) | tmp_freg);
+ return SLJIT_SUCCESS;
+}
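
The union at the top is the usual bit split: on a little-endian 32-bit
target the f64 immediate decomposes into a low and a high word, and the
code only materializes the words that are non-zero. A standalone sketch:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		union {
			int32_t imm[2];
			double value;
		} u;

		u.value = 1.0;
		/* Prints: low 0x00000000 high 0x3ff00000 */
		printf("low 0x%08x high 0x%08x\n", (unsigned)u.imm[0], (unsigned)u.imm[1]);
		return 0;
	}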
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fcopy(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 freg, sljit_s32 reg)
+{
+ sljit_u8 *inst;
+ sljit_s32 reg2;
+ sljit_sw regw, reg2w;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fcopy(compiler, op, freg, reg));
+
+ regw = 0;
+ reg2 = 0;
+ reg2w = 0;
+
+ SLJIT_ASSERT(cpu_feature_list != 0);
+
+ if (!(op & SLJIT_32) && (cpu_feature_list & CPU_FEATURE_SSE41)) {
+ if (reg & REG_PAIR_MASK) {
+ reg2 = REG_PAIR_FIRST(reg);
+ reg = REG_PAIR_SECOND(reg);
+
+ CHECK_EXTRA_REGS(reg, regw, (void)0);
+
+ FAIL_IF(emit_groupf(compiler, (GET_OPCODE(op) == SLJIT_COPY_TO_F64 ? MOVD_x_rm : MOVD_rm_x)
+ | EX86_PREF_66 | EX86_SSE2_OP1, freg, reg, regw));
+ } else
+ reg2 = reg;
+
+ CHECK_EXTRA_REGS(reg2, reg2w, (void)0);
+
+ FAIL_IF(emit_groupf_ext(compiler, (GET_OPCODE(op) == SLJIT_COPY_TO_F64 ? PINSRD_x_rm_i8 : PEXTRD_rm_x_i8)
+ | EX86_PREF_66 | VEX_OP_0F3A | EX86_SSE2_OP1, freg, reg2, reg2w));
+ return emit_byte(compiler, 1);
+ }
+
+ if (reg & REG_PAIR_MASK) {
+ reg2 = REG_PAIR_SECOND(reg);
+ reg = REG_PAIR_FIRST(reg);
+
+ if (reg == reg2)
+ reg = 0;
+
+ CHECK_EXTRA_REGS(reg2, reg2w, (void)0);
+ }
+
+ CHECK_EXTRA_REGS(reg, regw, (void)0);
+
+ if (op & SLJIT_32)
+ return emit_groupf(compiler, (GET_OPCODE(op) == SLJIT_COPY_TO_F64 ? MOVD_x_rm : MOVD_rm_x)
+ | EX86_PREF_66 | EX86_SSE2_OP1, freg, reg, regw);
+
+ if (op == SLJIT_COPY_FROM_F64) {
+ inst = (sljit_u8*)ensure_buf(compiler, 1 + 5);
+ FAIL_IF(!inst);
+ INC_SIZE(5);
+
+ inst[0] = GROUP_66;
+ inst[1] = GROUP_0F;
+ inst[2] = PSHUFD_x_xm;
+ inst[3] = U8(MOD_REG | (TMP_FREG << 3) | freg);
+ inst[4] = 1;
+ } else if (reg != 0)
+ FAIL_IF(emit_groupf(compiler, MOVD_x_rm | EX86_PREF_66 | EX86_SSE2_OP1, TMP_FREG, reg, regw));
+
+ if (reg2 != 0)
+ FAIL_IF(emit_groupf(compiler, (GET_OPCODE(op) == SLJIT_COPY_TO_F64 ? MOVD_x_rm : MOVD_rm_x)
+ | EX86_PREF_66 | EX86_SSE2_OP1, freg, reg2, reg2w));
+
+ if (GET_OPCODE(op) == SLJIT_COPY_TO_F64) {
+ inst = (sljit_u8*)ensure_buf(compiler, 1 + 3);
+ FAIL_IF(!inst);
+ INC_SIZE(3);
+
+ inst[0] = GROUP_0F;
+ inst[1] = UNPCKLPS_x_xm;
+ inst[2] = U8(MOD_REG | (freg << 3) | (reg == 0 ? freg : TMP_FREG));
+ } else
+ FAIL_IF(emit_groupf(compiler, MOVD_rm_x | EX86_PREF_66 | EX86_SSE2_OP1, TMP_FREG, reg, regw));
+
+ return SLJIT_SUCCESS;
+}
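
Both the SSE4.1 path (PINSRD/PEXTRD) and the SSE2 fallback implement the
same bit copy between a word register pair and a double. The semantics
in scalar C; which pair member supplies which half is fixed by the sljit
ABI, so the hi/lo naming below is an assumption for illustration:

	#include <stdint.h>
	#include <string.h>

	double copy_pair_to_f64(uint32_t lo, uint32_t hi)
	{
		uint64_t bits = ((uint64_t)hi << 32) | lo;
		double d;

		memcpy(&d, &bits, sizeof(d));	/* bit copy, no conversion */
		return d;
	}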
+
static sljit_s32 skip_frames_before_return(struct sljit_compiler *compiler)
{
- sljit_s32 size, saved_size;
- sljit_s32 has_f64_aligment;
+ sljit_sw size;
/* Don't adjust shadow stack if it isn't enabled. */
- if (!cpu_has_shadow_stack ())
+ if (!cpu_has_shadow_stack())
return SLJIT_SUCCESS;
- SLJIT_ASSERT(compiler->args >= 0);
+ SLJIT_ASSERT(compiler->args_size >= 0);
SLJIT_ASSERT(compiler->local_size > 0);
-#if !defined(__APPLE__)
- has_f64_aligment = compiler->options & SLJIT_F64_ALIGNMENT;
-#else
- has_f64_aligment = 0;
-#endif
-
size = compiler->local_size;
- saved_size = (1 + (compiler->scratches > 9 ? (compiler->scratches - 9) : 0) + (compiler->saveds <= 3 ? compiler->saveds : 3)) * sizeof(sljit_uw);
- if (has_f64_aligment) {
- /* mov TMP_REG1, [esp + local_size]. */
- EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), size);
- /* mov TMP_REG1, [TMP_REG1+ saved_size]. */
- EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(TMP_REG1), saved_size);
- /* Move return address to [esp]. */
- EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), 0, TMP_REG1, 0);
- size = 0;
- } else
- size += saved_size;
+ size += (1 + (compiler->scratches > 9 ? (compiler->scratches - 9) : 0)
+ + (compiler->saveds <= 3 ? compiler->saveds : 3)) * SSIZE_OF(sw);
- return adjust_shadow_stack(compiler, SLJIT_UNUSED, 0, SLJIT_SP, size);
+ return adjust_shadow_stack(compiler, SLJIT_MEM1(SLJIT_SP), size);
}
diff --git a/src/3rdparty/pcre2/src/sljit/sljitNativeX86_64.c b/src/3rdparty/pcre2/src/sljit/sljitNativeX86_64.c
index e85b56a61a..f313f3f038 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitNativeX86_64.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitNativeX86_64.c
@@ -26,6 +26,10 @@
/* x86 64-bit arch dependent functions. */
+/* --------------------------------------------------------------------- */
+/* Operators */
+/* --------------------------------------------------------------------- */
+
static sljit_s32 emit_load_imm64(struct sljit_compiler *compiler, sljit_s32 reg, sljit_sw imm)
{
sljit_u8 *inst;
@@ -33,15 +37,330 @@ static sljit_s32 emit_load_imm64(struct sljit_compiler *compiler, sljit_s32 reg,
inst = (sljit_u8*)ensure_buf(compiler, 1 + 2 + sizeof(sljit_sw));
FAIL_IF(!inst);
INC_SIZE(2 + sizeof(sljit_sw));
- *inst++ = REX_W | ((reg_map[reg] <= 7) ? 0 : REX_B);
- *inst++ = MOV_r_i32 + (reg_map[reg] & 0x7);
- sljit_unaligned_store_sw(inst, imm);
+ inst[0] = REX_W | ((reg_map[reg] <= 7) ? 0 : REX_B);
+ inst[1] = U8(MOV_r_i32 | reg_lmap[reg]);
+ sljit_unaligned_store_sw(inst + 2, imm);
+ return SLJIT_SUCCESS;
+}
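
The ten bytes built here are the MOV r64, imm64 form: REX.W (plus REX.B
for r8-r15), opcode B8+rd, then the raw immediate. A sketch:

	#include <stdint.h>
	#include <string.h>

	static uint8_t *mov_r64_imm64(uint8_t *p, unsigned reg, int64_t imm)
	{
		*p++ = (uint8_t)(0x48 | (reg >= 8 ? 0x01 : 0));	/* REX.W [| REX.B] */
		*p++ = (uint8_t)(0xb8 | (reg & 7));	/* MOV r64, imm64 */
		memcpy(p, &imm, sizeof(imm));
		return p + 8;
	}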
+
+static sljit_s32 emit_do_imm32(struct sljit_compiler *compiler, sljit_u8 rex, sljit_u8 opcode, sljit_sw imm)
+{
+ sljit_u8 *inst;
+ sljit_uw length = (rex ? 2 : 1) + sizeof(sljit_s32);
+
+ inst = (sljit_u8*)ensure_buf(compiler, 1 + length);
+ FAIL_IF(!inst);
+ INC_SIZE(length);
+ if (rex)
+ *inst++ = rex;
+ *inst++ = opcode;
+ sljit_unaligned_store_s32(inst, (sljit_s32)imm);
+ return SLJIT_SUCCESS;
+}
+
+static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_uw size,
+ /* The register or immediate operand. */
+ sljit_s32 a, sljit_sw imma,
+ /* The general operand (not immediate). */
+ sljit_s32 b, sljit_sw immb)
+{
+ sljit_u8 *inst;
+ sljit_u8 *buf_ptr;
+ sljit_u8 rex = 0;
+ sljit_u8 reg_lmap_b;
+ sljit_uw flags = size;
+ sljit_uw inst_size;
+
+ /* The immediate operand must be 32 bit. */
+ SLJIT_ASSERT(a != SLJIT_IMM || compiler->mode32 || IS_HALFWORD(imma));
+ /* Both cannot be switched on. */
+ SLJIT_ASSERT((flags & (EX86_BIN_INS | EX86_SHIFT_INS)) != (EX86_BIN_INS | EX86_SHIFT_INS));
+ /* Size flags not allowed for typed instructions. */
+ SLJIT_ASSERT(!(flags & (EX86_BIN_INS | EX86_SHIFT_INS)) || (flags & (EX86_BYTE_ARG | EX86_HALF_ARG)) == 0);
+ /* Both size flags cannot be switched on. */
+ SLJIT_ASSERT((flags & (EX86_BYTE_ARG | EX86_HALF_ARG)) != (EX86_BYTE_ARG | EX86_HALF_ARG));
+ /* SSE2 and immediate is not possible. */
+ SLJIT_ASSERT(a != SLJIT_IMM || !(flags & EX86_SSE2));
+ SLJIT_ASSERT(((flags & (EX86_PREF_F2 | EX86_PREF_F3 | EX86_PREF_66))
+ & ((flags & (EX86_PREF_F2 | EX86_PREF_F3 | EX86_PREF_66)) - 1)) == 0);
+ SLJIT_ASSERT((flags & (EX86_VEX_EXT | EX86_REX)) != EX86_VEX_EXT);
+
+ size &= 0xf;
+ /* The mod r/m byte is always present. */
+ inst_size = size + 1;
+
+ if (!compiler->mode32 && !(flags & EX86_NO_REXW))
+ rex |= REX_W;
+ else if (flags & EX86_REX)
+ rex |= REX;
+
+ if (flags & (EX86_PREF_F2 | EX86_PREF_F3 | EX86_PREF_66))
+ inst_size++;
+
+ /* Calculate size of b. */
+ if (b & SLJIT_MEM) {
+ if (!(b & OFFS_REG_MASK) && NOT_HALFWORD(immb)) {
+ PTR_FAIL_IF(emit_load_imm64(compiler, TMP_REG2, immb));
+ immb = 0;
+ if (b & REG_MASK)
+ b |= TO_OFFS_REG(TMP_REG2);
+ else
+ b |= TMP_REG2;
+ }
+
+ if (!(b & REG_MASK))
+ inst_size += 1 + sizeof(sljit_s32); /* SIB byte required to avoid RIP-based addressing. */
+ else {
+ if (immb != 0 && !(b & OFFS_REG_MASK)) {
+ /* Immediate operand. */
+ if (immb <= 127 && immb >= -128)
+ inst_size += sizeof(sljit_s8);
+ else
+ inst_size += sizeof(sljit_s32);
+ } else if (reg_lmap[b & REG_MASK] == 5) {
+ /* Swap registers if possible. */
+ if ((b & OFFS_REG_MASK) && (immb & 0x3) == 0 && reg_lmap[OFFS_REG(b)] != 5)
+ b = SLJIT_MEM | OFFS_REG(b) | TO_OFFS_REG(b & REG_MASK);
+ else
+ inst_size += sizeof(sljit_s8);
+ }
+
+ if (reg_map[b & REG_MASK] >= 8)
+ rex |= REX_B;
+
+ if (reg_lmap[b & REG_MASK] == 4 && !(b & OFFS_REG_MASK))
+ b |= TO_OFFS_REG(SLJIT_SP);
+
+ if (b & OFFS_REG_MASK) {
+ inst_size += 1; /* SIB byte. */
+ if (reg_map[OFFS_REG(b)] >= 8)
+ rex |= REX_X;
+ }
+ }
+ } else if (!(flags & EX86_SSE2_OP2)) {
+ if (reg_map[b] >= 8)
+ rex |= REX_B;
+ } else if (freg_map[b] >= 8)
+ rex |= REX_B;
+
+ if ((flags & EX86_VEX_EXT) && (rex & 0x3)) {
+ SLJIT_ASSERT(size == 2);
+ size++;
+ inst_size++;
+ }
+
+ if (a == SLJIT_IMM) {
+ if (flags & EX86_BIN_INS) {
+ if (imma <= 127 && imma >= -128) {
+ inst_size += 1;
+ flags |= EX86_BYTE_ARG;
+ } else
+ inst_size += 4;
+ } else if (flags & EX86_SHIFT_INS) {
+ SLJIT_ASSERT(imma <= (compiler->mode32 ? 0x1f : 0x3f));
+ if (imma != 1) {
+ inst_size++;
+ flags |= EX86_BYTE_ARG;
+ }
+ } else if (flags & EX86_BYTE_ARG)
+ inst_size++;
+ else if (flags & EX86_HALF_ARG)
+ inst_size += sizeof(short);
+ else
+ inst_size += sizeof(sljit_s32);
+ } else {
+ SLJIT_ASSERT(!(flags & EX86_SHIFT_INS) || a == SLJIT_PREF_SHIFT_REG);
+ /* reg_map[SLJIT_PREF_SHIFT_REG] is less than 8. */
+ if (!(flags & EX86_SSE2_OP1)) {
+ if (reg_map[a] >= 8)
+ rex |= REX_R;
+ }
+ else if (freg_map[a] >= 8)
+ rex |= REX_R;
+ }
+
+ if (rex)
+ inst_size++;
+
+ inst = (sljit_u8*)ensure_buf(compiler, 1 + inst_size);
+ PTR_FAIL_IF(!inst);
+
+ /* Encoding prefixes. */
+ INC_SIZE(inst_size);
+ if (flags & EX86_PREF_F2)
+ *inst++ = 0xf2;
+ else if (flags & EX86_PREF_F3)
+ *inst++ = 0xf3;
+ else if (flags & EX86_PREF_66)
+ *inst++ = 0x66;
+
+ /* Rex is always the last prefix. */
+ if (rex)
+ *inst++ = rex;
+
+ buf_ptr = inst + size;
+
+ /* Encode mod/rm byte. */
+ if (!(flags & EX86_SHIFT_INS)) {
+ if ((flags & EX86_BIN_INS) && a == SLJIT_IMM)
+ *inst = (flags & EX86_BYTE_ARG) ? GROUP_BINARY_83 : GROUP_BINARY_81;
+
+ if (a == SLJIT_IMM)
+ *buf_ptr = 0;
+ else if (!(flags & EX86_SSE2_OP1))
+ *buf_ptr = U8(reg_lmap[a] << 3);
+ else
+ *buf_ptr = U8(freg_lmap[a] << 3);
+ } else {
+ if (a == SLJIT_IMM) {
+ if (imma == 1)
+ *inst = GROUP_SHIFT_1;
+ else
+ *inst = GROUP_SHIFT_N;
+ } else
+ *inst = GROUP_SHIFT_CL;
+ *buf_ptr = 0;
+ }
+
+ if (!(b & SLJIT_MEM)) {
+ *buf_ptr = U8(*buf_ptr | MOD_REG | (!(flags & EX86_SSE2_OP2) ? reg_lmap[b] : freg_lmap[b]));
+ buf_ptr++;
+ } else if (b & REG_MASK) {
+ reg_lmap_b = reg_lmap[b & REG_MASK];
+
+ if (!(b & OFFS_REG_MASK) || (b & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_SP)) {
+ if (immb != 0 || reg_lmap_b == 5) {
+ if (immb <= 127 && immb >= -128)
+ *buf_ptr |= 0x40;
+ else
+ *buf_ptr |= 0x80;
+ }
+
+ if (!(b & OFFS_REG_MASK))
+ *buf_ptr++ |= reg_lmap_b;
+ else {
+ buf_ptr[0] |= 0x04;
+ buf_ptr[1] = U8(reg_lmap_b | (reg_lmap[OFFS_REG(b)] << 3));
+ buf_ptr += 2;
+ }
+
+ if (immb != 0 || reg_lmap_b == 5) {
+ if (immb <= 127 && immb >= -128)
+ *buf_ptr++ = U8(immb); /* 8 bit displacement. */
+ else {
+ sljit_unaligned_store_s32(buf_ptr, (sljit_s32)immb); /* 32 bit displacement. */
+ buf_ptr += sizeof(sljit_s32);
+ }
+ }
+ } else {
+ if (reg_lmap_b == 5)
+ *buf_ptr |= 0x40;
+
+ buf_ptr[0] |= 0x04;
+ buf_ptr[1] = U8(reg_lmap_b | (reg_lmap[OFFS_REG(b)] << 3) | (immb << 6));
+ buf_ptr += 2;
+
+ if (reg_lmap_b == 5)
+ *buf_ptr++ = 0;
+ }
+ } else {
+ buf_ptr[0] |= 0x04;
+ buf_ptr[1] = 0x25;
+ buf_ptr += 2;
+ sljit_unaligned_store_s32(buf_ptr, (sljit_s32)immb); /* 32 bit displacement. */
+ buf_ptr += sizeof(sljit_s32);
+ }
+
+ if (a == SLJIT_IMM) {
+ if (flags & EX86_BYTE_ARG)
+ *buf_ptr = U8(imma);
+ else if (flags & EX86_HALF_ARG)
+ sljit_unaligned_store_s16(buf_ptr, (sljit_s16)imma);
+ else if (!(flags & EX86_SHIFT_INS))
+ sljit_unaligned_store_s32(buf_ptr, (sljit_s32)imma);
+ }
+
+ return inst;
+}
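
Most of the size accounting above targets the ModRM byte and its
optional SIB extension. The two layouts, with one worked encoding:

	#include <stdint.h>
	#include <stdio.h>

	/* mod(2) | reg(3) | rm(3); rm = 100 redirects to a SIB byte. */
	static uint8_t modrm(unsigned mod, unsigned reg, unsigned rm)
	{
		return (uint8_t)((mod << 6) | ((reg & 7) << 3) | (rm & 7));
	}

	/* scale(2) | index(3) | base(3). */
	static uint8_t sib(unsigned scale, unsigned index, unsigned base)
	{
		return (uint8_t)((scale << 6) | ((index & 7) << 3) | (base & 7));
	}

	int main(void)
	{
		/* mov rax, [rbx + rcx*4 + 16] -> 48 8b 44 8b 10 */
		printf("%02x %02x %02x %02x %02x\n",
			0x48, 0x8b, modrm(1, 0, 4), sib(2, 1, 3), 16);
		return 0;
	}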
+
+static sljit_s32 emit_vex_instruction(struct sljit_compiler *compiler, sljit_uw op,
+ /* The first and second register operand. */
+ sljit_s32 a, sljit_s32 v,
+ /* The general operand (not immediate). */
+ sljit_s32 b, sljit_sw immb)
+{
+ sljit_u8 *inst;
+ sljit_u8 vex = 0;
+ sljit_u8 vex_m = 0;
+ sljit_uw size;
+
+ SLJIT_ASSERT(((op & (EX86_PREF_F2 | EX86_PREF_F3 | EX86_PREF_66))
+ & ((op & (EX86_PREF_F2 | EX86_PREF_F3 | EX86_PREF_66)) - 1)) == 0);
+
+ op |= EX86_REX;
+
+ if (op & VEX_OP_0F38)
+ vex_m = 0x2;
+ else if (op & VEX_OP_0F3A)
+ vex_m = 0x3;
+
+ if ((op & VEX_W) || ((op & VEX_AUTO_W) && !compiler->mode32)) {
+ if (vex_m == 0)
+ vex_m = 0x1;
+
+ vex |= 0x80;
+ }
+
+ if (op & EX86_PREF_66)
+ vex |= 0x1;
+ else if (op & EX86_PREF_F2)
+ vex |= 0x3;
+ else if (op & EX86_PREF_F3)
+ vex |= 0x2;
+
+ op &= ~(EX86_PREF_66 | EX86_PREF_F2 | EX86_PREF_F3);
+
+ if (op & VEX_256)
+ vex |= 0x4;
+
+ vex = U8(vex | ((((op & VEX_SSE2_OPV) ? freg_map[v] : reg_map[v]) ^ 0xf) << 3));
+
+ size = op & ~(sljit_uw)0xff;
+ size |= (vex_m == 0) ? (EX86_VEX_EXT | 2) : 3;
+
+ inst = emit_x86_instruction(compiler, size, a, 0, b, immb);
+ FAIL_IF(!inst);
+
+ SLJIT_ASSERT((inst[-1] & 0xf0) == REX);
+
+ /* If X or B is present in REX prefix. */
+ if (vex_m == 0 && inst[-1] & 0x3)
+ vex_m = 0x1;
+
+ if (vex_m == 0) {
+ vex |= U8(((inst[-1] >> 2) ^ 0x1) << 7);
+
+ inst[-1] = 0xc5;
+ inst[0] = vex;
+ inst[1] = U8(op);
+ return SLJIT_SUCCESS;
+ }
+
+ vex_m |= U8((inst[-1] ^ 0x7) << 5);
+ inst[-1] = 0xc4;
+ inst[0] = vex_m;
+ inst[1] = vex;
+ inst[2] = U8(op);
return SLJIT_SUCCESS;
}
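
The function above rewrites the REX byte emitted by emit_x86_instruction
into one of the two VEX prefix forms. A sketch of those layouts; VEX
stores R/X/B and vvvv inverted, which is what the ^ 0x1 and ^ 0xf
operations compensate for:

	#include <stdint.h>

	/* Two-byte form: c5 | R' vvvv' L pp (primes mark inverted fields). */
	static void vex2(uint8_t out[2], unsigned r, unsigned vvvv, unsigned l, unsigned pp)
	{
		out[0] = 0xc5;
		out[1] = (uint8_t)(((r ^ 1) << 7) | (((vvvv ^ 0xf) & 0xf) << 3) | (l << 2) | pp);
	}

	/* Three-byte form: c4 | R' X' B' mmmmm | W vvvv' L pp. */
	static void vex3(uint8_t out[3], unsigned r, unsigned x, unsigned b,
		unsigned mmmmm, unsigned w, unsigned vvvv, unsigned l, unsigned pp)
	{
		out[0] = 0xc4;
		out[1] = (uint8_t)(((r ^ 1) << 7) | ((x ^ 1) << 6) | ((b ^ 1) << 5) | mmmmm);
		out[2] = (uint8_t)((w << 7) | (((vvvv ^ 0xf) & 0xf) << 3) | (l << 2) | pp);
	}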
+/* --------------------------------------------------------------------- */
+/* Enter / return */
+/* --------------------------------------------------------------------- */
+
static sljit_u8* generate_far_jump_code(struct sljit_jump *jump, sljit_u8 *code_ptr)
{
- sljit_s32 type = jump->flags >> TYPE_SHIFT;
+ sljit_uw type = jump->flags >> TYPE_SHIFT;
int short_addr = !(jump->flags & SLJIT_REWRITABLE_JUMP) && !(jump->flags & JUMP_LABEL) && (jump->u.target <= 0xffffffff);
@@ -50,7 +369,7 @@ static sljit_u8* generate_far_jump_code(struct sljit_jump *jump, sljit_u8 *code_
if (type < SLJIT_JUMP) {
/* Invert type. */
- *code_ptr++ = get_jump_code(type ^ 0x1) - 0x10;
+ *code_ptr++ = U8(get_jump_code(type ^ 0x1) - 0x10);
*code_ptr++ = short_addr ? (6 + 3) : (10 + 3);
}
@@ -63,13 +382,13 @@ static sljit_u8* generate_far_jump_code(struct sljit_jump *jump, sljit_u8 *code_
else if (short_addr)
sljit_unaligned_store_s32(code_ptr, (sljit_s32)jump->u.target);
else
- sljit_unaligned_store_sw(code_ptr, jump->u.target);
+ sljit_unaligned_store_sw(code_ptr, (sljit_sw)jump->u.target);
code_ptr += short_addr ? sizeof(sljit_s32) : sizeof(sljit_sw);
*code_ptr++ = REX_B;
*code_ptr++ = GROUP_FF;
- *code_ptr++ = MOD_REG | (type >= SLJIT_FAST_CALL ? CALL_rm : JMP_rm) | reg_lmap[TMP_REG2];
+ *code_ptr++ = U8(MOD_REG | (type >= SLJIT_FAST_CALL ? CALL_rm : JMP_rm) | reg_lmap[TMP_REG2]);
return code_ptr;
}
@@ -90,7 +409,7 @@ static sljit_u8* generate_put_label_code(struct sljit_put_label *put_label, slji
SLJIT_ASSERT((code_ptr[1] & 0xf8) == MOV_r_i32);
if ((code_ptr[0] & 0x07) != 0) {
- code_ptr[0] = (sljit_u8)(code_ptr[0] & ~0x08);
+ code_ptr[0] = U8(code_ptr[0] & ~0x08);
code_ptr += 2 + sizeof(sljit_s32);
}
else {
@@ -114,9 +433,9 @@ static sljit_u8* generate_put_label_code(struct sljit_put_label *put_label, slji
SLJIT_ASSERT(code_ptr[1] == MOV_rm_r);
- code_ptr[0] = (sljit_u8)(code_ptr[0] & ~0x4);
+ code_ptr[0] = U8(code_ptr[0] & ~0x4);
code_ptr[1] = MOV_rm_i32;
- code_ptr[2] = (sljit_u8)(code_ptr[2] & ~(0x7 << 3));
+ code_ptr[2] = U8(code_ptr[2] & ~(0x7 << 3));
code_ptr = (sljit_u8*)(put_label->addr - (2 + sizeof(sljit_uw)) + sizeof(sljit_s32));
put_label->addr = (sljit_uw)code_ptr;
@@ -124,35 +443,44 @@ static sljit_u8* generate_put_label_code(struct sljit_put_label *put_label, slji
return code_ptr;
}
+#ifdef _WIN64
+typedef struct {
+ sljit_sw regs[2];
+} sljit_sse2_reg;
+#endif /* _WIN64 */
+
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler,
sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
- sljit_s32 args, i, tmp, size, saved_register_size;
+ sljit_uw size;
+ sljit_s32 word_arg_count = 0;
+ sljit_s32 saved_arg_count = SLJIT_KEPT_SAVEDS_COUNT(options);
+ sljit_s32 saved_regs_size, tmp, i;
+#ifdef _WIN64
+ sljit_s32 saved_float_regs_size;
+ sljit_s32 saved_float_regs_offset = 0;
+ sljit_s32 float_arg_count = 0;
+#endif /* _WIN64 */
sljit_u8 *inst;
CHECK_ERROR();
CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
+ if (options & SLJIT_ENTER_REG_ARG)
+ arg_types = 0;
+
/* Emit ENDBR64 at function entry if needed. */
FAIL_IF(emit_endbranch(compiler));
compiler->mode32 = 0;
-#ifdef _WIN64
- /* Two/four register slots for parameters plus space for xmm6 register if needed. */
- if (fscratches >= 6 || fsaveds >= 1)
- compiler->locals_offset = 6 * sizeof(sljit_sw);
- else
- compiler->locals_offset = ((scratches > 2) ? 4 : 2) * sizeof(sljit_sw);
-#endif
-
/* Including the return address saved by the call instruction. */
- saved_register_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1);
+ saved_regs_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds - saved_arg_count, 1);
- tmp = saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - saveds) : SLJIT_FIRST_SAVED_REG;
- for (i = SLJIT_S0; i >= tmp; i--) {
+ tmp = SLJIT_S0 - saveds;
+ for (i = SLJIT_S0 - saved_arg_count; i > tmp; i--) {
size = reg_map[i] >= 8 ? 2 : 1;
inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
FAIL_IF(!inst);
@@ -172,55 +500,75 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
PUSH_REG(reg_lmap[i]);
}
- args = get_arg_count(arg_types);
+#ifdef _WIN64
+ local_size += SLJIT_LOCALS_OFFSET;
+ saved_float_regs_size = GET_SAVED_FLOAT_REGISTERS_SIZE(fscratches, fsaveds, sse2_reg);
- if (args > 0) {
- size = args * 3;
- inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
- FAIL_IF(!inst);
+ if (saved_float_regs_size > 0) {
+ saved_float_regs_offset = ((local_size + 0xf) & ~0xf);
+ local_size = saved_float_regs_offset + saved_float_regs_size;
+ }
+#else /* !_WIN64 */
+ SLJIT_ASSERT(SLJIT_LOCALS_OFFSET == 0);
+#endif /* _WIN64 */
- INC_SIZE(size);
+ arg_types >>= SLJIT_ARG_SHIFT;
+ while (arg_types > 0) {
+ if ((arg_types & SLJIT_ARG_MASK) < SLJIT_ARG_TYPE_F64) {
+ tmp = 0;
#ifndef _WIN64
- if (args > 0) {
- inst[0] = REX_W;
- inst[1] = MOV_r_rm;
- inst[2] = MOD_REG | (reg_map[SLJIT_S0] << 3) | 0x7 /* rdi */;
- inst += 3;
- }
- if (args > 1) {
- inst[0] = REX_W | REX_R;
- inst[1] = MOV_r_rm;
- inst[2] = MOD_REG | (reg_lmap[SLJIT_S1] << 3) | 0x6 /* rsi */;
- inst += 3;
- }
- if (args > 2) {
- inst[0] = REX_W | REX_R;
- inst[1] = MOV_r_rm;
- inst[2] = MOD_REG | (reg_lmap[SLJIT_S2] << 3) | 0x2 /* rdx */;
- }
-#else
- if (args > 0) {
- inst[0] = REX_W;
- inst[1] = MOV_r_rm;
- inst[2] = MOD_REG | (reg_map[SLJIT_S0] << 3) | 0x1 /* rcx */;
- inst += 3;
- }
- if (args > 1) {
- inst[0] = REX_W;
- inst[1] = MOV_r_rm;
- inst[2] = MOD_REG | (reg_map[SLJIT_S1] << 3) | 0x2 /* rdx */;
- inst += 3;
- }
- if (args > 2) {
- inst[0] = REX_W | REX_B;
- inst[1] = MOV_r_rm;
- inst[2] = MOD_REG | (reg_map[SLJIT_S2] << 3) | 0x0 /* r8 */;
+ switch (word_arg_count) {
+ case 0:
+ tmp = SLJIT_R2;
+ break;
+ case 1:
+ tmp = SLJIT_R1;
+ break;
+ case 2:
+ tmp = TMP_REG1;
+ break;
+ default:
+ tmp = SLJIT_R3;
+ break;
+ }
+#else /* !_WIN64 */
+ switch (word_arg_count + float_arg_count) {
+ case 0:
+ tmp = SLJIT_R3;
+ break;
+ case 1:
+ tmp = SLJIT_R1;
+ break;
+ case 2:
+ tmp = SLJIT_R2;
+ break;
+ default:
+ tmp = TMP_REG1;
+ break;
+ }
+#endif /* _WIN64 */
+ if (arg_types & SLJIT_ARG_TYPE_SCRATCH_REG) {
+ if (tmp != SLJIT_R0 + word_arg_count)
+ EMIT_MOV(compiler, SLJIT_R0 + word_arg_count, 0, tmp, 0);
+ } else {
+ EMIT_MOV(compiler, SLJIT_S0 - saved_arg_count, 0, tmp, 0);
+ saved_arg_count++;
+ }
+ word_arg_count++;
+ } else {
+#ifdef _WIN64
+ SLJIT_COMPILE_ASSERT(SLJIT_FR0 == 1, float_register_index_start);
+ float_arg_count++;
+ if (float_arg_count != float_arg_count + word_arg_count)
+ FAIL_IF(emit_sse2_load(compiler, (arg_types & SLJIT_ARG_MASK) == SLJIT_ARG_TYPE_F32,
+ float_arg_count, float_arg_count + word_arg_count, 0));
+#endif /* _WIN64 */
}
-#endif
+ arg_types >>= SLJIT_ARG_SHIFT;
}
- local_size = ((local_size + SLJIT_LOCALS_OFFSET + saved_register_size + 15) & ~15) - saved_register_size;
+ local_size = ((local_size + saved_regs_size + 0xf) & ~0xf) - saved_regs_size;
compiler->local_size = local_size;
#ifdef _WIN64
@@ -234,44 +582,45 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -4096 * 3);
}
else {
- EMIT_MOV(compiler, SLJIT_R0, 0, SLJIT_SP, 0);
- EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, (local_size - 1) >> 12);
-
- SLJIT_ASSERT (reg_map[SLJIT_R0] == 0);
+ EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, local_size >> 12);
- EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_MEM1(SLJIT_R0), -4096);
- FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
- SLJIT_R0, 0, SLJIT_R0, 0, SLJIT_IMM, 4096));
- FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
- TMP_REG1, 0, TMP_REG1, 0, SLJIT_IMM, 1));
+ EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_MEM1(SLJIT_SP), -4096);
+ BINARY_IMM32(SUB, 4096, SLJIT_SP, 0);
+ BINARY_IMM32(SUB, 1, TMP_REG1, 0);
inst = (sljit_u8*)ensure_buf(compiler, 1 + 2);
FAIL_IF(!inst);
INC_SIZE(2);
inst[0] = JNE_i8;
- inst[1] = (sljit_s8) -19;
+ inst[1] = (sljit_u8)-21;
+ local_size &= 0xfff;
}
- EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -local_size);
+ if (local_size > 0)
+ EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -local_size);
}
-#endif
+#endif /* _WIN64 */
- if (local_size > 0) {
- FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
- SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, local_size));
- }
+ if (local_size > 0)
+ BINARY_IMM32(SUB, local_size, SLJIT_SP, 0);
#ifdef _WIN64
- /* Save xmm6 register: movaps [rsp + 0x20], xmm6 */
- if (fscratches >= 6 || fsaveds >= 1) {
- inst = (sljit_u8*)ensure_buf(compiler, 1 + 5);
- FAIL_IF(!inst);
- INC_SIZE(5);
- *inst++ = GROUP_0F;
- sljit_unaligned_store_s32(inst, 0x20247429);
+ if (saved_float_regs_size > 0) {
+ compiler->mode32 = 1;
+
+ tmp = SLJIT_FS0 - fsaveds;
+ for (i = SLJIT_FS0; i > tmp; i--) {
+ FAIL_IF(emit_groupf(compiler, MOVAPS_xm_x | EX86_SSE2, i, SLJIT_MEM1(SLJIT_SP), saved_float_regs_offset));
+ saved_float_regs_offset += 16;
+ }
+
+ for (i = fscratches; i >= SLJIT_FIRST_SAVED_FLOAT_REG; i--) {
+ FAIL_IF(emit_groupf(compiler, MOVAPS_xm_x | EX86_SSE2, i, SLJIT_MEM1(SLJIT_SP), saved_float_regs_offset));
+ saved_float_regs_offset += 16;
+ }
}
-#endif
+#endif /* _WIN64 */
return SLJIT_SUCCESS;
}
@@ -280,67 +629,73 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *comp
sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
- sljit_s32 saved_register_size;
+ sljit_s32 saved_regs_size;
+#ifdef _WIN64
+ sljit_s32 saved_float_regs_size;
+#endif /* _WIN64 */
CHECK_ERROR();
CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
#ifdef _WIN64
- /* Two/four register slots for parameters plus space for xmm6 register if needed. */
- if (fscratches >= 6 || fsaveds >= 1)
- compiler->locals_offset = 6 * sizeof(sljit_sw);
- else
- compiler->locals_offset = ((scratches > 2) ? 4 : 2) * sizeof(sljit_sw);
-#endif
+ local_size += SLJIT_LOCALS_OFFSET;
+ saved_float_regs_size = GET_SAVED_FLOAT_REGISTERS_SIZE(fscratches, fsaveds, sse2_reg);
+
+ if (saved_float_regs_size > 0)
+ local_size = ((local_size + 0xf) & ~0xf) + saved_float_regs_size;
+#else /* !_WIN64 */
+ SLJIT_ASSERT(SLJIT_LOCALS_OFFSET == 0);
+#endif /* _WIN64 */
/* Including the return address saved by the call instruction. */
- saved_register_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1);
- compiler->local_size = ((local_size + SLJIT_LOCALS_OFFSET + saved_register_size + 15) & ~15) - saved_register_size;
+ saved_regs_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds - SLJIT_KEPT_SAVEDS_COUNT(options), 1);
+ compiler->local_size = ((local_size + saved_regs_size + 0xf) & ~0xf) - saved_regs_size;
return SLJIT_SUCCESS;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw)
+static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler, sljit_s32 is_return_to)
{
- sljit_s32 i, tmp, size;
+ sljit_uw size;
+ sljit_s32 local_size, i, tmp;
sljit_u8 *inst;
-
- CHECK_ERROR();
- CHECK(check_sljit_emit_return(compiler, op, src, srcw));
-
- FAIL_IF(emit_mov_before_return(compiler, op, src, srcw));
+#ifdef _WIN64
+ sljit_s32 saved_float_regs_offset;
+ sljit_s32 fscratches = compiler->fscratches;
+ sljit_s32 fsaveds = compiler->fsaveds;
+#endif /* _WIN64 */
#ifdef _WIN64
- /* Restore xmm6 register: movaps xmm6, [rsp + 0x20] */
- if (compiler->fscratches >= 6 || compiler->fsaveds >= 1) {
- inst = (sljit_u8*)ensure_buf(compiler, 1 + 5);
- FAIL_IF(!inst);
- INC_SIZE(5);
- *inst++ = GROUP_0F;
- sljit_unaligned_store_s32(inst, 0x20247428);
- }
-#endif
+ saved_float_regs_offset = GET_SAVED_FLOAT_REGISTERS_SIZE(fscratches, fsaveds, sse2_reg);
- if (compiler->local_size > 0) {
- if (compiler->local_size <= 127) {
- inst = (sljit_u8*)ensure_buf(compiler, 1 + 4);
- FAIL_IF(!inst);
- INC_SIZE(4);
- *inst++ = REX_W;
- *inst++ = GROUP_BINARY_83;
- *inst++ = MOD_REG | ADD | 4;
- *inst = compiler->local_size;
+ if (saved_float_regs_offset > 0) {
+ compiler->mode32 = 1;
+ saved_float_regs_offset = (compiler->local_size - saved_float_regs_offset) & ~0xf;
+
+ tmp = SLJIT_FS0 - fsaveds;
+ for (i = SLJIT_FS0; i > tmp; i--) {
+ FAIL_IF(emit_groupf(compiler, MOVAPS_x_xm | EX86_SSE2, i, SLJIT_MEM1(SLJIT_SP), saved_float_regs_offset));
+ saved_float_regs_offset += 16;
}
- else {
- inst = (sljit_u8*)ensure_buf(compiler, 1 + 7);
- FAIL_IF(!inst);
- INC_SIZE(7);
- *inst++ = REX_W;
- *inst++ = GROUP_BINARY_81;
- *inst++ = MOD_REG | ADD | 4;
- sljit_unaligned_store_s32(inst, compiler->local_size);
+
+ for (i = fscratches; i >= SLJIT_FIRST_SAVED_FLOAT_REG; i--) {
+ FAIL_IF(emit_groupf(compiler, MOVAPS_x_xm | EX86_SSE2, i, SLJIT_MEM1(SLJIT_SP), saved_float_regs_offset));
+ saved_float_regs_offset += 16;
}
+
+ compiler->mode32 = 0;
}
+#endif /* _WIN64 */
+
+ local_size = compiler->local_size;
+
+ if (is_return_to && compiler->scratches < SLJIT_FIRST_SAVED_REG && (compiler->saveds == SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) {
+ local_size += SSIZE_OF(sw);
+ is_return_to = 0;
+ }
+
+ if (local_size > 0)
+ BINARY_IMM32(ADD, local_size, SLJIT_SP, 0);
tmp = compiler->scratches;
for (i = SLJIT_FIRST_SAVED_REG; i <= tmp; i++) {
@@ -353,8 +708,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *comp
POP_REG(reg_lmap[i]);
}
- tmp = compiler->saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - compiler->saveds) : SLJIT_FIRST_SAVED_REG;
- for (i = tmp; i <= SLJIT_S0; i++) {
+ tmp = SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options);
+ for (i = SLJIT_S0 + 1 - compiler->saveds; i <= tmp; i++) {
size = reg_map[i] >= 8 ? 2 : 1;
inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
FAIL_IF(!inst);
@@ -364,243 +719,43 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *comp
POP_REG(reg_lmap[i]);
}
- inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
- FAIL_IF(!inst);
- INC_SIZE(1);
- RET();
- return SLJIT_SUCCESS;
-}
-
-/* --------------------------------------------------------------------- */
-/* Operators */
-/* --------------------------------------------------------------------- */
-
-static sljit_s32 emit_do_imm32(struct sljit_compiler *compiler, sljit_u8 rex, sljit_u8 opcode, sljit_sw imm)
-{
- sljit_u8 *inst;
- sljit_s32 length = 1 + (rex ? 1 : 0) + sizeof(sljit_s32);
+ if (is_return_to)
+ BINARY_IMM32(ADD, sizeof(sljit_sw), SLJIT_SP, 0);
- inst = (sljit_u8*)ensure_buf(compiler, 1 + length);
- FAIL_IF(!inst);
- INC_SIZE(length);
- if (rex)
- *inst++ = rex;
- *inst++ = opcode;
- sljit_unaligned_store_s32(inst, imm);
return SLJIT_SUCCESS;
}
-static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_s32 size,
- /* The register or immediate operand. */
- sljit_s32 a, sljit_sw imma,
- /* The general operand (not immediate). */
- sljit_s32 b, sljit_sw immb)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler *compiler)
{
- sljit_u8 *inst;
- sljit_u8 *buf_ptr;
- sljit_u8 rex = 0;
- sljit_s32 flags = size & ~0xf;
- sljit_s32 inst_size;
-
- /* The immediate operand must be 32 bit. */
- SLJIT_ASSERT(!(a & SLJIT_IMM) || compiler->mode32 || IS_HALFWORD(imma));
- /* Both cannot be switched on. */
- SLJIT_ASSERT((flags & (EX86_BIN_INS | EX86_SHIFT_INS)) != (EX86_BIN_INS | EX86_SHIFT_INS));
- /* Size flags not allowed for typed instructions. */
- SLJIT_ASSERT(!(flags & (EX86_BIN_INS | EX86_SHIFT_INS)) || (flags & (EX86_BYTE_ARG | EX86_HALF_ARG)) == 0);
- /* Both size flags cannot be switched on. */
- SLJIT_ASSERT((flags & (EX86_BYTE_ARG | EX86_HALF_ARG)) != (EX86_BYTE_ARG | EX86_HALF_ARG));
- /* SSE2 and immediate is not possible. */
- SLJIT_ASSERT(!(a & SLJIT_IMM) || !(flags & EX86_SSE2));
- SLJIT_ASSERT((flags & (EX86_PREF_F2 | EX86_PREF_F3)) != (EX86_PREF_F2 | EX86_PREF_F3)
- && (flags & (EX86_PREF_F2 | EX86_PREF_66)) != (EX86_PREF_F2 | EX86_PREF_66)
- && (flags & (EX86_PREF_F3 | EX86_PREF_66)) != (EX86_PREF_F3 | EX86_PREF_66));
-
- size &= 0xf;
- inst_size = size;
-
- if (!compiler->mode32 && !(flags & EX86_NO_REXW))
- rex |= REX_W;
- else if (flags & EX86_REX)
- rex |= REX;
-
- if (flags & (EX86_PREF_F2 | EX86_PREF_F3))
- inst_size++;
- if (flags & EX86_PREF_66)
- inst_size++;
-
- /* Calculate size of b. */
- inst_size += 1; /* mod r/m byte. */
- if (b & SLJIT_MEM) {
- if (!(b & OFFS_REG_MASK)) {
- if (NOT_HALFWORD(immb)) {
- PTR_FAIL_IF(emit_load_imm64(compiler, TMP_REG2, immb));
- immb = 0;
- if (b & REG_MASK)
- b |= TO_OFFS_REG(TMP_REG2);
- else
- b |= TMP_REG2;
- }
- else if (reg_lmap[b & REG_MASK] == 4)
- b |= TO_OFFS_REG(SLJIT_SP);
- }
-
- if ((b & REG_MASK) == SLJIT_UNUSED)
- inst_size += 1 + sizeof(sljit_s32); /* SIB byte required to avoid RIP based addressing. */
- else {
- if (reg_map[b & REG_MASK] >= 8)
- rex |= REX_B;
-
- if (immb != 0 && (!(b & OFFS_REG_MASK) || (b & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_SP))) {
- /* Immediate operand. */
- if (immb <= 127 && immb >= -128)
- inst_size += sizeof(sljit_s8);
- else
- inst_size += sizeof(sljit_s32);
- }
- else if (reg_lmap[b & REG_MASK] == 5)
- inst_size += sizeof(sljit_s8);
-
- if ((b & OFFS_REG_MASK) != SLJIT_UNUSED) {
- inst_size += 1; /* SIB byte. */
- if (reg_map[OFFS_REG(b)] >= 8)
- rex |= REX_X;
- }
- }
- }
- else if (!(flags & EX86_SSE2_OP2)) {
- if (reg_map[b] >= 8)
- rex |= REX_B;
- }
- else if (freg_map[b] >= 8)
- rex |= REX_B;
-
- if (a & SLJIT_IMM) {
- if (flags & EX86_BIN_INS) {
- if (imma <= 127 && imma >= -128) {
- inst_size += 1;
- flags |= EX86_BYTE_ARG;
- } else
- inst_size += 4;
- }
- else if (flags & EX86_SHIFT_INS) {
- imma &= compiler->mode32 ? 0x1f : 0x3f;
- if (imma != 1) {
- inst_size ++;
- flags |= EX86_BYTE_ARG;
- }
- } else if (flags & EX86_BYTE_ARG)
- inst_size++;
- else if (flags & EX86_HALF_ARG)
- inst_size += sizeof(short);
- else
- inst_size += sizeof(sljit_s32);
- }
- else {
- SLJIT_ASSERT(!(flags & EX86_SHIFT_INS) || a == SLJIT_PREF_SHIFT_REG);
- /* reg_map[SLJIT_PREF_SHIFT_REG] is less than 8. */
- if (!(flags & EX86_SSE2_OP1)) {
- if (reg_map[a] >= 8)
- rex |= REX_R;
- }
- else if (freg_map[a] >= 8)
- rex |= REX_R;
- }
-
- if (rex)
- inst_size++;
-
- inst = (sljit_u8*)ensure_buf(compiler, 1 + inst_size);
- PTR_FAIL_IF(!inst);
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_return_void(compiler));
- /* Encoding the byte. */
- INC_SIZE(inst_size);
- if (flags & EX86_PREF_F2)
- *inst++ = 0xf2;
- if (flags & EX86_PREF_F3)
- *inst++ = 0xf3;
- if (flags & EX86_PREF_66)
- *inst++ = 0x66;
- if (rex)
- *inst++ = rex;
- buf_ptr = inst + size;
+ compiler->mode32 = 0;
- /* Encode mod/rm byte. */
- if (!(flags & EX86_SHIFT_INS)) {
- if ((flags & EX86_BIN_INS) && (a & SLJIT_IMM))
- *inst = (flags & EX86_BYTE_ARG) ? GROUP_BINARY_83 : GROUP_BINARY_81;
+ FAIL_IF(emit_stack_frame_release(compiler, 0));
+ return emit_byte(compiler, RET_near);
+}
- if (a & SLJIT_IMM)
- *buf_ptr = 0;
- else if (!(flags & EX86_SSE2_OP1))
- *buf_ptr = reg_lmap[a] << 3;
- else
- *buf_ptr = freg_lmap[a] << 3;
- }
- else {
- if (a & SLJIT_IMM) {
- if (imma == 1)
- *inst = GROUP_SHIFT_1;
- else
- *inst = GROUP_SHIFT_N;
- } else
- *inst = GROUP_SHIFT_CL;
- *buf_ptr = 0;
- }
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_to(struct sljit_compiler *compiler,
+ sljit_s32 src, sljit_sw srcw)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_return_to(compiler, src, srcw));
- if (!(b & SLJIT_MEM))
- *buf_ptr++ |= MOD_REG + ((!(flags & EX86_SSE2_OP2)) ? reg_lmap[b] : freg_lmap[b]);
- else if ((b & REG_MASK) != SLJIT_UNUSED) {
- if ((b & OFFS_REG_MASK) == SLJIT_UNUSED || (b & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_SP)) {
- if (immb != 0 || reg_lmap[b & REG_MASK] == 5) {
- if (immb <= 127 && immb >= -128)
- *buf_ptr |= 0x40;
- else
- *buf_ptr |= 0x80;
- }
+ compiler->mode32 = 0;
- if ((b & OFFS_REG_MASK) == SLJIT_UNUSED)
- *buf_ptr++ |= reg_lmap[b & REG_MASK];
- else {
- *buf_ptr++ |= 0x04;
- *buf_ptr++ = reg_lmap[b & REG_MASK] | (reg_lmap[OFFS_REG(b)] << 3);
- }
+ if ((src & SLJIT_MEM) || (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options)))) {
+ ADJUST_LOCAL_OFFSET(src, srcw);
- if (immb != 0 || reg_lmap[b & REG_MASK] == 5) {
- if (immb <= 127 && immb >= -128)
- *buf_ptr++ = immb; /* 8 bit displacement. */
- else {
- sljit_unaligned_store_s32(buf_ptr, immb); /* 32 bit displacement. */
- buf_ptr += sizeof(sljit_s32);
- }
- }
- }
- else {
- if (reg_lmap[b & REG_MASK] == 5)
- *buf_ptr |= 0x40;
- *buf_ptr++ |= 0x04;
- *buf_ptr++ = reg_lmap[b & REG_MASK] | (reg_lmap[OFFS_REG(b)] << 3) | (immb << 6);
- if (reg_lmap[b & REG_MASK] == 5)
- *buf_ptr++ = 0;
- }
- }
- else {
- *buf_ptr++ |= 0x04;
- *buf_ptr++ = 0x25;
- sljit_unaligned_store_s32(buf_ptr, immb); /* 32 bit displacement. */
- buf_ptr += sizeof(sljit_s32);
+ EMIT_MOV(compiler, TMP_REG2, 0, src, srcw);
+ src = TMP_REG2;
+ srcw = 0;
}
- if (a & SLJIT_IMM) {
- if (flags & EX86_BYTE_ARG)
- *buf_ptr = imma;
- else if (flags & EX86_HALF_ARG)
- sljit_unaligned_store_s16(buf_ptr, imma);
- else if (!(flags & EX86_SHIFT_INS))
- sljit_unaligned_store_s32(buf_ptr, imma);
- }
+ FAIL_IF(emit_stack_frame_release(compiler, 1));
- return !(flags & EX86_SHIFT_INS) ? inst : (inst + 1);
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_ijump(compiler, SLJIT_JUMP, src, srcw);
}
/* --------------------------------------------------------------------- */
@@ -609,43 +764,38 @@ static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_s32
#ifndef _WIN64
-static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *src_ptr, sljit_sw srcw)
+static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *src_ptr)
{
sljit_s32 src = src_ptr ? (*src_ptr) : 0;
sljit_s32 word_arg_count = 0;
SLJIT_ASSERT(reg_map[SLJIT_R1] == 6 && reg_map[SLJIT_R3] == 1 && reg_map[TMP_REG1] == 2);
-
- compiler->mode32 = 0;
+ SLJIT_ASSERT(!(src & SLJIT_MEM));
/* Remove return value. */
- arg_types >>= SLJIT_DEF_SHIFT;
+ arg_types >>= SLJIT_ARG_SHIFT;
while (arg_types) {
- if ((arg_types & SLJIT_DEF_MASK) < SLJIT_ARG_TYPE_F32)
+ if ((arg_types & SLJIT_ARG_MASK) < SLJIT_ARG_TYPE_F64)
word_arg_count++;
- arg_types >>= SLJIT_DEF_SHIFT;
+ arg_types >>= SLJIT_ARG_SHIFT;
}
if (word_arg_count == 0)
return SLJIT_SUCCESS;
- if (src & SLJIT_MEM) {
- ADJUST_LOCAL_OFFSET(src, srcw);
- EMIT_MOV(compiler, TMP_REG2, 0, src, srcw);
- *src_ptr = TMP_REG2;
+ if (word_arg_count >= 3) {
+ if (src == SLJIT_R2)
+ *src_ptr = TMP_REG1;
+ EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_R2, 0);
}
- else if (src == SLJIT_R2 && word_arg_count >= SLJIT_R2)
- *src_ptr = TMP_REG1;
- if (word_arg_count >= 3)
- EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_R2, 0);
return emit_mov(compiler, SLJIT_R2, 0, SLJIT_R0, 0);
}
#else
-static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *src_ptr, sljit_sw srcw)
+static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *src_ptr)
{
sljit_s32 src = src_ptr ? (*src_ptr) : 0;
sljit_s32 arg_count = 0;
@@ -656,16 +806,16 @@ static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_t
static sljit_u8 word_arg_regs[5] = { 0, SLJIT_R3, SLJIT_R1, SLJIT_R2, TMP_REG1 };
SLJIT_ASSERT(reg_map[SLJIT_R3] == 1 && reg_map[SLJIT_R1] == 2 && reg_map[SLJIT_R2] == 8 && reg_map[TMP_REG1] == 9);
+ SLJIT_ASSERT(!(src & SLJIT_MEM));
- compiler->mode32 = 0;
- arg_types >>= SLJIT_DEF_SHIFT;
+ arg_types >>= SLJIT_ARG_SHIFT;
while (arg_types) {
- types = (types << SLJIT_DEF_SHIFT) | (arg_types & SLJIT_DEF_MASK);
+ types = (types << SLJIT_ARG_SHIFT) | (arg_types & SLJIT_ARG_MASK);
- switch (arg_types & SLJIT_DEF_MASK) {
- case SLJIT_ARG_TYPE_F32:
+ switch (arg_types & SLJIT_ARG_MASK) {
case SLJIT_ARG_TYPE_F64:
+ case SLJIT_ARG_TYPE_F32:
arg_count++;
float_arg_count++;
@@ -687,29 +837,23 @@ static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_t
break;
}
- arg_types >>= SLJIT_DEF_SHIFT;
+ arg_types >>= SLJIT_ARG_SHIFT;
}
if (!data_trandfer)
return SLJIT_SUCCESS;
- if (src & SLJIT_MEM) {
- ADJUST_LOCAL_OFFSET(src, srcw);
- EMIT_MOV(compiler, TMP_REG2, 0, src, srcw);
- *src_ptr = TMP_REG2;
- }
-
while (types) {
- switch (types & SLJIT_DEF_MASK) {
- case SLJIT_ARG_TYPE_F32:
+ switch (types & SLJIT_ARG_MASK) {
+ case SLJIT_ARG_TYPE_F64:
if (arg_count != float_arg_count)
- FAIL_IF(emit_sse2_load(compiler, 1, arg_count, float_arg_count, 0));
+ FAIL_IF(emit_sse2_load(compiler, 0, arg_count, float_arg_count, 0));
arg_count--;
float_arg_count--;
break;
- case SLJIT_ARG_TYPE_F64:
+ case SLJIT_ARG_TYPE_F32:
if (arg_count != float_arg_count)
- FAIL_IF(emit_sse2_load(compiler, 0, arg_count, float_arg_count, 0));
+ FAIL_IF(emit_sse2_load(compiler, 1, arg_count, float_arg_count, 0));
arg_count--;
float_arg_count--;
break;
@@ -721,7 +865,7 @@ static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_t
break;
}
- types >>= SLJIT_DEF_SHIFT;
+ types >>= SLJIT_ARG_SHIFT;
}
return SLJIT_SUCCESS;
@@ -735,13 +879,17 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compile
CHECK_ERROR_PTR();
CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types));
- PTR_FAIL_IF(call_with_args(compiler, arg_types, NULL, 0));
+ compiler->mode32 = 0;
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
- || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->skip_checks = 1;
-#endif
+ if ((type & 0xff) != SLJIT_CALL_REG_ARG)
+ PTR_FAIL_IF(call_with_args(compiler, arg_types, NULL));
+ if (type & SLJIT_CALL_RETURN) {
+ PTR_FAIL_IF(emit_stack_frame_release(compiler, 0));
+ type = SLJIT_JUMP | (type & SLJIT_REWRITABLE_JUMP);
+ }
+
+ SLJIT_SKIP_CHECKS(compiler);
return sljit_emit_jump(compiler, type);
}
@@ -752,36 +900,40 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compi
CHECK_ERROR();
CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw));
- FAIL_IF(call_with_args(compiler, arg_types, &src, srcw));
+ compiler->mode32 = 0;
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
- || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->skip_checks = 1;
-#endif
+ if (src & SLJIT_MEM) {
+ ADJUST_LOCAL_OFFSET(src, srcw);
+ EMIT_MOV(compiler, TMP_REG2, 0, src, srcw);
+ src = TMP_REG2;
+ }
+ if (type & SLJIT_CALL_RETURN) {
+ if (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) {
+ EMIT_MOV(compiler, TMP_REG2, 0, src, srcw);
+ src = TMP_REG2;
+ }
+
+ FAIL_IF(emit_stack_frame_release(compiler, 0));
+ }
+
+ if ((type & 0xff) != SLJIT_CALL_REG_ARG)
+ FAIL_IF(call_with_args(compiler, arg_types, &src));
+
+ if (type & SLJIT_CALL_RETURN)
+ type = SLJIT_JUMP;
+
+ SLJIT_SKIP_CHECKS(compiler);
return sljit_emit_ijump(compiler, type, src, srcw);
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
+static sljit_s32 emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
{
sljit_u8 *inst;
- CHECK_ERROR();
- CHECK(check_sljit_emit_fast_enter(compiler, dst, dstw));
- ADJUST_LOCAL_OFFSET(dst, dstw);
-
- /* For UNUSED dst. Uncommon, but possible. */
- if (dst == SLJIT_UNUSED)
- dst = TMP_REG1;
-
if (FAST_IS_REG(dst)) {
- if (reg_map[dst] < 8) {
- inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
- FAIL_IF(!inst);
- INC_SIZE(1);
- POP_REG(reg_lmap[dst]);
- return SLJIT_SUCCESS;
- }
+ if (reg_map[dst] < 8)
+ return emit_byte(compiler, U8(POP_r + reg_lmap[dst]));
inst = (sljit_u8*)ensure_buf(compiler, 1 + 2);
FAIL_IF(!inst);
@@ -795,7 +947,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *
compiler->mode32 = 1;
inst = emit_x86_instruction(compiler, 1, 0, 0, dst, dstw);
FAIL_IF(!inst);
- *inst++ = POP_rm;
+ *inst = POP_rm;
return SLJIT_SUCCESS;
}
@@ -825,8 +977,8 @@ static sljit_s32 emit_fast_return(struct sljit_compiler *compiler, sljit_s32 src
compiler->mode32 = 1;
inst = emit_x86_instruction(compiler, 1, 0, 0, src, srcw);
FAIL_IF(!inst);
- *inst++ = GROUP_FF;
- *inst |= PUSH_rm;
+ inst[0] = GROUP_FF;
+ inst[1] |= PUSH_rm;
inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
FAIL_IF(!inst);
@@ -837,10 +989,100 @@ static sljit_s32 emit_fast_return(struct sljit_compiler *compiler, sljit_s32 src
return SLJIT_SUCCESS;
}
+static sljit_s32 sljit_emit_get_return_address(struct sljit_compiler *compiler,
+ sljit_s32 dst, sljit_sw dstw)
+{
+ sljit_s32 saved_regs_size;
+
+ compiler->mode32 = 0;
+ saved_regs_size = GET_SAVED_REGISTERS_SIZE(compiler->scratches, compiler->saveds - SLJIT_KEPT_SAVEDS_COUNT(compiler->options), 0);
+ return emit_mov(compiler, dst, dstw, SLJIT_MEM1(SLJIT_SP), compiler->local_size + saved_regs_size);
+}
+
/* --------------------------------------------------------------------- */
-/* Extend input */
+/* Other operations */
/* --------------------------------------------------------------------- */
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 reg,
+ sljit_s32 mem, sljit_sw memw)
+{
+ sljit_u8* inst;
+ sljit_s32 i, next, reg_idx;
+ sljit_u8 regs[2];
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw));
+
+ if (!(reg & REG_PAIR_MASK))
+ return sljit_emit_mem_unaligned(compiler, type, reg, mem, memw);
+
+ ADJUST_LOCAL_OFFSET(mem, memw);
+
+ compiler->mode32 = 0;
+
+ if ((mem & REG_MASK) == 0) {
+ EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, memw);
+
+ mem = SLJIT_MEM1(TMP_REG1);
+ memw = 0;
+ } else if (!(mem & OFFS_REG_MASK) && ((memw < HALFWORD_MIN) || (memw > HALFWORD_MAX - SSIZE_OF(sw)))) {
+ EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, memw);
+
+ mem = SLJIT_MEM2(mem & REG_MASK, TMP_REG1);
+ memw = 0;
+ }
+
+ regs[0] = U8(REG_PAIR_FIRST(reg));
+ regs[1] = U8(REG_PAIR_SECOND(reg));
+
+ next = SSIZE_OF(sw);
+
+ if (!(type & SLJIT_MEM_STORE) && (regs[0] == (mem & REG_MASK) || regs[0] == OFFS_REG(mem))) {
+ if (regs[1] == (mem & REG_MASK) || regs[1] == OFFS_REG(mem)) {
+ /* Base and offset cannot be TMP_REG1. */
+ EMIT_MOV(compiler, TMP_REG1, 0, OFFS_REG(mem), 0);
+
+ if (regs[1] == OFFS_REG(mem))
+ next = -SSIZE_OF(sw);
+
+ mem = (mem & ~OFFS_REG_MASK) | TO_OFFS_REG(TMP_REG1);
+ } else {
+ next = -SSIZE_OF(sw);
+
+ if (!(mem & OFFS_REG_MASK))
+ memw += SSIZE_OF(sw);
+ }
+ }
+
+ for (i = 0; i < 2; i++) {
+ reg_idx = next > 0 ? i : (i ^ 0x1);
+ reg = regs[reg_idx];
+
+ if ((mem & OFFS_REG_MASK) && (reg_idx == 1)) {
+ inst = (sljit_u8*)ensure_buf(compiler, (sljit_uw)(1 + 5));
+ FAIL_IF(!inst);
+
+ INC_SIZE(5);
+
+ inst[0] = U8(REX_W | ((reg_map[reg] >= 8) ? REX_R : 0) | ((reg_map[mem & REG_MASK] >= 8) ? REX_B : 0) | ((reg_map[OFFS_REG(mem)] >= 8) ? REX_X : 0));
+ inst[1] = (type & SLJIT_MEM_STORE) ? MOV_rm_r : MOV_r_rm;
+ inst[2] = 0x44 | U8(reg_lmap[reg] << 3);
+ inst[3] = U8(memw << 6) | U8(reg_lmap[OFFS_REG(mem)] << 3) | reg_lmap[mem & REG_MASK];
+ inst[4] = sizeof(sljit_sw);
+ } else if (type & SLJIT_MEM_STORE) {
+ EMIT_MOV(compiler, mem, memw, reg, 0);
+ } else {
+ EMIT_MOV(compiler, reg, 0, mem, memw);
+ }
+
+ if (!(mem & OFFS_REG_MASK))
+ memw += next;
+ }
+
+ return SLJIT_SUCCESS;
+}
+
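The reversed iteration order in sljit_emit_mem above (next = -SSIZE_OF(sw)) exists because a load into the first register of the pair may clobber the base or index of the address before the second slot is read. A minimal sketch of the hazard under assumed names, emulating a register file in C:

#include <stdio.h>
#include <stdint.h>

/* Each access re-reads regs[r_base], like a real instruction would. */
static void load_pair(uintptr_t regs[], int r_first, int r_second, int r_base)
{
	if (r_first == r_base) {
		/* Descending order: fetch the second word while the base
		   is intact, and overwrite the base register last. */
		regs[r_second] = (uintptr_t)((int64_t *)regs[r_base])[1];
		regs[r_first] = (uintptr_t)((int64_t *)regs[r_base])[0];
	} else {
		regs[r_first] = (uintptr_t)((int64_t *)regs[r_base])[0];
		regs[r_second] = (uintptr_t)((int64_t *)regs[r_base])[1];
	}
}

int main(void)
{
	int64_t data[2] = { 10, 20 };
	uintptr_t regs[2];

	regs[0] = (uintptr_t)data; /* r0 is both the base and the first target */
	load_pair(regs, 0, 1, 0);
	printf("%ld %ld\n", (long)regs[0], (long)regs[1]); /* prints 10 20 */
	return 0;
}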
static sljit_s32 emit_mov_int(struct sljit_compiler *compiler, sljit_s32 sign,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
@@ -850,18 +1092,15 @@ static sljit_s32 emit_mov_int(struct sljit_compiler *compiler, sljit_s32 sign,
compiler->mode32 = 0;
- if (dst == SLJIT_UNUSED && !(src & SLJIT_MEM))
- return SLJIT_SUCCESS; /* Empty instruction. */
-
- if (src & SLJIT_IMM) {
+ if (src == SLJIT_IMM) {
if (FAST_IS_REG(dst)) {
- if (sign || ((sljit_uw)srcw <= 0x7fffffff)) {
- inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, (sljit_sw)(sljit_s32)srcw, dst, dstw);
- FAIL_IF(!inst);
- *inst = MOV_rm_i32;
- return SLJIT_SUCCESS;
- }
- return emit_load_imm64(compiler, dst, srcw);
+ if (!sign || ((sljit_u32)srcw <= 0x7fffffff))
+ return emit_do_imm32(compiler, reg_map[dst] <= 7 ? 0 : REX_B, U8(MOV_r_i32 | reg_lmap[dst]), srcw);
+
+ inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, (sljit_sw)(sljit_s32)srcw, dst, dstw);
+ FAIL_IF(!inst);
+ *inst = MOV_rm_i32;
+ return SLJIT_SUCCESS;
}
compiler->mode32 = 1;
inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, (sljit_sw)(sljit_s32)srcw, dst, dstw);
@@ -879,10 +1118,10 @@ static sljit_s32 emit_mov_int(struct sljit_compiler *compiler, sljit_s32 sign,
if (sign) {
inst = emit_x86_instruction(compiler, 1, dst_r, 0, src, srcw);
FAIL_IF(!inst);
- *inst++ = MOVSXD_r_rm;
+ *inst = MOVSXD_r_rm;
} else {
compiler->mode32 = 1;
- FAIL_IF(emit_mov(compiler, dst_r, 0, src, srcw));
+ EMIT_MOV(compiler, dst_r, 0, src, srcw);
compiler->mode32 = 0;
}
}
@@ -898,21 +1137,218 @@ static sljit_s32 emit_mov_int(struct sljit_compiler *compiler, sljit_s32 sign,
return SLJIT_SUCCESS;
}
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_uw(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 src, sljit_sw srcw)
+{
+ sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG;
+ sljit_u8 *inst, *jump_inst1, *jump_inst2;
+ sljit_uw size1, size2;
+
+ compiler->mode32 = 0;
+
+ if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_U32) {
+ if (src != SLJIT_IMM) {
+ compiler->mode32 = 1;
+ EMIT_MOV(compiler, TMP_REG1, 0, src, srcw);
+ compiler->mode32 = 0;
+ } else
+ FAIL_IF(emit_do_imm32(compiler, reg_map[TMP_REG1] <= 7 ? 0 : REX_B, U8(MOV_r_i32 | reg_lmap[TMP_REG1]), srcw));
+
+ FAIL_IF(emit_groupf(compiler, CVTSI2SD_x_rm | EX86_SELECT_F2_F3(op) | EX86_SSE2_OP1, dst_r, TMP_REG1, 0));
+
+ compiler->mode32 = 1;
+
+ if (dst_r == TMP_FREG)
+ return emit_sse2_store(compiler, op & SLJIT_32, dst, dstw, TMP_FREG);
+ return SLJIT_SUCCESS;
+ }
+
+ if (!FAST_IS_REG(src)) {
+ EMIT_MOV(compiler, TMP_REG1, 0, src, srcw);
+ src = TMP_REG1;
+ }
+
+ BINARY_IMM32(CMP, 0, src, 0);
+
+ inst = (sljit_u8*)ensure_buf(compiler, 1 + 2);
+ FAIL_IF(!inst);
+ INC_SIZE(2);
+ inst[0] = JL_i8;
+ jump_inst1 = inst;
+
+ size1 = compiler->size;
+
+ compiler->mode32 = 0;
+ FAIL_IF(emit_groupf(compiler, CVTSI2SD_x_rm | EX86_SELECT_F2_F3(op) | EX86_SSE2_OP1, dst_r, src, 0));
+
+ inst = (sljit_u8*)ensure_buf(compiler, 1 + 2);
+ FAIL_IF(!inst);
+ INC_SIZE(2);
+ inst[0] = JMP_i8;
+ jump_inst2 = inst;
+
+ size2 = compiler->size;
+
+ jump_inst1[1] = U8(size2 - size1);
+
+ if (src != TMP_REG1)
+ EMIT_MOV(compiler, TMP_REG1, 0, src, 0);
+
+ EMIT_MOV(compiler, TMP_REG2, 0, src, 0);
+
+ inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_IMM, 1, TMP_REG1, 0);
+ FAIL_IF(!inst);
+ inst[1] |= SHR;
+
+ compiler->mode32 = 1;
+ BINARY_IMM32(AND, 1, TMP_REG2, 0);
+
+ compiler->mode32 = 0;
+ inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, TMP_REG2, 0);
+ FAIL_IF(!inst);
+ inst[0] = OR_r_rm;
+
+ FAIL_IF(emit_groupf(compiler, CVTSI2SD_x_rm | EX86_SELECT_F2_F3(op) | EX86_SSE2_OP1, dst_r, TMP_REG1, 0));
+ compiler->mode32 = 1;
+ FAIL_IF(emit_groupf(compiler, ADDSD_x_xm | EX86_SELECT_F2_F3(op) | EX86_SSE2, dst_r, dst_r, 0));
+
+ jump_inst2[1] = U8(compiler->size - size2);
+
+ if (dst_r == TMP_FREG)
+ return emit_sse2_store(compiler, op & SLJIT_32, dst, dstw, TMP_FREG);
+ return SLJIT_SUCCESS;
+}
+
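The branchy second half of sljit_emit_fop1_conv_f64_from_uw handles 64-bit values whose sign bit is set, where a signed CVTSI2SD would produce a negative result: it converts (x >> 1) | (x & 1) instead and then doubles the converted value, keeping the low bit ORed in so rounding still observes it. The same trick in portable C, as an illustrative sketch:

#include <stdio.h>
#include <stdint.h>

static double u64_to_double(uint64_t x)
{
	if ((int64_t)x >= 0) /* the JL fast path: fits a signed convert */
		return (double)(int64_t)x;

	/* Halve with the low bit folded back in, then compensate by
	   adding the result to itself (the ADDSD at the end above). */
	double half = (double)(int64_t)((x >> 1) | (x & 1));
	return half + half;
}

int main(void)
{
	printf("%.1f\n", u64_to_double(1)); /* 1.0 */
	printf("%a\n", u64_to_double(UINT64_MAX)); /* 0x1p+64 */
	return 0;
}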
+static sljit_s32 sljit_emit_fset(struct sljit_compiler *compiler,
+ sljit_s32 freg, sljit_u8 rex, sljit_s32 is_zero)
+{
+ sljit_u8 *inst;
+ sljit_u32 size;
+
+ if (is_zero) {
+ rex = freg_map[freg] >= 8 ? (REX_R | REX_B) : 0;
+ } else {
+ if (freg_map[freg] >= 8)
+ rex |= REX_R;
+ if (reg_map[TMP_REG1] >= 8)
+ rex |= REX_B;
+ }
+
+ size = (rex != 0) ? 5 : 4;
+
+ inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
+ FAIL_IF(!inst);
+ INC_SIZE(size);
+
+ *inst++ = GROUP_66;
+ if (rex != 0)
+ *inst++ = rex;
+ inst[0] = GROUP_0F;
+
+ if (is_zero) {
+ inst[1] = PXOR_x_xm;
+ inst[2] = U8(freg_lmap[freg] | (freg_lmap[freg] << 3) | MOD_REG);
+ } else {
+ inst[1] = MOVD_x_rm;
+ inst[2] = U8(reg_lmap[TMP_REG1] | (freg_lmap[freg] << 3) | MOD_REG);
+ }
+
+ return SLJIT_SUCCESS;
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fset32(struct sljit_compiler *compiler,
+ sljit_s32 freg, sljit_f32 value)
+{
+ union {
+ sljit_s32 imm;
+ sljit_f32 value;
+ } u;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fset32(compiler, freg, value));
+
+ u.value = value;
+
+ if (u.imm != 0) {
+ compiler->mode32 = 1;
+ EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, u.imm);
+ }
+
+ return sljit_emit_fset(compiler, freg, 0, u.imm == 0);
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fset64(struct sljit_compiler *compiler,
+ sljit_s32 freg, sljit_f64 value)
+{
+ union {
+ sljit_sw imm;
+ sljit_f64 value;
+ } u;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fset64(compiler, freg, value));
+
+ u.value = value;
+
+ if (u.imm != 0) {
+ compiler->mode32 = 0;
+ EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, u.imm);
+ }
+
+ return sljit_emit_fset(compiler, freg, REX_W, u.imm == 0);
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fcopy(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 freg, sljit_s32 reg)
+{
+ sljit_u8 *inst;
+ sljit_u32 size;
+ sljit_u8 rex = 0;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fcopy(compiler, op, freg, reg));
+
+ if (!(op & SLJIT_32))
+ rex = REX_W;
+
+ if (freg_map[freg] >= 8)
+ rex |= REX_R;
+
+ if (reg_map[reg] >= 8)
+ rex |= REX_B;
+
+ size = (rex != 0) ? 5 : 4;
+
+ inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
+ FAIL_IF(!inst);
+ INC_SIZE(size);
+
+ *inst++ = GROUP_66;
+ if (rex != 0)
+ *inst++ = rex;
+ inst[0] = GROUP_0F;
+ inst[1] = GET_OPCODE(op) == SLJIT_COPY_TO_F64 ? MOVD_x_rm : MOVD_rm_x;
+ inst[2] = U8(reg_lmap[reg] | (freg_lmap[freg] << 3) | MOD_REG);
+
+ return SLJIT_SUCCESS;
+}
+
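sljit_emit_fset and sljit_emit_fcopy hand-assemble the 66 [REX] 0F opcode ModRM form shared by PXOR and MOVD. A small dump of the byte patterns the code above should produce for a few assumed register choices (xmm0 and rax, purely illustrative):

#include <stdio.h>

static void dump(const char *name, const unsigned char *p, unsigned n)
{
	printf("%-8s", name);
	while (n--)
		printf(" %02x", *p++);
	printf("\n");
}

int main(void)
{
	/* The last byte is the ModRM: MOD_REG (0xc0) | (freg << 3) | reg,
	   matching the inst[2] computations above. */
	const unsigned char pxor[] = { 0x66, 0x0f, 0xef, 0xc0 }; /* pxor xmm0, xmm0 */
	const unsigned char movq[] = { 0x66, 0x48, 0x0f, 0x6e, 0xc0 }; /* movq xmm0, rax (REX_W) */
	const unsigned char movd[] = { 0x66, 0x0f, 0x7e, 0xc0 }; /* movd eax, xmm0 */

	dump("pxor", pxor, sizeof(pxor));
	dump("movq", movq, sizeof(movq));
	dump("movd", movd, sizeof(movd));
	return 0;
}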
static sljit_s32 skip_frames_before_return(struct sljit_compiler *compiler)
{
sljit_s32 tmp, size;
/* Don't adjust shadow stack if it isn't enabled. */
- if (!cpu_has_shadow_stack ())
+ if (!cpu_has_shadow_stack())
return SLJIT_SUCCESS;
size = compiler->local_size;
tmp = compiler->scratches;
if (tmp >= SLJIT_FIRST_SAVED_REG)
- size += (tmp - SLJIT_FIRST_SAVED_REG + 1) * sizeof(sljit_uw);
+ size += (tmp - SLJIT_FIRST_SAVED_REG + 1) * SSIZE_OF(sw);
tmp = compiler->saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - compiler->saveds) : SLJIT_FIRST_SAVED_REG;
if (SLJIT_S0 >= tmp)
- size += (SLJIT_S0 - tmp + 1) * sizeof(sljit_uw);
+ size += (SLJIT_S0 - tmp + 1) * SSIZE_OF(sw);
- return adjust_shadow_stack(compiler, SLJIT_UNUSED, 0, SLJIT_SP, size);
+ return adjust_shadow_stack(compiler, SLJIT_MEM1(SLJIT_SP), size);
}
diff --git a/src/3rdparty/pcre2/src/sljit/sljitNativeX86_common.c b/src/3rdparty/pcre2/src/sljit/sljitNativeX86_common.c
index 515d98aefd..c2c0421349 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitNativeX86_common.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitNativeX86_common.c
@@ -24,13 +24,15 @@
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#if defined(__has_feature)
+#if __has_feature(memory_sanitizer)
+#include <sanitizer/msan_interface.h>
+#endif /* __has_feature(memory_sanitizer) */
+#endif /* defined(__has_feature) */
+
SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void)
{
-#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
- return "x86" SLJIT_CPUINFO " ABI:fastcall";
-#else
return "x86" SLJIT_CPUINFO;
-#endif
}
/*
@@ -65,33 +67,33 @@ SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void)
15 - R15
*/
+#define TMP_REG1 (SLJIT_NUMBER_OF_REGISTERS + 2)
+#define TMP_FREG (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1)
+
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
-/* Last register + 1. */
-#define TMP_REG1 (SLJIT_NUMBER_OF_REGISTERS + 2)
static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 3] = {
- 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 7, 6, 3, 4, 5
+ 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 5, 7, 6, 4, 3
+};
+
+static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 2] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 0
};
#define CHECK_EXTRA_REGS(p, w, do) \
if (p >= SLJIT_R3 && p <= SLJIT_S3) { \
- if (p <= compiler->scratches) \
- w = compiler->saveds_offset - ((p) - SLJIT_R2) * (sljit_sw)sizeof(sljit_sw); \
- else \
- w = compiler->locals_offset + ((p) - SLJIT_S2) * (sljit_sw)sizeof(sljit_sw); \
+ w = (2 * SSIZE_OF(sw)) + ((p) - SLJIT_R3) * SSIZE_OF(sw); \
p = SLJIT_MEM1(SLJIT_SP); \
do; \
}
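On x86-32 the registers past the hardware set are virtual (SLJIT_HAS_VIRTUAL_REGISTERS below returns 1), and the rewritten CHECK_EXTRA_REGS pins SLJIT_R3..SLJIT_S3 to consecutive stack words after two reserved slots. A quick check of that arithmetic, with the word size and register count assumed for illustration:

#include <stdio.h>

int main(void)
{
	const int sw = 4; /* sizeof(sljit_sw) on x86-32 */
	int k;

	/* k = p - SLJIT_R3; offset = 2 * sw + k * sw, as in the macro. */
	for (k = 0; k < 4; k++)
		printf("virtual reg +%d -> [esp + %d]\n", k, 2 * sw + k * sw);
	return 0;
}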
#else /* SLJIT_CONFIG_X86_32 */
-/* Last register + 1. */
-#define TMP_REG1 (SLJIT_NUMBER_OF_REGISTERS + 2)
#define TMP_REG2 (SLJIT_NUMBER_OF_REGISTERS + 3)
/* Note: r12 & 0x7 == 0b100, which is decoded as a SIB byte being present
- Note: avoid to use r12 and r13 for memory addessing
+ Note: avoid using r12 and r13 for memory addressing;
 therefore r12 is better kept as a higher saved register. */
#ifndef _WIN64
/* Args: rdi(=7), rsi(=6), rdx(=2), rcx(=1), r8, r9. Scratches: rax(=0), r10, r11 */
@@ -100,7 +102,7 @@ static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 4] = {
};
/* low-map. reg_map & 0x7. */
static const sljit_u8 reg_lmap[SLJIT_NUMBER_OF_REGISTERS + 4] = {
- 0, 0, 6, 7, 1, 0, 3, 2, 4, 5, 5, 6, 7, 3, 4, 2, 1
+ 0, 0, 6, 7, 1, 0, 3, 2, 4, 5, 5, 6, 7, 3, 4, 2, 1
};
#else
/* Args: rcx(=1), rdx(=2), r8, r9. Scratches: rax(=0), r10, r11 */
@@ -114,12 +116,12 @@ static const sljit_u8 reg_lmap[SLJIT_NUMBER_OF_REGISTERS + 4] = {
#endif
/* Args: xmm0-xmm3 */
-static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = {
- 4, 0, 1, 2, 3, 5, 6
+static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 2] = {
+ 0, 0, 1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 4
};
/* low-map. freg_map & 0x7. */
-static const sljit_u8 freg_lmap[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = {
- 4, 0, 1, 2, 3, 5, 6
+static const sljit_u8 freg_lmap[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 2] = {
+ 0, 0, 1, 2, 3, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7, 4
};
#define REX_W 0x48
@@ -143,153 +145,255 @@ static const sljit_u8 freg_lmap[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = {
#endif /* SLJIT_CONFIG_X86_32 */
-#define TMP_FREG (0)
+#define U8(v) ((sljit_u8)(v))
/* Size flags for emit_x86_instruction: */
-#define EX86_BIN_INS 0x0010
-#define EX86_SHIFT_INS 0x0020
-#define EX86_REX 0x0040
-#define EX86_NO_REXW 0x0080
-#define EX86_BYTE_ARG 0x0100
-#define EX86_HALF_ARG 0x0200
-#define EX86_PREF_66 0x0400
-#define EX86_PREF_F2 0x0800
-#define EX86_PREF_F3 0x1000
-#define EX86_SSE2_OP1 0x2000
-#define EX86_SSE2_OP2 0x4000
+#define EX86_BIN_INS ((sljit_uw)0x000010)
+#define EX86_SHIFT_INS ((sljit_uw)0x000020)
+#define EX86_BYTE_ARG ((sljit_uw)0x000040)
+#define EX86_HALF_ARG ((sljit_uw)0x000080)
+/* Size flags for both emit_x86_instruction and emit_vex_instruction: */
+#define EX86_REX ((sljit_uw)0x000100)
+#define EX86_NO_REXW ((sljit_uw)0x000200)
+#define EX86_PREF_66 ((sljit_uw)0x000400)
+#define EX86_PREF_F2 ((sljit_uw)0x000800)
+#define EX86_PREF_F3 ((sljit_uw)0x001000)
+#define EX86_SSE2_OP1 ((sljit_uw)0x002000)
+#define EX86_SSE2_OP2 ((sljit_uw)0x004000)
#define EX86_SSE2 (EX86_SSE2_OP1 | EX86_SSE2_OP2)
+#define EX86_VEX_EXT ((sljit_uw)0x008000)
+/* Op flags for emit_vex_instruction: */
+#define VEX_OP_0F38 ((sljit_uw)0x010000)
+#define VEX_OP_0F3A ((sljit_uw)0x020000)
+#define VEX_SSE2_OPV ((sljit_uw)0x040000)
+#define VEX_AUTO_W ((sljit_uw)0x080000)
+#define VEX_W ((sljit_uw)0x100000)
+#define VEX_256 ((sljit_uw)0x200000)
+
+#define EX86_SELECT_66(op) (((op) & SLJIT_32) ? 0 : EX86_PREF_66)
+#define EX86_SELECT_F2_F3(op) (((op) & SLJIT_32) ? EX86_PREF_F3 : EX86_PREF_F2)
/* --------------------------------------------------------------------- */
-/* Instrucion forms */
+/* Instruction forms */
/* --------------------------------------------------------------------- */
-#define ADD (/* BINARY */ 0 << 3)
-#define ADD_EAX_i32 0x05
-#define ADD_r_rm 0x03
-#define ADD_rm_r 0x01
-#define ADDSD_x_xm 0x58
-#define ADC (/* BINARY */ 2 << 3)
-#define ADC_EAX_i32 0x15
-#define ADC_r_rm 0x13
-#define ADC_rm_r 0x11
-#define AND (/* BINARY */ 4 << 3)
-#define AND_EAX_i32 0x25
-#define AND_r_rm 0x23
-#define AND_rm_r 0x21
-#define ANDPD_x_xm 0x54
-#define BSR_r_rm (/* GROUP_0F */ 0xbd)
-#define CALL_i32 0xe8
-#define CALL_rm (/* GROUP_FF */ 2 << 3)
-#define CDQ 0x99
-#define CMOVE_r_rm (/* GROUP_0F */ 0x44)
-#define CMP (/* BINARY */ 7 << 3)
-#define CMP_EAX_i32 0x3d
-#define CMP_r_rm 0x3b
-#define CMP_rm_r 0x39
-#define CVTPD2PS_x_xm 0x5a
-#define CVTSI2SD_x_rm 0x2a
-#define CVTTSD2SI_r_xm 0x2c
-#define DIV (/* GROUP_F7 */ 6 << 3)
-#define DIVSD_x_xm 0x5e
-#define FSTPS 0xd9
-#define FSTPD 0xdd
-#define INT3 0xcc
-#define IDIV (/* GROUP_F7 */ 7 << 3)
-#define IMUL (/* GROUP_F7 */ 5 << 3)
-#define IMUL_r_rm (/* GROUP_0F */ 0xaf)
-#define IMUL_r_rm_i8 0x6b
-#define IMUL_r_rm_i32 0x69
-#define JE_i8 0x74
-#define JNE_i8 0x75
-#define JMP_i8 0xeb
-#define JMP_i32 0xe9
-#define JMP_rm (/* GROUP_FF */ 4 << 3)
-#define LEA_r_m 0x8d
-#define MOV_r_rm 0x8b
-#define MOV_r_i32 0xb8
-#define MOV_rm_r 0x89
-#define MOV_rm_i32 0xc7
-#define MOV_rm8_i8 0xc6
-#define MOV_rm8_r8 0x88
-#define MOVSD_x_xm 0x10
-#define MOVSD_xm_x 0x11
-#define MOVSXD_r_rm 0x63
-#define MOVSX_r_rm8 (/* GROUP_0F */ 0xbe)
-#define MOVSX_r_rm16 (/* GROUP_0F */ 0xbf)
-#define MOVZX_r_rm8 (/* GROUP_0F */ 0xb6)
-#define MOVZX_r_rm16 (/* GROUP_0F */ 0xb7)
-#define MUL (/* GROUP_F7 */ 4 << 3)
-#define MULSD_x_xm 0x59
-#define NEG_rm (/* GROUP_F7 */ 3 << 3)
-#define NOP 0x90
-#define NOT_rm (/* GROUP_F7 */ 2 << 3)
-#define OR (/* BINARY */ 1 << 3)
-#define OR_r_rm 0x0b
-#define OR_EAX_i32 0x0d
-#define OR_rm_r 0x09
-#define OR_rm8_r8 0x08
-#define POP_r 0x58
-#define POP_rm 0x8f
-#define POPF 0x9d
-#define PREFETCH 0x18
-#define PUSH_i32 0x68
-#define PUSH_r 0x50
-#define PUSH_rm (/* GROUP_FF */ 6 << 3)
-#define PUSHF 0x9c
-#define RET_near 0xc3
-#define RET_i16 0xc2
-#define SBB (/* BINARY */ 3 << 3)
-#define SBB_EAX_i32 0x1d
-#define SBB_r_rm 0x1b
-#define SBB_rm_r 0x19
-#define SAR (/* SHIFT */ 7 << 3)
-#define SHL (/* SHIFT */ 4 << 3)
-#define SHR (/* SHIFT */ 5 << 3)
-#define SUB (/* BINARY */ 5 << 3)
-#define SUB_EAX_i32 0x2d
-#define SUB_r_rm 0x2b
-#define SUB_rm_r 0x29
-#define SUBSD_x_xm 0x5c
-#define TEST_EAX_i32 0xa9
-#define TEST_rm_r 0x85
-#define UCOMISD_x_xm 0x2e
-#define UNPCKLPD_x_xm 0x14
-#define XCHG_EAX_r 0x90
-#define XCHG_r_rm 0x87
-#define XOR (/* BINARY */ 6 << 3)
-#define XOR_EAX_i32 0x35
-#define XOR_r_rm 0x33
-#define XOR_rm_r 0x31
-#define XORPD_x_xm 0x57
-
-#define GROUP_0F 0x0f
-#define GROUP_F7 0xf7
-#define GROUP_FF 0xff
-#define GROUP_BINARY_81 0x81
-#define GROUP_BINARY_83 0x83
-#define GROUP_SHIFT_1 0xd1
-#define GROUP_SHIFT_N 0xc1
-#define GROUP_SHIFT_CL 0xd3
-
-#define MOD_REG 0xc0
-#define MOD_DISP8 0x40
-
-#define INC_SIZE(s) (*inst++ = (s), compiler->size += (s))
-
-#define PUSH_REG(r) (*inst++ = (PUSH_r + (r)))
-#define POP_REG(r) (*inst++ = (POP_r + (r)))
-#define RET() (*inst++ = (RET_near))
-#define RET_I16(n) (*inst++ = (RET_i16), *inst++ = n, *inst++ = 0)
-/* r32, r/m32 */
-#define MOV_RM(mod, reg, rm) (*inst++ = (MOV_r_rm), *inst++ = (mod) << 6 | (reg) << 3 | (rm))
+#define ADD (/* BINARY */ 0 << 3)
+#define ADD_EAX_i32 0x05
+#define ADD_r_rm 0x03
+#define ADD_rm_r 0x01
+#define ADDSD_x_xm 0x58
+#define ADC (/* BINARY */ 2 << 3)
+#define ADC_EAX_i32 0x15
+#define ADC_r_rm 0x13
+#define ADC_rm_r 0x11
+#define AND (/* BINARY */ 4 << 3)
+#define AND_EAX_i32 0x25
+#define AND_r_rm 0x23
+#define AND_rm_r 0x21
+#define ANDPD_x_xm 0x54
+#define BSR_r_rm (/* GROUP_0F */ 0xbd)
+#define BSF_r_rm (/* GROUP_0F */ 0xbc)
+#define BSWAP_r (/* GROUP_0F */ 0xc8)
+#define CALL_i32 0xe8
+#define CALL_rm (/* GROUP_FF */ 2 << 3)
+#define CDQ 0x99
+#define CMOVE_r_rm (/* GROUP_0F */ 0x44)
+#define CMP (/* BINARY */ 7 << 3)
+#define CMP_EAX_i32 0x3d
+#define CMP_r_rm 0x3b
+#define CMP_rm_r 0x39
+#define CMPS_x_xm 0xc2
+#define CMPXCHG_rm_r 0xb1
+#define CMPXCHG_rm8_r 0xb0
+#define CVTPD2PS_x_xm 0x5a
+#define CVTPS2PD_x_xm 0x5a
+#define CVTSI2SD_x_rm 0x2a
+#define CVTTSD2SI_r_xm 0x2c
+#define DIV (/* GROUP_F7 */ 6 << 3)
+#define DIVSD_x_xm 0x5e
+#define EXTRACTPS_x_xm 0x17
+#define FLDS 0xd9
+#define FLDL 0xdd
+#define FSTPS 0xd9
+#define FSTPD 0xdd
+#define INSERTPS_x_xm 0x21
+#define INT3 0xcc
+#define IDIV (/* GROUP_F7 */ 7 << 3)
+#define IMUL (/* GROUP_F7 */ 5 << 3)
+#define IMUL_r_rm (/* GROUP_0F */ 0xaf)
+#define IMUL_r_rm_i8 0x6b
+#define IMUL_r_rm_i32 0x69
+#define JL_i8 0x7c
+#define JE_i8 0x74
+#define JNC_i8 0x73
+#define JNE_i8 0x75
+#define JMP_i8 0xeb
+#define JMP_i32 0xe9
+#define JMP_rm (/* GROUP_FF */ 4 << 3)
+#define LEA_r_m 0x8d
+#define LOOP_i8 0xe2
+#define LZCNT_r_rm (/* GROUP_F3 */ /* GROUP_0F */ 0xbd)
+#define MOV_r_rm 0x8b
+#define MOV_r_i32 0xb8
+#define MOV_rm_r 0x89
+#define MOV_rm_i32 0xc7
+#define MOV_rm8_i8 0xc6
+#define MOV_rm8_r8 0x88
+#define MOVAPS_x_xm 0x28
+#define MOVAPS_xm_x 0x29
+#define MOVD_x_rm 0x6e
+#define MOVD_rm_x 0x7e
+#define MOVDDUP_x_xm 0x12
+#define MOVDQA_x_xm 0x6f
+#define MOVDQA_xm_x 0x7f
+#define MOVHLPS_x_x 0x12
+#define MOVHPD_m_x 0x17
+#define MOVHPD_x_m 0x16
+#define MOVLHPS_x_x 0x16
+#define MOVLPD_m_x 0x13
+#define MOVLPD_x_m 0x12
+#define MOVMSKPS_r_x (/* GROUP_0F */ 0x50)
+#define MOVQ_x_xm (/* GROUP_0F */ 0x7e)
+#define MOVSD_x_xm 0x10
+#define MOVSD_xm_x 0x11
+#define MOVSHDUP_x_xm 0x16
+#define MOVSXD_r_rm 0x63
+#define MOVSX_r_rm8 (/* GROUP_0F */ 0xbe)
+#define MOVSX_r_rm16 (/* GROUP_0F */ 0xbf)
+#define MOVUPS_x_xm 0x10
+#define MOVZX_r_rm8 (/* GROUP_0F */ 0xb6)
+#define MOVZX_r_rm16 (/* GROUP_0F */ 0xb7)
+#define MUL (/* GROUP_F7 */ 4 << 3)
+#define MULSD_x_xm 0x59
+#define NEG_rm (/* GROUP_F7 */ 3 << 3)
+#define NOP 0x90
+#define NOT_rm (/* GROUP_F7 */ 2 << 3)
+#define OR (/* BINARY */ 1 << 3)
+#define OR_r_rm 0x0b
+#define OR_EAX_i32 0x0d
+#define OR_rm_r 0x09
+#define OR_rm8_r8 0x08
+#define ORPD_x_xm 0x56
+#define PACKSSWB_x_xm (/* GROUP_0F */ 0x63)
+#define PAND_x_xm 0xdb
+#define PCMPEQD_x_xm 0x76
+#define PINSRB_x_rm_i8 0x20
+#define PINSRW_x_rm_i8 0xc4
+#define PINSRD_x_rm_i8 0x22
+#define PEXTRB_rm_x_i8 0x14
+#define PEXTRW_rm_x_i8 0x15
+#define PEXTRD_rm_x_i8 0x16
+#define PMOVMSKB_r_x (/* GROUP_0F */ 0xd7)
+#define PMOVSXBD_x_xm 0x21
+#define PMOVSXBQ_x_xm 0x22
+#define PMOVSXBW_x_xm 0x20
+#define PMOVSXDQ_x_xm 0x25
+#define PMOVSXWD_x_xm 0x23
+#define PMOVSXWQ_x_xm 0x24
+#define PMOVZXBD_x_xm 0x31
+#define PMOVZXBQ_x_xm 0x32
+#define PMOVZXBW_x_xm 0x30
+#define PMOVZXDQ_x_xm 0x35
+#define PMOVZXWD_x_xm 0x33
+#define PMOVZXWQ_x_xm 0x34
+#define POP_r 0x58
+#define POP_rm 0x8f
+#define POPF 0x9d
+#define POR_x_xm 0xeb
+#define PREFETCH 0x18
+#define PSHUFB_x_xm 0x00
+#define PSHUFD_x_xm 0x70
+#define PSHUFLW_x_xm 0x70
+#define PSRLDQ_x 0x73
+#define PSLLD_x_i8 0x72
+#define PSLLQ_x_i8 0x73
+#define PUSH_i32 0x68
+#define PUSH_r 0x50
+#define PUSH_rm (/* GROUP_FF */ 6 << 3)
+#define PUSHF 0x9c
+#define PXOR_x_xm 0xef
+#define ROL (/* SHIFT */ 0 << 3)
+#define ROR (/* SHIFT */ 1 << 3)
+#define RET_near 0xc3
+#define RET_i16 0xc2
+#define SBB (/* BINARY */ 3 << 3)
+#define SBB_EAX_i32 0x1d
+#define SBB_r_rm 0x1b
+#define SBB_rm_r 0x19
+#define SAR (/* SHIFT */ 7 << 3)
+#define SHL (/* SHIFT */ 4 << 3)
+#define SHLD (/* GROUP_0F */ 0xa5)
+#define SHRD (/* GROUP_0F */ 0xad)
+#define SHR (/* SHIFT */ 5 << 3)
+#define SHUFPS_x_xm 0xc6
+#define SUB (/* BINARY */ 5 << 3)
+#define SUB_EAX_i32 0x2d
+#define SUB_r_rm 0x2b
+#define SUB_rm_r 0x29
+#define SUBSD_x_xm 0x5c
+#define TEST_EAX_i32 0xa9
+#define TEST_rm_r 0x85
+#define TZCNT_r_rm (/* GROUP_F3 */ /* GROUP_0F */ 0xbc)
+#define UCOMISD_x_xm 0x2e
+#define UNPCKLPD_x_xm 0x14
+#define UNPCKLPS_x_xm 0x14
+#define VBROADCASTSD_x_xm 0x19
+#define VBROADCASTSS_x_xm 0x18
+#define VEXTRACTF128_x_ym 0x19
+#define VEXTRACTI128_x_ym 0x39
+#define VINSERTF128_y_y_xm 0x18
+#define VINSERTI128_y_y_xm 0x38
+#define VPBROADCASTB_x_xm 0x78
+#define VPBROADCASTD_x_xm 0x58
+#define VPBROADCASTQ_x_xm 0x59
+#define VPBROADCASTW_x_xm 0x79
+#define VPERMPD_y_ym 0x01
+#define VPERMQ_y_ym 0x00
+#define XCHG_EAX_r 0x90
+#define XCHG_r_rm 0x87
+#define XOR (/* BINARY */ 6 << 3)
+#define XOR_EAX_i32 0x35
+#define XOR_r_rm 0x33
+#define XOR_rm_r 0x31
+#define XORPD_x_xm 0x57
+
+#define GROUP_0F 0x0f
+#define GROUP_66 0x66
+#define GROUP_F3 0xf3
+#define GROUP_F7 0xf7
+#define GROUP_FF 0xff
+#define GROUP_BINARY_81 0x81
+#define GROUP_BINARY_83 0x83
+#define GROUP_SHIFT_1 0xd1
+#define GROUP_SHIFT_N 0xc1
+#define GROUP_SHIFT_CL 0xd3
+#define GROUP_LOCK 0xf0
+
+#define MOD_REG 0xc0
+#define MOD_DISP8 0x40
+
+#define INC_SIZE(s) (*inst++ = U8(s), compiler->size += (s))
+
+#define PUSH_REG(r) (*inst++ = U8(PUSH_r + (r)))
+#define POP_REG(r) (*inst++ = U8(POP_r + (r)))
+#define RET() (*inst++ = RET_near)
+#define RET_I16(n) (*inst++ = RET_i16, *inst++ = U8(n), *inst++ = 0)
/* Multithreading does not affect these static variables, since they store
built-in CPU features. Therefore they can safely be overwritten by different
threads that detect the CPU features at the same time. */
+#define CPU_FEATURE_DETECTED 0x001
#if (defined SLJIT_DETECT_SSE2 && SLJIT_DETECT_SSE2)
-static sljit_s32 cpu_has_sse2 = -1;
+#define CPU_FEATURE_SSE2 0x002
#endif
-static sljit_s32 cpu_has_cmov = -1;
+#define CPU_FEATURE_SSE41 0x004
+#define CPU_FEATURE_LZCNT 0x008
+#define CPU_FEATURE_TZCNT 0x010
+#define CPU_FEATURE_CMOV 0x020
+#define CPU_FEATURE_AVX 0x040
+#define CPU_FEATURE_AVX2 0x080
+
+static sljit_u32 cpu_feature_list = 0;
#ifdef _WIN32_WCE
#include <cmnintrin.h>
@@ -320,82 +424,167 @@ static SLJIT_INLINE void sljit_unaligned_store_sw(void *addr, sljit_sw value)
/* Utility functions */
/******************************************************/
-static void get_cpu_features(void)
+static void execute_cpu_id(sljit_u32 info[4])
{
- sljit_u32 features;
-
#if defined(_MSC_VER) && _MSC_VER >= 1400
- int CPUInfo[4];
- __cpuid(CPUInfo, 1);
- features = (sljit_u32)CPUInfo[3];
+ __cpuidex((int*)info, (int)info[0], (int)info[2]);
-#elif defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__SUNPRO_C)
+#elif defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__SUNPRO_C) || defined(__TINYC__)
/* AT&T syntax. */
__asm__ (
- "movl $0x1, %%eax\n"
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
- /* On x86-32, there is no red zone, so this
- should work (no need for a local variable). */
- "push %%ebx\n"
-#endif
+ "movl %0, %%esi\n"
+ "movl (%%esi), %%eax\n"
+ "movl 8(%%esi), %%ecx\n"
+ "pushl %%ebx\n"
"cpuid\n"
-#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
- "pop %%ebx\n"
-#endif
- "movl %%edx, %0\n"
- : "=g" (features)
+ "movl %%eax, (%%esi)\n"
+ "movl %%ebx, 4(%%esi)\n"
+ "popl %%ebx\n"
+ "movl %%ecx, 8(%%esi)\n"
+ "movl %%edx, 12(%%esi)\n"
+#else /* !SLJIT_CONFIG_X86_32 */
+ "movq %0, %%rsi\n"
+ "movl (%%rsi), %%eax\n"
+ "movl 8(%%rsi), %%ecx\n"
+ "cpuid\n"
+ "movl %%eax, (%%rsi)\n"
+ "movl %%ebx, 4(%%rsi)\n"
+ "movl %%ecx, 8(%%rsi)\n"
+ "movl %%edx, 12(%%rsi)\n"
+#endif /* SLJIT_CONFIG_X86_32 */
:
+ : "r" (info)
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
- : "%eax", "%ecx", "%edx"
-#else
- : "%rax", "%rbx", "%rcx", "%rdx"
-#endif
+ : "memory", "eax", "ecx", "edx", "esi"
+#else /* !SLJIT_CONFIG_X86_32 */
+ : "memory", "rax", "rbx", "rcx", "rdx", "rsi"
+#endif /* SLJIT_CONFIG_X86_32 */
);
-#else /* _MSC_VER && _MSC_VER >= 1400 */
+#else /* _MSC_VER < 1400 */
/* Intel syntax. */
__asm {
- mov eax, 1
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
+ mov esi, info
+ mov eax, [esi]
+ mov ecx, [esi + 8]
+ cpuid
+ mov [esi], eax
+ mov [esi + 4], ebx
+ mov [esi + 8], ecx
+ mov [esi + 12], edx
+#else /* !SLJIT_CONFIG_X86_32 */
+ mov rsi, info
+ mov eax, [rsi]
+ mov ecx, [rsi + 8]
cpuid
- mov features, edx
+ mov [rsi], eax
+ mov [rsi + 4], ebx
+ mov [rsi + 8], ecx
+ mov [rsi + 12], edx
+#endif /* SLJIT_CONFIG_X86_32 */
}
#endif /* _MSC_VER && _MSC_VER >= 1400 */
+#if defined(__has_feature)
+#if __has_feature(memory_sanitizer)
+__msan_unpoison(info, 4 * sizeof(sljit_u32));
+#endif /* __has_feature(memory_sanitizer) */
+#endif /* defined(__has_feature) */
+
+}
+
+static void get_cpu_features(void)
+{
+ sljit_u32 feature_list = CPU_FEATURE_DETECTED;
+ sljit_u32 info[4];
+ sljit_u32 max_id;
+
+ info[0] = 0;
+ execute_cpu_id(info);
+ max_id = info[0];
+
+ if (max_id >= 7) {
+ info[0] = 7;
+ info[2] = 0;
+ execute_cpu_id(info);
+
+ if (info[1] & 0x8)
+ feature_list |= CPU_FEATURE_TZCNT;
+ if (info[1] & 0x20)
+ feature_list |= CPU_FEATURE_AVX2;
+ }
+
+ if (max_id >= 1) {
+ info[0] = 1;
+ execute_cpu_id(info);
+
+ if (info[2] & 0x80000)
+ feature_list |= CPU_FEATURE_SSE41;
+ if (info[2] & 0x10000000)
+ feature_list |= CPU_FEATURE_AVX;
#if (defined SLJIT_DETECT_SSE2 && SLJIT_DETECT_SSE2)
- cpu_has_sse2 = (features >> 26) & 0x1;
+ if (info[3] & 0x4000000)
+ feature_list |= CPU_FEATURE_SSE2;
#endif
- cpu_has_cmov = (features >> 15) & 0x1;
+ if (info[3] & 0x8000)
+ feature_list |= CPU_FEATURE_CMOV;
+ }
+
+ info[0] = 0x80000001;
+ info[2] = 0; /* Silences an incorrect compiler warning. */
+ execute_cpu_id(info);
+
+ if (info[2] & 0x20)
+ feature_list |= CPU_FEATURE_LZCNT;
+
+ cpu_feature_list = feature_list;
}
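The masks tested above correspond to documented CPUID bits: leaf 7 EBX bit 3 (BMI1, which supplies TZCNT) and bit 5 (AVX2); leaf 1 ECX bit 19 (SSE4.1) and bit 28 (AVX) plus EDX bit 26 (SSE2) and bit 15 (CMOV); extended leaf 0x80000001 ECX bit 5 (LZCNT/ABM). A cross-check sketch using GCC/Clang's <cpuid.h> helpers instead of the inline assembly above:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned a, b, c, d;

	if (__get_cpuid(1, &a, &b, &c, &d)) {
		printf("sse2:  %u\n", (d >> 26) & 1); /* 0x4000000 above */
		printf("cmov:  %u\n", (d >> 15) & 1); /* 0x8000 */
		printf("sse41: %u\n", (c >> 19) & 1); /* 0x80000 */
		printf("avx:   %u\n", (c >> 28) & 1); /* 0x10000000 */
	}
	if (__get_cpuid_count(7, 0, &a, &b, &c, &d)) {
		printf("tzcnt: %u\n", (b >> 3) & 1); /* 0x8 */
		printf("avx2:  %u\n", (b >> 5) & 1); /* 0x20 */
	}
	if (__get_cpuid(0x80000001, &a, &b, &c, &d))
		printf("lzcnt: %u\n", (c >> 5) & 1); /* 0x20 */
	return 0;
}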
-static sljit_u8 get_jump_code(sljit_s32 type)
+static sljit_u8 get_jump_code(sljit_uw type)
{
switch (type) {
case SLJIT_EQUAL:
- case SLJIT_EQUAL_F64:
+ case SLJIT_ATOMIC_STORED:
+ case SLJIT_F_EQUAL:
+ case SLJIT_UNORDERED_OR_EQUAL:
return 0x84 /* je */;
case SLJIT_NOT_EQUAL:
- case SLJIT_NOT_EQUAL_F64:
+ case SLJIT_ATOMIC_NOT_STORED:
+ case SLJIT_F_NOT_EQUAL:
+ case SLJIT_ORDERED_NOT_EQUAL:
return 0x85 /* jne */;
case SLJIT_LESS:
- case SLJIT_LESS_F64:
+ case SLJIT_CARRY:
+ case SLJIT_F_LESS:
+ case SLJIT_UNORDERED_OR_LESS:
+ case SLJIT_UNORDERED_OR_GREATER:
return 0x82 /* jc */;
case SLJIT_GREATER_EQUAL:
- case SLJIT_GREATER_EQUAL_F64:
+ case SLJIT_NOT_CARRY:
+ case SLJIT_F_GREATER_EQUAL:
+ case SLJIT_ORDERED_GREATER_EQUAL:
+ case SLJIT_ORDERED_LESS_EQUAL:
return 0x83 /* jae */;
case SLJIT_GREATER:
- case SLJIT_GREATER_F64:
+ case SLJIT_F_GREATER:
+ case SLJIT_ORDERED_LESS:
+ case SLJIT_ORDERED_GREATER:
return 0x87 /* jnbe */;
case SLJIT_LESS_EQUAL:
- case SLJIT_LESS_EQUAL_F64:
+ case SLJIT_F_LESS_EQUAL:
+ case SLJIT_UNORDERED_OR_GREATER_EQUAL:
+ case SLJIT_UNORDERED_OR_LESS_EQUAL:
return 0x86 /* jbe */;
case SLJIT_SIG_LESS:
@@ -416,10 +605,12 @@ static sljit_u8 get_jump_code(sljit_s32 type)
case SLJIT_NOT_OVERFLOW:
return 0x81 /* jno */;
- case SLJIT_UNORDERED_F64:
+ case SLJIT_UNORDERED:
+ case SLJIT_ORDERED_EQUAL: /* NaN. */
return 0x8a /* jp */;
- case SLJIT_ORDERED_F64:
+ case SLJIT_ORDERED:
+ case SLJIT_UNORDERED_OR_NOT_EQUAL: /* Not NaN. */
return 0x8b /* jpo */;
}
return 0;
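get_jump_code returns the second byte of the near (0F 8x, rel32) form; the short (rel8) form of the same condition is that byte minus 0x10, which is how both generate_near_jump_code and emit_cmov_generic later derive it. A one-line demonstration:

#include <stdio.h>

int main(void)
{
	unsigned char je_near = 0x84; /* je rel32 is encoded 0f 84 */
	unsigned char je_short = (unsigned char)(je_near - 0x10); /* 74 rel8 */

	printf("je near: 0f %02x, je short: %02x\n", je_near, je_short);
	return 0;
}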
@@ -434,22 +625,22 @@ static sljit_u8* generate_put_label_code(struct sljit_put_label *put_label, slji
static sljit_u8* generate_near_jump_code(struct sljit_jump *jump, sljit_u8 *code_ptr, sljit_u8 *code, sljit_sw executable_offset)
{
- sljit_s32 type = jump->flags >> TYPE_SHIFT;
+ sljit_uw type = jump->flags >> TYPE_SHIFT;
sljit_s32 short_jump;
sljit_uw label_addr;
if (jump->flags & JUMP_LABEL)
label_addr = (sljit_uw)(code + jump->u.label->size);
else
- label_addr = jump->u.target - executable_offset;
-
- short_jump = (sljit_sw)(label_addr - (jump->addr + 2)) >= -128 && (sljit_sw)(label_addr - (jump->addr + 2)) <= 127;
+ label_addr = jump->u.target - (sljit_uw)executable_offset;
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
- if ((sljit_sw)(label_addr - (jump->addr + 1)) > HALFWORD_MAX || (sljit_sw)(label_addr - (jump->addr + 1)) < HALFWORD_MIN)
+ if ((sljit_sw)(label_addr - (jump->addr + 2)) > HALFWORD_MAX || (sljit_sw)(label_addr - (jump->addr + 6)) < HALFWORD_MIN)
return generate_far_jump_code(jump, code_ptr);
#endif
+ short_jump = (sljit_sw)(label_addr - (jump->addr + 2)) >= -128 && (sljit_sw)(label_addr - (jump->addr + 2)) <= 127;
+
if (type == SLJIT_JUMP) {
if (short_jump)
*code_ptr++ = JMP_i8;
@@ -463,7 +654,7 @@ static sljit_u8* generate_near_jump_code(struct sljit_jump *jump, sljit_u8 *code
jump->addr++;
}
else if (short_jump) {
- *code_ptr++ = get_jump_code(type) - 0x10;
+ *code_ptr++ = U8(get_jump_code(type) - 0x10);
jump->addr++;
}
else {
@@ -492,7 +683,7 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
sljit_u8 *buf_end;
sljit_u8 len;
sljit_sw executable_offset;
- sljit_sw jump_addr;
+ sljit_uw jump_addr;
struct sljit_label *label;
struct sljit_jump *jump;
@@ -530,7 +721,7 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
switch (*buf_ptr) {
case 0:
label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
- label->size = code_ptr - code;
+ label->size = (sljit_uw)(code_ptr - code);
label = label->next;
break;
case 1:
@@ -575,32 +766,33 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
jump = compiler->jumps;
while (jump) {
- jump_addr = jump->addr + executable_offset;
+ if (jump->flags & (PATCH_MB | PATCH_MW)) {
+ if (jump->flags & JUMP_LABEL)
+ jump_addr = jump->u.label->addr;
+ else
+ jump_addr = jump->u.target;
- if (jump->flags & PATCH_MB) {
- SLJIT_ASSERT((sljit_sw)(jump->u.label->addr - (jump_addr + sizeof(sljit_s8))) >= -128 && (sljit_sw)(jump->u.label->addr - (jump_addr + sizeof(sljit_s8))) <= 127);
- *(sljit_u8*)jump->addr = (sljit_u8)(jump->u.label->addr - (jump_addr + sizeof(sljit_s8)));
- } else if (jump->flags & PATCH_MW) {
- if (jump->flags & JUMP_LABEL) {
-#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
- sljit_unaligned_store_sw((void*)jump->addr, (sljit_sw)(jump->u.label->addr - (jump_addr + sizeof(sljit_sw))));
-#else
- SLJIT_ASSERT((sljit_sw)(jump->u.label->addr - (jump_addr + sizeof(sljit_s32))) >= HALFWORD_MIN && (sljit_sw)(jump->u.label->addr - (jump_addr + sizeof(sljit_s32))) <= HALFWORD_MAX);
- sljit_unaligned_store_s32((void*)jump->addr, (sljit_s32)(jump->u.label->addr - (jump_addr + sizeof(sljit_s32))));
-#endif
- }
- else {
+ jump_addr -= jump->addr + (sljit_uw)executable_offset;
+
+ if (jump->flags & PATCH_MB) {
+ jump_addr -= sizeof(sljit_s8);
+ SLJIT_ASSERT((sljit_sw)jump_addr >= -128 && (sljit_sw)jump_addr <= 127);
+ *(sljit_u8*)jump->addr = U8(jump_addr);
+ } else {
+ jump_addr -= sizeof(sljit_s32);
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
- sljit_unaligned_store_sw((void*)jump->addr, (sljit_sw)(jump->u.target - (jump_addr + sizeof(sljit_sw))));
+ sljit_unaligned_store_sw((void*)jump->addr, (sljit_sw)jump_addr);
#else
- SLJIT_ASSERT((sljit_sw)(jump->u.target - (jump_addr + sizeof(sljit_s32))) >= HALFWORD_MIN && (sljit_sw)(jump->u.target - (jump_addr + sizeof(sljit_s32))) <= HALFWORD_MAX);
- sljit_unaligned_store_s32((void*)jump->addr, (sljit_s32)(jump->u.target - (jump_addr + sizeof(sljit_s32))));
+ SLJIT_ASSERT((sljit_sw)jump_addr >= HALFWORD_MIN && (sljit_sw)jump_addr <= HALFWORD_MAX);
+ sljit_unaligned_store_s32((void*)jump->addr, (sljit_s32)jump_addr);
#endif
}
}
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
- else if (jump->flags & PATCH_MD)
- sljit_unaligned_store_sw((void*)jump->addr, jump->u.label->addr);
+ else if (jump->flags & PATCH_MD) {
+ SLJIT_ASSERT(jump->flags & JUMP_LABEL);
+ sljit_unaligned_store_sw((void*)jump->addr, (sljit_sw)jump->u.label->addr);
+ }
#endif
jump = jump->next;
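The patch loop above encodes x86 relative branches, whose displacement is measured from the end of the immediate; that is why sizeof(sljit_s8) or sizeof(sljit_s32) is subtracted before the store. The arithmetic, sketched with assumed addresses:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uintptr_t imm_addr = 0x1000; /* where the rel32 bytes live (jump->addr) */
	uintptr_t target = 0x1040; /* resolved label or target address */

	int32_t rel32 = (int32_t)(target - (imm_addr + sizeof(int32_t)));
	printf("rel32 = 0x%x\n", (unsigned)rel32); /* 0x3c */
	return 0;
}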
@@ -626,7 +818,7 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
compiler->error = SLJIT_ERR_COMPILED;
compiler->executable_offset = executable_offset;
- compiler->executable_size = code_ptr - code;
+ compiler->executable_size = (sljit_uw)(code_ptr - code);
code = (sljit_u8*)SLJIT_ADD_EXEC_OFFSET(code, executable_offset);
@@ -639,11 +831,11 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
switch (feature_type) {
case SLJIT_HAS_FPU:
#ifdef SLJIT_IS_FPU_AVAILABLE
- return SLJIT_IS_FPU_AVAILABLE;
+ return (SLJIT_IS_FPU_AVAILABLE) != 0;
#elif (defined SLJIT_DETECT_SSE2 && SLJIT_DETECT_SSE2)
- if (cpu_has_sse2 == -1)
+ if (cpu_feature_list == 0)
get_cpu_features();
- return cpu_has_sse2;
+ return (cpu_feature_list & CPU_FEATURE_SSE2) != 0;
#else /* SLJIT_DETECT_SSE2 */
return 1;
#endif /* SLJIT_DETECT_SSE2 */
@@ -651,48 +843,112 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
case SLJIT_HAS_VIRTUAL_REGISTERS:
return 1;
-#endif
+#endif /* SLJIT_CONFIG_X86_32 */
case SLJIT_HAS_CLZ:
+ if (cpu_feature_list == 0)
+ get_cpu_features();
+
+ return (cpu_feature_list & CPU_FEATURE_LZCNT) ? 1 : 2;
+
+ case SLJIT_HAS_CTZ:
+ if (cpu_feature_list == 0)
+ get_cpu_features();
+
+ return (cpu_feature_list & CPU_FEATURE_TZCNT) ? 1 : 2;
+
case SLJIT_HAS_CMOV:
- if (cpu_has_cmov == -1)
+ if (cpu_feature_list == 0)
get_cpu_features();
- return cpu_has_cmov;
+ return (cpu_feature_list & CPU_FEATURE_CMOV) != 0;
+ case SLJIT_HAS_REV:
+ case SLJIT_HAS_ROT:
case SLJIT_HAS_PREFETCH:
+ case SLJIT_HAS_COPY_F32:
+ case SLJIT_HAS_COPY_F64:
+ case SLJIT_HAS_ATOMIC:
return 1;
- case SLJIT_HAS_SSE2:
-#if (defined SLJIT_DETECT_SSE2 && SLJIT_DETECT_SSE2)
- if (cpu_has_sse2 == -1)
+#if !(defined SLJIT_IS_FPU_AVAILABLE) || SLJIT_IS_FPU_AVAILABLE
+ case SLJIT_HAS_AVX:
+ if (cpu_feature_list == 0)
get_cpu_features();
- return cpu_has_sse2;
-#else
- return 1;
-#endif
-
+ return (cpu_feature_list & CPU_FEATURE_AVX) != 0;
+ case SLJIT_HAS_AVX2:
+ if (cpu_feature_list == 0)
+ get_cpu_features();
+ return (cpu_feature_list & CPU_FEATURE_AVX2) != 0;
+ case SLJIT_HAS_SIMD:
+ if (cpu_feature_list == 0)
+ get_cpu_features();
+ return (cpu_feature_list & CPU_FEATURE_SSE41) != 0;
+#endif /* SLJIT_IS_FPU_AVAILABLE */
default:
return 0;
}
}
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_cmp_info(sljit_s32 type)
+{
+ switch (type) {
+ case SLJIT_ORDERED_EQUAL:
+ case SLJIT_UNORDERED_OR_NOT_EQUAL:
+ return 2;
+ }
+
+ return 0;
+}
+
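sljit_cmp_info reports 2 for ORDERED_EQUAL and UNORDERED_OR_NOT_EQUAL because UCOMISD sets ZF for both "equal" and "unordered", so "ordered and equal" needs a separate parity test, as the NaN comments in get_jump_code above hint. The semantics in plain C, as an illustrative sketch:

#include <stdio.h>
#include <math.h>

static int ordered_equal(double a, double b)
{
	/* Unordered (either operand NaN) sets PF; rule that out first. */
	if (isnan(a) || isnan(b))
		return 0;
	return a == b;
}

int main(void)
{
	printf("%d %d\n", ordered_equal(1.0, 1.0), ordered_equal(NAN, 1.0));
	return 0; /* prints: 1 0 */
}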
/* --------------------------------------------------------------------- */
/* Operators */
/* --------------------------------------------------------------------- */
#define BINARY_OPCODE(opcode) (((opcode ## _EAX_i32) << 24) | ((opcode ## _r_rm) << 16) | ((opcode ## _rm_r) << 8) | (opcode))
-static sljit_s32 emit_cum_binary(struct sljit_compiler *compiler,
- sljit_u32 op_types,
- sljit_s32 dst, sljit_sw dstw,
- sljit_s32 src1, sljit_sw src1w,
- sljit_s32 src2, sljit_sw src2w);
+#define BINARY_IMM32(op_imm, immw, arg, argw) \
+ do { \
+ inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, immw, arg, argw); \
+ FAIL_IF(!inst); \
+ *(inst + 1) |= (op_imm); \
+ } while (0)
-static sljit_s32 emit_non_cum_binary(struct sljit_compiler *compiler,
- sljit_u32 op_types,
- sljit_s32 dst, sljit_sw dstw,
- sljit_s32 src1, sljit_sw src1w,
- sljit_s32 src2, sljit_sw src2w);
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+
+#define BINARY_IMM(op_imm, op_mr, immw, arg, argw) \
+ do { \
+ if (IS_HALFWORD(immw) || compiler->mode32) { \
+ BINARY_IMM32(op_imm, immw, arg, argw); \
+ } \
+ else { \
+ FAIL_IF(emit_load_imm64(compiler, (arg == TMP_REG1) ? TMP_REG2 : TMP_REG1, immw)); \
+ inst = emit_x86_instruction(compiler, 1, (arg == TMP_REG1) ? TMP_REG2 : TMP_REG1, 0, arg, argw); \
+ FAIL_IF(!inst); \
+ *inst = (op_mr); \
+ } \
+ } while (0)
+
+#define BINARY_EAX_IMM(op_eax_imm, immw) \
+ FAIL_IF(emit_do_imm32(compiler, (!compiler->mode32) ? REX_W : 0, (op_eax_imm), immw))
+
+#else /* !SLJIT_CONFIG_X86_64 */
+
+#define BINARY_IMM(op_imm, op_mr, immw, arg, argw) \
+ BINARY_IMM32(op_imm, immw, arg, argw)
+
+#define BINARY_EAX_IMM(op_eax_imm, immw) \
+ FAIL_IF(emit_do_imm(compiler, (op_eax_imm), immw))
+
+#endif /* SLJIT_CONFIG_X86_64 */
+
+static sljit_s32 emit_byte(struct sljit_compiler *compiler, sljit_u8 byte)
+{
+ sljit_u8 *inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
+ FAIL_IF(!inst);
+ INC_SIZE(1);
+ *inst = byte;
+ return SLJIT_SUCCESS;
+}
static sljit_s32 emit_mov(struct sljit_compiler *compiler,
sljit_s32 dst, sljit_sw dstw,
@@ -701,6 +957,14 @@ static sljit_s32 emit_mov(struct sljit_compiler *compiler,
#define EMIT_MOV(compiler, dst, dstw, src, srcw) \
FAIL_IF(emit_mov(compiler, dst, dstw, src, srcw));
+static sljit_s32 emit_groupf(struct sljit_compiler *compiler,
+ sljit_uw op,
+ sljit_s32 dst, sljit_s32 src, sljit_sw srcw);
+
+static sljit_s32 emit_groupf_ext(struct sljit_compiler *compiler,
+ sljit_uw op,
+ sljit_s32 dst, sljit_s32 src, sljit_sw srcw);
+
static SLJIT_INLINE sljit_s32 emit_sse2_store(struct sljit_compiler *compiler,
sljit_s32 single, sljit_s32 dst, sljit_sw dstw, sljit_s32 src);
@@ -711,6 +975,10 @@ static sljit_s32 emit_cmp_binary(struct sljit_compiler *compiler,
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w);
+static sljit_s32 emit_cmov_generic(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 dst_reg,
+ sljit_s32 src, sljit_sw srcw);
+
static SLJIT_INLINE sljit_s32 emit_endbranch(struct sljit_compiler *compiler)
{
#if (defined SLJIT_CONFIG_X86_CET && SLJIT_CONFIG_X86_CET)
@@ -719,14 +987,14 @@ static SLJIT_INLINE sljit_s32 emit_endbranch(struct sljit_compiler *compiler)
inst = (sljit_u8*)ensure_buf(compiler, 1 + 4);
FAIL_IF(!inst);
INC_SIZE(4);
- *inst++ = 0xf3;
- *inst++ = 0x0f;
- *inst++ = 0x1e;
+ inst[0] = GROUP_F3;
+ inst[1] = GROUP_0F;
+ inst[2] = 0x1e;
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
- *inst = 0xfb;
-#else
- *inst = 0xfa;
-#endif
+ inst[3] = 0xfb;
+#else /* !SLJIT_CONFIG_X86_32 */
+ inst[3] = 0xfa;
+#endif /* SLJIT_CONFIG_X86_32 */
#else /* !SLJIT_CONFIG_X86_CET */
SLJIT_UNUSED_ARG(compiler);
#endif /* SLJIT_CONFIG_X86_CET */
@@ -749,13 +1017,17 @@ static SLJIT_INLINE sljit_s32 emit_rdssp(struct sljit_compiler *compiler, sljit_
inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
FAIL_IF(!inst);
INC_SIZE(size);
- *inst++ = 0xf3;
+ *inst++ = GROUP_F3;
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
*inst++ = REX_W | (reg_map[reg] <= 7 ? 0 : REX_B);
#endif
- *inst++ = 0x0f;
- *inst++ = 0x1e;
- *inst = (0x3 << 6) | (0x1 << 3) | (reg_map[reg] & 0x7);
+ inst[0] = GROUP_0F;
+ inst[1] = 0x1e;
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ inst[2] = U8(MOD_REG | (0x1 << 3) | reg_lmap[reg]);
+#else
+ inst[2] = U8(MOD_REG | (0x1 << 3) | reg_map[reg]);
+#endif
return SLJIT_SUCCESS;
}
@@ -773,13 +1045,13 @@ static SLJIT_INLINE sljit_s32 emit_incssp(struct sljit_compiler *compiler, sljit
inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
FAIL_IF(!inst);
INC_SIZE(size);
- *inst++ = 0xf3;
+ *inst++ = GROUP_F3;
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
*inst++ = REX_W | (reg_map[reg] <= 7 ? 0 : REX_B);
#endif
- *inst++ = 0x0f;
- *inst++ = 0xae;
- *inst = (0x3 << 6) | (0x5 << 3) | (reg_map[reg] & 0x7);
+ inst[0] = GROUP_0F;
+ inst[1] = 0xae;
+ inst[2] = (0x3 << 6) | (0x5 << 3) | (reg_map[reg] & 0x7);
return SLJIT_SUCCESS;
}
@@ -795,7 +1067,7 @@ static SLJIT_INLINE sljit_s32 cpu_has_shadow_stack(void)
}
static SLJIT_INLINE sljit_s32 adjust_shadow_stack(struct sljit_compiler *compiler,
- sljit_s32 src, sljit_sw srcw, sljit_s32 base, sljit_sw disp)
+ sljit_s32 src, sljit_sw srcw)
{
#if (defined SLJIT_CONFIG_X86_CET && SLJIT_CONFIG_X86_CET) && defined (__SHSTK__)
sljit_u8 *inst, *jz_after_cmp_inst;
@@ -807,25 +1079,7 @@ static SLJIT_INLINE sljit_s32 adjust_shadow_stack(struct sljit_compiler *compile
FAIL_IF(emit_rdssp(compiler, TMP_REG1));
/* Load return address on shadow stack into TMP_REG1. */
-#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
- SLJIT_ASSERT(reg_map[TMP_REG1] == 5);
-
- /* Hand code unsupported "mov 0x0(%ebp),%ebp". */
- inst = (sljit_u8*)ensure_buf(compiler, 1 + 3);
- FAIL_IF(!inst);
- INC_SIZE(3);
- *inst++ = 0x8b;
- *inst++ = 0x6d;
- *inst = 0;
-#else /* !SLJIT_CONFIG_X86_32 */
EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(TMP_REG1), 0);
-#endif /* SLJIT_CONFIG_X86_32 */
-
- if (src == SLJIT_UNUSED) {
- /* Return address is on stack. */
- src = SLJIT_MEM1(base);
- srcw = disp;
- }
/* Compare return address against TMP_REG1. */
FAIL_IF(emit_cmp_binary (compiler, TMP_REG1, 0, src, srcw));
@@ -853,16 +1107,14 @@ static SLJIT_INLINE sljit_s32 adjust_shadow_stack(struct sljit_compiler *compile
inst = (sljit_u8*)ensure_buf(compiler, 1 + 2);
FAIL_IF(!inst);
INC_SIZE(2);
- *inst++ = JMP_i8;
- *inst = size_before_rdssp_inst - compiler->size;
+ inst[0] = JMP_i8;
+ inst[1] = size_before_rdssp_inst - compiler->size;
*jz_after_cmp_inst = compiler->size - size_jz_after_cmp_inst;
#else /* !SLJIT_CONFIG_X86_CET || !__SHSTK__ */
SLJIT_UNUSED_ARG(compiler);
SLJIT_UNUSED_ARG(src);
SLJIT_UNUSED_ARG(srcw);
- SLJIT_UNUSED_ARG(base);
- SLJIT_UNUSED_ARG(disp);
#endif /* SLJIT_CONFIG_X86_CET && __SHSTK__ */
return SLJIT_SUCCESS;
}
@@ -879,25 +1131,24 @@ static sljit_s32 emit_mov(struct sljit_compiler *compiler,
{
sljit_u8* inst;
- SLJIT_ASSERT(dst != SLJIT_UNUSED);
-
if (FAST_IS_REG(src)) {
inst = emit_x86_instruction(compiler, 1, src, 0, dst, dstw);
FAIL_IF(!inst);
*inst = MOV_rm_r;
return SLJIT_SUCCESS;
}
- if (src & SLJIT_IMM) {
+
+ if (src == SLJIT_IMM) {
if (FAST_IS_REG(dst)) {
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
- return emit_do_imm(compiler, MOV_r_i32 + reg_map[dst], srcw);
+ return emit_do_imm(compiler, MOV_r_i32 | reg_map[dst], srcw);
#else
if (!compiler->mode32) {
if (NOT_HALFWORD(srcw))
return emit_load_imm64(compiler, dst, srcw);
}
else
- return emit_do_imm32(compiler, (reg_map[dst] >= 8) ? REX_B : 0, MOV_r_i32 + reg_lmap[dst], srcw);
+ return emit_do_imm32(compiler, (reg_map[dst] >= 8) ? REX_B : 0, U8(MOV_r_i32 | reg_lmap[dst]), srcw);
#endif
}
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
@@ -934,11 +1185,32 @@ static sljit_s32 emit_mov(struct sljit_compiler *compiler,
return SLJIT_SUCCESS;
}
+static sljit_s32 emit_cmov_generic(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 dst_reg,
+ sljit_s32 src, sljit_sw srcw)
+{
+ sljit_u8* inst;
+ sljit_uw size;
+
+ SLJIT_ASSERT(type >= SLJIT_EQUAL && type <= SLJIT_ORDERED_LESS_EQUAL);
+
+ inst = (sljit_u8*)ensure_buf(compiler, 1 + 2);
+ FAIL_IF(!inst);
+ INC_SIZE(2);
+ inst[0] = U8(get_jump_code((sljit_uw)type ^ 0x1) - 0x10);
+
+ size = compiler->size;
+ EMIT_MOV(compiler, dst_reg, 0, src, srcw);
+
+ inst[1] = U8(compiler->size - size);
+ return SLJIT_SUCCESS;
+}
+
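On processors without CMOV, emit_cmov_generic above builds an inverted short jump over a plain MOV and back-patches inst[1] with the MOV's size once it is known. The C-level equivalent of the emitted shape, as a sketch:

#include <stdio.h>

static long cmov_emulated(int cond, long dst, long src)
{
	if (!cond) /* the inverted jcc: skip the mov when the condition fails */
		goto skip;
	dst = src; /* the unconditional mov */
skip:
	return dst;
}

int main(void)
{
	printf("%ld %ld\n", cmov_emulated(1, 5, 9), cmov_emulated(0, 5, 9));
	return 0; /* prints: 9 5 */
}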
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op)
{
sljit_u8 *inst;
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
- sljit_s32 size;
+ sljit_uw size;
#endif
CHECK_ERROR();
@@ -946,17 +1218,9 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
switch (GET_OPCODE(op)) {
case SLJIT_BREAKPOINT:
- inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
- FAIL_IF(!inst);
- INC_SIZE(1);
- *inst = INT3;
- break;
+ return emit_byte(compiler, INT3);
case SLJIT_NOP:
- inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
- FAIL_IF(!inst);
- INC_SIZE(1);
- *inst = NOP;
- break;
+ return emit_byte(compiler, NOP);
case SLJIT_LMUL_UW:
case SLJIT_LMUL_SW:
case SLJIT_DIVMOD_UW:
@@ -975,7 +1239,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
&& reg_map[SLJIT_R1] < 7
&& reg_map[TMP_REG1] == 2);
#endif
- compiler->mode32 = op & SLJIT_I32_OP;
+ compiler->mode32 = op & SLJIT_32;
#endif
SLJIT_COMPILE_ASSERT((SLJIT_DIVMOD_UW & 0x2) == 0 && SLJIT_DIV_UW - 0x2 == SLJIT_DIVMOD_UW, bad_div_opcode_assignments);
@@ -997,23 +1261,16 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
#endif
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
- inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
- FAIL_IF(!inst);
- INC_SIZE(1);
- *inst = CDQ;
+ FAIL_IF(emit_byte(compiler, CDQ));
#else
- if (compiler->mode32) {
- inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
- FAIL_IF(!inst);
- INC_SIZE(1);
- *inst = CDQ;
- } else {
+ if (!compiler->mode32) {
inst = (sljit_u8*)ensure_buf(compiler, 1 + 2);
FAIL_IF(!inst);
INC_SIZE(2);
- *inst++ = REX_W;
- *inst = CDQ;
- }
+ inst[0] = REX_W;
+ inst[1] = CDQ;
+ } else
+ FAIL_IF(emit_byte(compiler, CDQ));
#endif
}
@@ -1021,14 +1278,14 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
inst = (sljit_u8*)ensure_buf(compiler, 1 + 2);
FAIL_IF(!inst);
INC_SIZE(2);
- *inst++ = GROUP_F7;
- *inst = MOD_REG | ((op >= SLJIT_DIVMOD_UW) ? reg_map[TMP_REG1] : reg_map[SLJIT_R1]);
-#else
+ inst[0] = GROUP_F7;
+ inst[1] = MOD_REG | ((op >= SLJIT_DIVMOD_UW) ? reg_map[TMP_REG1] : reg_map[SLJIT_R1]);
+#else /* !SLJIT_CONFIG_X86_32 */
#ifdef _WIN64
size = (!compiler->mode32 || op >= SLJIT_DIVMOD_UW) ? 3 : 2;
-#else
+#else /* !_WIN64 */
size = (!compiler->mode32) ? 3 : 2;
-#endif
+#endif /* _WIN64 */
inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
FAIL_IF(!inst);
INC_SIZE(size);
@@ -1037,29 +1294,29 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
*inst++ = REX_W | ((op >= SLJIT_DIVMOD_UW) ? REX_B : 0);
else if (op >= SLJIT_DIVMOD_UW)
*inst++ = REX_B;
- *inst++ = GROUP_F7;
- *inst = MOD_REG | ((op >= SLJIT_DIVMOD_UW) ? reg_lmap[TMP_REG1] : reg_lmap[SLJIT_R1]);
-#else
+ inst[0] = GROUP_F7;
+ inst[1] = MOD_REG | ((op >= SLJIT_DIVMOD_UW) ? reg_lmap[TMP_REG1] : reg_lmap[SLJIT_R1]);
+#else /* !_WIN64 */
if (!compiler->mode32)
*inst++ = REX_W;
- *inst++ = GROUP_F7;
- *inst = MOD_REG | reg_map[SLJIT_R1];
-#endif
-#endif
+ inst[0] = GROUP_F7;
+ inst[1] = MOD_REG | reg_map[SLJIT_R1];
+#endif /* _WIN64 */
+#endif /* SLJIT_CONFIG_X86_32 */
switch (op) {
case SLJIT_LMUL_UW:
- *inst |= MUL;
+ inst[1] |= MUL;
break;
case SLJIT_LMUL_SW:
- *inst |= IMUL;
+ inst[1] |= IMUL;
break;
case SLJIT_DIVMOD_UW:
case SLJIT_DIV_UW:
- *inst |= DIV;
+ inst[1] |= DIV;
break;
case SLJIT_DIVMOD_SW:
case SLJIT_DIV_SW:
- *inst |= IDIV;
+ inst[1] |= IDIV;
break;
}
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) && !defined(_WIN64)
@@ -1079,32 +1336,21 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
return SLJIT_SUCCESS;
}
-#define ENCODE_PREFIX(prefix) \
- do { \
- inst = (sljit_u8*)ensure_buf(compiler, 1 + 1); \
- FAIL_IF(!inst); \
- INC_SIZE(1); \
- *inst = (prefix); \
- } while (0)
-
static sljit_s32 emit_mov_byte(struct sljit_compiler *compiler, sljit_s32 sign,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
{
sljit_u8* inst;
sljit_s32 dst_r;
-#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
- sljit_s32 work_r;
-#endif
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
compiler->mode32 = 0;
#endif
- if (src & SLJIT_IMM) {
+ if (src == SLJIT_IMM) {
if (FAST_IS_REG(dst)) {
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
- return emit_do_imm(compiler, MOV_r_i32 + reg_map[dst], srcw);
+ return emit_do_imm(compiler, MOV_r_i32 | reg_map[dst], srcw);
#else
inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, srcw, dst, 0);
FAIL_IF(!inst);
@@ -1130,100 +1376,33 @@ static sljit_s32 emit_mov_byte(struct sljit_compiler *compiler, sljit_s32 sign,
#else
dst_r = src;
#endif
- }
+ } else {
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
- else if (FAST_IS_REG(src) && reg_map[src] >= 4) {
- /* src, dst are registers. */
- SLJIT_ASSERT(SLOW_IS_REG(dst));
- if (reg_map[dst] < 4) {
- if (dst != src)
- EMIT_MOV(compiler, dst, 0, src, 0);
- inst = emit_x86_instruction(compiler, 2, dst, 0, dst, 0);
- FAIL_IF(!inst);
- *inst++ = GROUP_0F;
- *inst = sign ? MOVSX_r_rm8 : MOVZX_r_rm8;
- }
- else {
- if (dst != src)
- EMIT_MOV(compiler, dst, 0, src, 0);
- if (sign) {
- /* shl reg, 24 */
- inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_IMM, 24, dst, 0);
- FAIL_IF(!inst);
- *inst |= SHL;
- /* sar reg, 24 */
- inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_IMM, 24, dst, 0);
- FAIL_IF(!inst);
- *inst |= SAR;
- }
- else {
+ if (FAST_IS_REG(src) && reg_map[src] >= 4) {
+ /* Both src and dst are registers. */
+ SLJIT_ASSERT(FAST_IS_REG(dst));
+
+ if (src == dst && !sign) {
inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, 0xff, dst, 0);
FAIL_IF(!inst);
*(inst + 1) |= AND;
+ return SLJIT_SUCCESS;
}
+
+ EMIT_MOV(compiler, TMP_REG1, 0, src, 0);
+ src = TMP_REG1;
+ srcw = 0;
}
- return SLJIT_SUCCESS;
- }
-#endif
- else {
+#endif /* !SLJIT_CONFIG_X86_32 */
+
/* src can be a memory address, or reg_map[src] < 4 on x86_32 architectures. */
- inst = emit_x86_instruction(compiler, 2, dst_r, 0, src, srcw);
- FAIL_IF(!inst);
- *inst++ = GROUP_0F;
- *inst = sign ? MOVSX_r_rm8 : MOVZX_r_rm8;
+ FAIL_IF(emit_groupf(compiler, sign ? MOVSX_r_rm8 : MOVZX_r_rm8, dst_r, src, srcw));
}
if (dst & SLJIT_MEM) {
-#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
- if (dst_r == TMP_REG1) {
- /* Find a non-used register, whose reg_map[src] < 4. */
- if ((dst & REG_MASK) == SLJIT_R0) {
- if ((dst & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_R1))
- work_r = SLJIT_R2;
- else
- work_r = SLJIT_R1;
- }
- else {
- if ((dst & OFFS_REG_MASK) != TO_OFFS_REG(SLJIT_R0))
- work_r = SLJIT_R0;
- else if ((dst & REG_MASK) == SLJIT_R1)
- work_r = SLJIT_R2;
- else
- work_r = SLJIT_R1;
- }
-
- if (work_r == SLJIT_R0) {
- ENCODE_PREFIX(XCHG_EAX_r + reg_map[TMP_REG1]);
- }
- else {
- inst = emit_x86_instruction(compiler, 1, work_r, 0, dst_r, 0);
- FAIL_IF(!inst);
- *inst = XCHG_r_rm;
- }
-
- inst = emit_x86_instruction(compiler, 1, work_r, 0, dst, dstw);
- FAIL_IF(!inst);
- *inst = MOV_rm8_r8;
-
- if (work_r == SLJIT_R0) {
- ENCODE_PREFIX(XCHG_EAX_r + reg_map[TMP_REG1]);
- }
- else {
- inst = emit_x86_instruction(compiler, 1, work_r, 0, dst_r, 0);
- FAIL_IF(!inst);
- *inst = XCHG_r_rm;
- }
- }
- else {
- inst = emit_x86_instruction(compiler, 1, dst_r, 0, dst, dstw);
- FAIL_IF(!inst);
- *inst = MOV_rm8_r8;
- }
-#else
inst = emit_x86_instruction(compiler, 1 | EX86_REX | EX86_NO_REXW, dst_r, 0, dst, dstw);
FAIL_IF(!inst);
*inst = MOV_rm8_r8;
-#endif
}
return SLJIT_SUCCESS;
@@ -1240,15 +1419,15 @@ static sljit_s32 emit_prefetch(struct sljit_compiler *compiler, sljit_s32 op,
inst = emit_x86_instruction(compiler, 2, 0, 0, src, srcw);
FAIL_IF(!inst);
- *inst++ = GROUP_0F;
- *inst++ = PREFETCH;
+ inst[0] = GROUP_0F;
+ inst[1] = PREFETCH;
if (op == SLJIT_PREFETCH_L1)
- *inst |= (1 << 3);
+ inst[2] |= (1 << 3);
else if (op == SLJIT_PREFETCH_L2)
- *inst |= (2 << 3);
+ inst[2] |= (2 << 3);
else if (op == SLJIT_PREFETCH_L3)
- *inst |= (3 << 3);
+ inst[2] |= (3 << 3);
return SLJIT_SUCCESS;
}
@@ -1264,10 +1443,10 @@ static sljit_s32 emit_mov_half(struct sljit_compiler *compiler, sljit_s32 sign,
compiler->mode32 = 0;
#endif
- if (src & SLJIT_IMM) {
+ if (src == SLJIT_IMM) {
if (FAST_IS_REG(dst)) {
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
- return emit_do_imm(compiler, MOV_r_i32 + reg_map[dst], srcw);
+ return emit_do_imm(compiler, MOV_r_i32 | reg_map[dst], srcw);
#else
inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, srcw, dst, 0);
FAIL_IF(!inst);
@@ -1285,12 +1464,8 @@ static sljit_s32 emit_mov_half(struct sljit_compiler *compiler, sljit_s32 sign,
if ((dst & SLJIT_MEM) && FAST_IS_REG(src))
dst_r = src;
- else {
- inst = emit_x86_instruction(compiler, 2, dst_r, 0, src, srcw);
- FAIL_IF(!inst);
- *inst++ = GROUP_0F;
- *inst = sign ? MOVSX_r_rm16 : MOVZX_r_rm16;
- }
+ else
+ FAIL_IF(emit_groupf(compiler, sign ? MOVSX_r_rm16 : MOVZX_r_rm16, dst_r, src, srcw));
if (dst & SLJIT_MEM) {
inst = emit_x86_instruction(compiler, 1 | EX86_NO_REXW | EX86_PREF_66, dst_r, 0, dst, dstw);
@@ -1311,136 +1486,206 @@ static sljit_s32 emit_unary(struct sljit_compiler *compiler, sljit_u8 opcode,
/* Same input and output */
inst = emit_x86_instruction(compiler, 1, 0, 0, dst, dstw);
FAIL_IF(!inst);
- *inst++ = GROUP_F7;
- *inst |= opcode;
- return SLJIT_SUCCESS;
- }
-
- if (SLJIT_UNLIKELY(dst == SLJIT_UNUSED))
- dst = TMP_REG1;
-
- if (FAST_IS_REG(dst)) {
- EMIT_MOV(compiler, dst, 0, src, srcw);
- inst = emit_x86_instruction(compiler, 1, 0, 0, dst, 0);
- FAIL_IF(!inst);
- *inst++ = GROUP_F7;
- *inst |= opcode;
+ inst[0] = GROUP_F7;
+ inst[1] |= opcode;
return SLJIT_SUCCESS;
}
- EMIT_MOV(compiler, TMP_REG1, 0, src, srcw);
- inst = emit_x86_instruction(compiler, 1, 0, 0, TMP_REG1, 0);
- FAIL_IF(!inst);
- *inst++ = GROUP_F7;
- *inst |= opcode;
- EMIT_MOV(compiler, dst, dstw, TMP_REG1, 0);
- return SLJIT_SUCCESS;
-}
-
-static sljit_s32 emit_not_with_flags(struct sljit_compiler *compiler,
- sljit_s32 dst, sljit_sw dstw,
- sljit_s32 src, sljit_sw srcw)
-{
- sljit_u8* inst;
-
- if (dst == SLJIT_UNUSED)
- dst = TMP_REG1;
-
if (FAST_IS_REG(dst)) {
EMIT_MOV(compiler, dst, 0, src, srcw);
inst = emit_x86_instruction(compiler, 1, 0, 0, dst, 0);
FAIL_IF(!inst);
- *inst++ = GROUP_F7;
- *inst |= NOT_rm;
- inst = emit_x86_instruction(compiler, 1, dst, 0, dst, 0);
- FAIL_IF(!inst);
- *inst = OR_r_rm;
+ inst[0] = GROUP_F7;
+ inst[1] |= opcode;
return SLJIT_SUCCESS;
}
EMIT_MOV(compiler, TMP_REG1, 0, src, srcw);
inst = emit_x86_instruction(compiler, 1, 0, 0, TMP_REG1, 0);
FAIL_IF(!inst);
- *inst++ = GROUP_F7;
- *inst |= NOT_rm;
- inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, TMP_REG1, 0);
- FAIL_IF(!inst);
- *inst = OR_r_rm;
+ inst[0] = GROUP_F7;
+ inst[1] |= opcode;
EMIT_MOV(compiler, dst, dstw, TMP_REG1, 0);
return SLJIT_SUCCESS;
}
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
static const sljit_sw emit_clz_arg = 32 + 31;
+static const sljit_sw emit_ctz_arg = 32;
#endif
-static sljit_s32 emit_clz(struct sljit_compiler *compiler, sljit_s32 op_flags,
+static sljit_s32 emit_clz_ctz(struct sljit_compiler *compiler, sljit_s32 is_clz,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
{
sljit_u8* inst;
sljit_s32 dst_r;
+ sljit_sw max;
- SLJIT_UNUSED_ARG(op_flags);
-
- if (cpu_has_cmov == -1)
- get_cpu_features();
+ SLJIT_ASSERT(cpu_feature_list != 0);
dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
- inst = emit_x86_instruction(compiler, 2, dst_r, 0, src, srcw);
- FAIL_IF(!inst);
- *inst++ = GROUP_0F;
- *inst = BSR_r_rm;
+ if (is_clz ? (cpu_feature_list & CPU_FEATURE_LZCNT) : (cpu_feature_list & CPU_FEATURE_TZCNT)) {
+ FAIL_IF(emit_groupf(compiler, (is_clz ? LZCNT_r_rm : TZCNT_r_rm) | EX86_PREF_F3, dst_r, src, srcw));
+
+ if (dst & SLJIT_MEM)
+ EMIT_MOV(compiler, dst, dstw, TMP_REG1, 0);
+ return SLJIT_SUCCESS;
+ }
+
+ FAIL_IF(emit_groupf(compiler, is_clz ? BSR_r_rm : BSF_r_rm, dst_r, src, srcw));
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
- if (cpu_has_cmov) {
+ max = is_clz ? (32 + 31) : 32;
+
+ if (cpu_feature_list & CPU_FEATURE_CMOV) {
if (dst_r != TMP_REG1) {
- EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, 32 + 31);
+ EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, max);
inst = emit_x86_instruction(compiler, 2, dst_r, 0, TMP_REG1, 0);
}
else
- inst = emit_x86_instruction(compiler, 2, dst_r, 0, SLJIT_MEM0(), (sljit_sw)&emit_clz_arg);
+ inst = emit_x86_instruction(compiler, 2, dst_r, 0, SLJIT_MEM0(), is_clz ? (sljit_sw)&emit_clz_arg : (sljit_sw)&emit_ctz_arg);
FAIL_IF(!inst);
- *inst++ = GROUP_0F;
- *inst = CMOVE_r_rm;
+ inst[0] = GROUP_0F;
+ inst[1] = CMOVE_r_rm;
}
else
- FAIL_IF(sljit_emit_cmov_generic(compiler, SLJIT_EQUAL, dst_r, SLJIT_IMM, 32 + 31));
-
- inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, 31, dst_r, 0);
-#else
- if (cpu_has_cmov) {
- EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_IMM, !(op_flags & SLJIT_I32_OP) ? (64 + 63) : (32 + 31));
+ FAIL_IF(emit_cmov_generic(compiler, SLJIT_EQUAL, dst_r, SLJIT_IMM, max));
- inst = emit_x86_instruction(compiler, 2, dst_r, 0, TMP_REG2, 0);
+ if (is_clz) {
+ inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, 31, dst_r, 0);
FAIL_IF(!inst);
- *inst++ = GROUP_0F;
- *inst = CMOVE_r_rm;
+ *(inst + 1) |= XOR;
}
+#else
+ if (is_clz)
+ max = compiler->mode32 ? (32 + 31) : (64 + 63);
else
- FAIL_IF(sljit_emit_cmov_generic(compiler, SLJIT_EQUAL, dst_r, SLJIT_IMM, !(op_flags & SLJIT_I32_OP) ? (64 + 63) : (32 + 31)));
+ max = compiler->mode32 ? 32 : 64;
- inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, !(op_flags & SLJIT_I32_OP) ? 63 : 31, dst_r, 0);
-#endif
+ if (cpu_feature_list & CPU_FEATURE_CMOV) {
+ EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_IMM, max);
+ FAIL_IF(emit_groupf(compiler, CMOVE_r_rm, dst_r, TMP_REG2, 0));
+ } else
+ FAIL_IF(emit_cmov_generic(compiler, SLJIT_EQUAL, dst_r, SLJIT_IMM, max));
- FAIL_IF(!inst);
- *(inst + 1) |= XOR;
+ if (is_clz) {
+ inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, max >> 1, dst_r, 0);
+ FAIL_IF(!inst);
+ *(inst + 1) |= XOR;
+ }
+#endif
if (dst & SLJIT_MEM)
EMIT_MOV(compiler, dst, dstw, TMP_REG1, 0);
return SLJIT_SUCCESS;
}
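
The fallback arithmetic deserves a note: BSR yields the index of the highest set bit, so clz(x) = 31 - bsr(x) = bsr(x) ^ 31 for nonzero x, and for x == 0 (where BSR leaves its destination undefined and sets ZF) the CMOV supplies 32 + 31 so the same XOR produces 32; the 64-bit path uses 64 + 63 and XOR with max >> 1 = 63. CTZ is BSF plus a CMOV of 32 (or 64), with no XOR. A compilable model of the 32-bit CLZ case (function names are ours):

    #include <assert.h>
    #include <stdint.h>

    static unsigned bsr32(uint32_t x) /* index of the highest set bit, x != 0 */
    {
        unsigned i = 31;
        while (!(x & 0x80000000u)) { x <<= 1; i--; }
        return i;
    }

    static unsigned clz32(uint32_t x)
    {
        unsigned r = x ? bsr32(x) : (32 + 31); /* the CMOVE path for x == 0 */
        return r ^ 31;                         /* the trailing XOR above */
    }

    int main(void)
    {
        assert(clz32(0) == 32);
        assert(clz32(1) == 31);
        assert(clz32(0x80000000u) == 0);
        return 0;
    }
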
+static sljit_s32 emit_bswap(struct sljit_compiler *compiler,
+ sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 src, sljit_sw srcw)
+{
+ sljit_u8 *inst;
+ sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
+ sljit_uw size;
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ sljit_u8 rex = 0;
+#else /* !SLJIT_CONFIG_X86_64 */
+ sljit_s32 dst_is_ereg = op & SLJIT_32;
+#endif /* SLJIT_CONFIG_X86_64 */
+
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ if (op == SLJIT_REV_U32 || op == SLJIT_REV_S32)
+ compiler->mode32 = 1;
+#else /* !SLJIT_CONFIG_X86_64 */
+ op &= ~SLJIT_32;
+#endif /* SLJIT_CONFIG_X86_64 */
+
+ if (src != dst_r) {
+		/* Only the lower 16 bits are read for eregs. */
+ if (op == SLJIT_REV_U16 || op == SLJIT_REV_S16)
+ FAIL_IF(emit_mov_half(compiler, 0, dst_r, 0, src, srcw));
+ else
+ EMIT_MOV(compiler, dst_r, 0, src, srcw);
+ }
+
+ size = 2;
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ if (!compiler->mode32)
+ rex = REX_W;
+
+ if (reg_map[dst_r] >= 8)
+ rex |= REX_B;
+
+ if (rex != 0)
+ size++;
+#endif /* SLJIT_CONFIG_X86_64 */
+
+ inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
+ FAIL_IF(!inst);
+ INC_SIZE(size);
+
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ if (rex != 0)
+ *inst++ = rex;
+
+ inst[0] = GROUP_0F;
+ inst[1] = BSWAP_r | reg_lmap[dst_r];
+#else /* !SLJIT_CONFIG_X86_64 */
+ inst[0] = GROUP_0F;
+ inst[1] = BSWAP_r | reg_map[dst_r];
+#endif /* SLJIT_CONFIG_X86_64 */
+
+ if (op == SLJIT_REV_U16 || op == SLJIT_REV_S16) {
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ size = compiler->mode32 ? 16 : 48;
+#else /* !SLJIT_CONFIG_X86_64 */
+ size = 16;
+#endif /* SLJIT_CONFIG_X86_64 */
+
+ inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_IMM, (sljit_sw)size, dst_r, 0);
+ FAIL_IF(!inst);
+ if (op == SLJIT_REV_U16)
+ inst[1] |= SHR;
+ else
+ inst[1] |= SAR;
+ }
+
+ if (dst & SLJIT_MEM) {
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
+ if (dst_is_ereg)
+ op = SLJIT_REV;
+#endif /* SLJIT_CONFIG_X86_32 */
+ if (op == SLJIT_REV_U16 || op == SLJIT_REV_S16)
+ return emit_mov_half(compiler, 0, dst, dstw, TMP_REG1, 0);
+
+ return emit_mov(compiler, dst, dstw, TMP_REG1, 0);
+ }
+
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ if (op == SLJIT_REV_S32) {
+ compiler->mode32 = 0;
+ inst = emit_x86_instruction(compiler, 1, dst, 0, dst, 0);
+ FAIL_IF(!inst);
+ *inst = MOVSXD_r_rm;
+ }
+#endif /* SLJIT_CONFIG_X86_64 */
+
+ return SLJIT_SUCCESS;
+}
+
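
For the 16-bit reversals, emit_bswap() swaps the whole register and then shifts the two bytes of interest back down: SHR for REV_U16, SAR for REV_S16, by 16 (or 48 in 64-bit mode). A compilable model of the 32-bit case (helper names are ours):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t bswap32(uint32_t x) /* what the BSWAP instruction computes */
    {
        return (x >> 24) | ((x >> 8) & 0xff00) | ((x << 8) & 0xff0000) | (x << 24);
    }

    static uint32_t rev_u16(uint32_t x) { return bswap32(x) >> 16; }            /* SHR path */
    static int32_t  rev_s16(uint32_t x) { return (int16_t)(bswap32(x) >> 16); } /* SAR path */

    int main(void)
    {
        assert(rev_u16(0x00001234) == 0x3412);
        assert(rev_s16(0x000012f0) == (int32_t)0xfffff012); /* sign bit of 0xf012 kept */
        return 0;
    }
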
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
{
- sljit_s32 op_flags = GET_ALL_FLAGS(op);
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
sljit_s32 dst_is_ereg = 0;
-#endif
+#else /* !SLJIT_CONFIG_X86_32 */
+ sljit_s32 op_flags = GET_ALL_FLAGS(op);
+#endif /* SLJIT_CONFIG_X86_32 */
CHECK_ERROR();
CHECK(check_sljit_emit_op1(compiler, op, dst, dstw, src, srcw));
@@ -1450,35 +1695,35 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
CHECK_EXTRA_REGS(dst, dstw, dst_is_ereg = 1);
CHECK_EXTRA_REGS(src, srcw, (void)0);
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
- compiler->mode32 = op_flags & SLJIT_I32_OP;
-#endif
+ compiler->mode32 = op_flags & SLJIT_32;
+#endif /* SLJIT_CONFIG_X86_64 */
op = GET_OPCODE(op);
if (op >= SLJIT_MOV && op <= SLJIT_MOV_P) {
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
compiler->mode32 = 0;
-#endif
+#endif /* SLJIT_CONFIG_X86_64 */
if (FAST_IS_REG(src) && src == dst) {
if (!TYPE_CAST_NEEDED(op))
return SLJIT_SUCCESS;
}
- if (op_flags & SLJIT_I32_OP) {
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ if (op_flags & SLJIT_32) {
if (src & SLJIT_MEM) {
if (op == SLJIT_MOV_S32)
op = SLJIT_MOV_U32;
}
- else if (src & SLJIT_IMM) {
+ else if (src == SLJIT_IMM) {
if (op == SLJIT_MOV_U32)
op = SLJIT_MOV_S32;
}
-#endif
}
+#endif /* SLJIT_CONFIG_X86_64 */
- if (src & SLJIT_IMM) {
+ if (src == SLJIT_IMM) {
switch (op) {
case SLJIT_MOV_U8:
srcw = (sljit_u8)srcw;
@@ -1499,12 +1744,12 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
case SLJIT_MOV_S32:
srcw = (sljit_s32)srcw;
break;
-#endif
+#endif /* SLJIT_CONFIG_X86_64 */
}
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
if (SLJIT_UNLIKELY(dst_is_ereg))
return emit_mov(compiler, dst, dstw, src, srcw);
-#endif
+#endif /* SLJIT_CONFIG_X86_32 */
}
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
@@ -1512,7 +1757,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
SLJIT_ASSERT(dst == SLJIT_MEM1(SLJIT_SP));
dst = TMP_REG1;
}
-#endif
+#endif /* SLJIT_CONFIG_X86_32 */
switch (op) {
case SLJIT_MOV:
@@ -1520,8 +1765,9 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
case SLJIT_MOV_U32:
case SLJIT_MOV_S32:
-#endif
- FAIL_IF(emit_mov(compiler, dst, dstw, src, srcw));
+ case SLJIT_MOV32:
+#endif /* SLJIT_CONFIG_X86_32 */
+ EMIT_MOV(compiler, dst, dstw, src, srcw);
break;
case SLJIT_MOV_U8:
FAIL_IF(emit_mov_byte(compiler, 0, dst, dstw, src, srcw));
@@ -1542,62 +1788,40 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
case SLJIT_MOV_S32:
FAIL_IF(emit_mov_int(compiler, 1, dst, dstw, src, srcw));
break;
-#endif
+ case SLJIT_MOV32:
+ compiler->mode32 = 1;
+ EMIT_MOV(compiler, dst, dstw, src, srcw);
+ compiler->mode32 = 0;
+ break;
+#endif /* SLJIT_CONFIG_X86_64 */
}
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
if (SLJIT_UNLIKELY(dst_is_ereg) && dst == TMP_REG1)
return emit_mov(compiler, SLJIT_MEM1(SLJIT_SP), dstw, TMP_REG1, 0);
-#endif
+#endif /* SLJIT_CONFIG_X86_32 */
return SLJIT_SUCCESS;
}
switch (op) {
- case SLJIT_NOT:
- if (SLJIT_UNLIKELY(op_flags & SLJIT_SET_Z))
- return emit_not_with_flags(compiler, dst, dstw, src, srcw);
- return emit_unary(compiler, NOT_rm, dst, dstw, src, srcw);
-
- case SLJIT_NEG:
- return emit_unary(compiler, NEG_rm, dst, dstw, src, srcw);
-
case SLJIT_CLZ:
- return emit_clz(compiler, op_flags, dst, dstw, src, srcw);
+ case SLJIT_CTZ:
+ return emit_clz_ctz(compiler, (op == SLJIT_CLZ), dst, dstw, src, srcw);
+ case SLJIT_REV:
+ case SLJIT_REV_U16:
+ case SLJIT_REV_S16:
+ case SLJIT_REV_U32:
+ case SLJIT_REV_S32:
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
+ if (dst_is_ereg)
+ op |= SLJIT_32;
+#endif /* SLJIT_CONFIG_X86_32 */
+ return emit_bswap(compiler, op, dst, dstw, src, srcw);
}
return SLJIT_SUCCESS;
}
-#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
-
-#define BINARY_IMM(op_imm, op_mr, immw, arg, argw) \
- if (IS_HALFWORD(immw) || compiler->mode32) { \
- inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, immw, arg, argw); \
- FAIL_IF(!inst); \
- *(inst + 1) |= (op_imm); \
- } \
- else { \
- FAIL_IF(emit_load_imm64(compiler, (arg == TMP_REG1) ? TMP_REG2 : TMP_REG1, immw)); \
- inst = emit_x86_instruction(compiler, 1, (arg == TMP_REG1) ? TMP_REG2 : TMP_REG1, 0, arg, argw); \
- FAIL_IF(!inst); \
- *inst = (op_mr); \
- }
-
-#define BINARY_EAX_IMM(op_eax_imm, immw) \
- FAIL_IF(emit_do_imm32(compiler, (!compiler->mode32) ? REX_W : 0, (op_eax_imm), immw))
-
-#else
-
-#define BINARY_IMM(op_imm, op_mr, immw, arg, argw) \
- inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, immw, arg, argw); \
- FAIL_IF(!inst); \
- *(inst + 1) |= (op_imm);
-
-#define BINARY_EAX_IMM(op_eax_imm, immw) \
- FAIL_IF(emit_do_imm(compiler, (op_eax_imm), immw))
-
-#endif
-
static sljit_s32 emit_cum_binary(struct sljit_compiler *compiler,
sljit_u32 op_types,
sljit_s32 dst, sljit_sw dstw,
@@ -1605,26 +1829,13 @@ static sljit_s32 emit_cum_binary(struct sljit_compiler *compiler,
sljit_s32 src2, sljit_sw src2w)
{
sljit_u8* inst;
- sljit_u8 op_eax_imm = (op_types >> 24);
- sljit_u8 op_rm = (op_types >> 16) & 0xff;
- sljit_u8 op_mr = (op_types >> 8) & 0xff;
- sljit_u8 op_imm = op_types & 0xff;
-
- if (dst == SLJIT_UNUSED) {
- EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
- if (src2 & SLJIT_IMM) {
- BINARY_IMM(op_imm, op_mr, src2w, TMP_REG1, 0);
- }
- else {
- inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, src2, src2w);
- FAIL_IF(!inst);
- *inst = op_rm;
- }
- return SLJIT_SUCCESS;
- }
+ sljit_u8 op_eax_imm = U8(op_types >> 24);
+ sljit_u8 op_rm = U8((op_types >> 16) & 0xff);
+ sljit_u8 op_mr = U8((op_types >> 8) & 0xff);
+ sljit_u8 op_imm = U8(op_types & 0xff);
if (dst == src1 && dstw == src1w) {
- if (src2 & SLJIT_IMM) {
+ if (src2 == SLJIT_IMM) {
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
if ((dst == SLJIT_R0) && (src2w > 127 || src2w < -128) && (compiler->mode32 || IS_HALFWORD(src2w))) {
#else
@@ -1658,7 +1869,7 @@ static sljit_s32 emit_cum_binary(struct sljit_compiler *compiler,
/* Only for cumulative operations. */
if (dst == src2 && dstw == src2w) {
- if (src1 & SLJIT_IMM) {
+ if (src1 == SLJIT_IMM) {
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
if ((dst == SLJIT_R0) && (src1w > 127 || src1w < -128) && (compiler->mode32 || IS_HALFWORD(src1w))) {
#else
@@ -1692,7 +1903,7 @@ static sljit_s32 emit_cum_binary(struct sljit_compiler *compiler,
/* General version. */
if (FAST_IS_REG(dst)) {
EMIT_MOV(compiler, dst, 0, src1, src1w);
- if (src2 & SLJIT_IMM) {
+ if (src2 == SLJIT_IMM) {
BINARY_IMM(op_imm, op_mr, src2w, dst, 0);
}
else {
@@ -1704,7 +1915,7 @@ static sljit_s32 emit_cum_binary(struct sljit_compiler *compiler,
else {
/* This version requires less memory writing. */
EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
- if (src2 & SLJIT_IMM) {
+ if (src2 == SLJIT_IMM) {
BINARY_IMM(op_imm, op_mr, src2w, TMP_REG1, 0);
}
else {
@@ -1725,26 +1936,13 @@ static sljit_s32 emit_non_cum_binary(struct sljit_compiler *compiler,
sljit_s32 src2, sljit_sw src2w)
{
sljit_u8* inst;
- sljit_u8 op_eax_imm = (op_types >> 24);
- sljit_u8 op_rm = (op_types >> 16) & 0xff;
- sljit_u8 op_mr = (op_types >> 8) & 0xff;
- sljit_u8 op_imm = op_types & 0xff;
-
- if (dst == SLJIT_UNUSED) {
- EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
- if (src2 & SLJIT_IMM) {
- BINARY_IMM(op_imm, op_mr, src2w, TMP_REG1, 0);
- }
- else {
- inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, src2, src2w);
- FAIL_IF(!inst);
- *inst = op_rm;
- }
- return SLJIT_SUCCESS;
- }
+ sljit_u8 op_eax_imm = U8(op_types >> 24);
+ sljit_u8 op_rm = U8((op_types >> 16) & 0xff);
+ sljit_u8 op_mr = U8((op_types >> 8) & 0xff);
+ sljit_u8 op_imm = U8(op_types & 0xff);
if (dst == src1 && dstw == src1w) {
- if (src2 & SLJIT_IMM) {
+ if (src2 == SLJIT_IMM) {
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
if ((dst == SLJIT_R0) && (src2w > 127 || src2w < -128) && (compiler->mode32 || IS_HALFWORD(src2w))) {
#else
@@ -1778,7 +1976,7 @@ static sljit_s32 emit_non_cum_binary(struct sljit_compiler *compiler,
/* General version. */
if (FAST_IS_REG(dst) && dst != src2) {
EMIT_MOV(compiler, dst, 0, src1, src1w);
- if (src2 & SLJIT_IMM) {
+ if (src2 == SLJIT_IMM) {
BINARY_IMM(op_imm, op_mr, src2w, dst, 0);
}
else {
@@ -1790,7 +1988,7 @@ static sljit_s32 emit_non_cum_binary(struct sljit_compiler *compiler,
else {
/* This version requires less memory writing. */
EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
- if (src2 & SLJIT_IMM) {
+ if (src2 == SLJIT_IMM) {
BINARY_IMM(op_imm, op_mr, src2w, TMP_REG1, 0);
}
else {
@@ -1810,25 +2008,15 @@ static sljit_s32 emit_mul(struct sljit_compiler *compiler,
sljit_s32 src2, sljit_sw src2w)
{
sljit_u8* inst;
- sljit_s32 dst_r;
-
- dst_r = SLOW_IS_REG(dst) ? dst : TMP_REG1;
+ sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
/* Register destination. */
- if (dst_r == src1 && !(src2 & SLJIT_IMM)) {
- inst = emit_x86_instruction(compiler, 2, dst_r, 0, src2, src2w);
- FAIL_IF(!inst);
- *inst++ = GROUP_0F;
- *inst = IMUL_r_rm;
- }
- else if (dst_r == src2 && !(src1 & SLJIT_IMM)) {
- inst = emit_x86_instruction(compiler, 2, dst_r, 0, src1, src1w);
- FAIL_IF(!inst);
- *inst++ = GROUP_0F;
- *inst = IMUL_r_rm;
- }
- else if (src1 & SLJIT_IMM) {
- if (src2 & SLJIT_IMM) {
+ if (dst_r == src1 && src2 != SLJIT_IMM) {
+ FAIL_IF(emit_groupf(compiler, IMUL_r_rm, dst_r, src2, src2w));
+ } else if (dst_r == src2 && src1 != SLJIT_IMM) {
+ FAIL_IF(emit_groupf(compiler, IMUL_r_rm, dst_r, src1, src1w));
+ } else if (src1 == SLJIT_IMM) {
+ if (src2 == SLJIT_IMM) {
EMIT_MOV(compiler, dst_r, 0, SLJIT_IMM, src2w);
src2 = dst_r;
src2w = 0;
@@ -1838,10 +2026,8 @@ static sljit_s32 emit_mul(struct sljit_compiler *compiler,
inst = emit_x86_instruction(compiler, 1, dst_r, 0, src2, src2w);
FAIL_IF(!inst);
*inst = IMUL_r_rm_i8;
- inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
- FAIL_IF(!inst);
- INC_SIZE(1);
- *inst = (sljit_s8)src1w;
+
+ FAIL_IF(emit_byte(compiler, U8(src1w)));
}
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
else {
@@ -1867,30 +2053,26 @@ static sljit_s32 emit_mul(struct sljit_compiler *compiler,
if (dst_r != src2)
EMIT_MOV(compiler, dst_r, 0, src2, src2w);
FAIL_IF(emit_load_imm64(compiler, TMP_REG2, src1w));
- inst = emit_x86_instruction(compiler, 2, dst_r, 0, TMP_REG2, 0);
- FAIL_IF(!inst);
- *inst++ = GROUP_0F;
- *inst = IMUL_r_rm;
+ FAIL_IF(emit_groupf(compiler, IMUL_r_rm, dst_r, TMP_REG2, 0));
}
#endif
}
- else if (src2 & SLJIT_IMM) {
+ else if (src2 == SLJIT_IMM) {
/* Note: src1 is NOT immediate. */
if (src2w <= 127 && src2w >= -128) {
inst = emit_x86_instruction(compiler, 1, dst_r, 0, src1, src1w);
FAIL_IF(!inst);
*inst = IMUL_r_rm_i8;
- inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
- FAIL_IF(!inst);
- INC_SIZE(1);
- *inst = (sljit_s8)src2w;
+
+ FAIL_IF(emit_byte(compiler, U8(src2w)));
}
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
else {
inst = emit_x86_instruction(compiler, 1, dst_r, 0, src1, src1w);
FAIL_IF(!inst);
*inst = IMUL_r_rm_i32;
+
inst = (sljit_u8*)ensure_buf(compiler, 1 + 4);
FAIL_IF(!inst);
INC_SIZE(4);
@@ -1901,31 +2083,24 @@ static sljit_s32 emit_mul(struct sljit_compiler *compiler,
inst = emit_x86_instruction(compiler, 1, dst_r, 0, src1, src1w);
FAIL_IF(!inst);
*inst = IMUL_r_rm_i32;
+
inst = (sljit_u8*)ensure_buf(compiler, 1 + 4);
FAIL_IF(!inst);
INC_SIZE(4);
sljit_unaligned_store_s32(inst, (sljit_s32)src2w);
- }
- else {
+ } else {
if (dst_r != src1)
EMIT_MOV(compiler, dst_r, 0, src1, src1w);
FAIL_IF(emit_load_imm64(compiler, TMP_REG2, src2w));
- inst = emit_x86_instruction(compiler, 2, dst_r, 0, TMP_REG2, 0);
- FAIL_IF(!inst);
- *inst++ = GROUP_0F;
- *inst = IMUL_r_rm;
+ FAIL_IF(emit_groupf(compiler, IMUL_r_rm, dst_r, TMP_REG2, 0));
}
#endif
- }
- else {
+ } else {
/* Neither argument is immediate. */
if (ADDRESSING_DEPENDS_ON(src2, dst_r))
dst_r = TMP_REG1;
EMIT_MOV(compiler, dst_r, 0, src1, src1w);
- inst = emit_x86_instruction(compiler, 2, dst_r, 0, src2, src2w);
- FAIL_IF(!inst);
- *inst++ = GROUP_0F;
- *inst = IMUL_r_rm;
+ FAIL_IF(emit_groupf(compiler, IMUL_r_rm, dst_r, src2, src2w));
}
if (dst & SLJIT_MEM)
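
The -128..127 checks in emit_mul() choose between the two immediate forms of IMUL: IMUL_r_rm_i8 (opcode 6B) takes one sign-extended byte, appended via emit_byte(), while IMUL_r_rm_i32 (opcode 69) needs a full four-byte immediate. The selection rule, as a trivial sketch:

    #include <assert.h>

    static int imul_imm_bytes(long imm) /* immediate bytes the encoder must emit */
    {
        return (imm >= -128 && imm <= 127) ? 1 : 4; /* 6B ib vs. 69 id */
    }

    int main(void)
    {
        assert(imul_imm_bytes(100) == 1);
        assert(imul_imm_bytes(-128) == 1);
        assert(imul_imm_bytes(128) == 4);
        return 0;
    }
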
@@ -1958,10 +2133,10 @@ static sljit_s32 emit_lea_binary(struct sljit_compiler *compiler,
done = 1;
}
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
- if ((src2 & SLJIT_IMM) && (compiler->mode32 || IS_HALFWORD(src2w))) {
+ if (src2 == SLJIT_IMM && (compiler->mode32 || IS_HALFWORD(src2w))) {
inst = emit_x86_instruction(compiler, 1, dst_r, 0, SLJIT_MEM1(src1), (sljit_s32)src2w);
#else
- if (src2 & SLJIT_IMM) {
+ if (src2 == SLJIT_IMM) {
inst = emit_x86_instruction(compiler, 1, dst_r, 0, SLJIT_MEM1(src1), src2w);
#endif
FAIL_IF(!inst);
@@ -1971,10 +2146,10 @@ static sljit_s32 emit_lea_binary(struct sljit_compiler *compiler,
}
else if (FAST_IS_REG(src2)) {
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
- if ((src1 & SLJIT_IMM) && (compiler->mode32 || IS_HALFWORD(src1w))) {
+ if (src1 == SLJIT_IMM && (compiler->mode32 || IS_HALFWORD(src1w))) {
inst = emit_x86_instruction(compiler, 1, dst_r, 0, SLJIT_MEM1(src2), (sljit_s32)src1w);
#else
- if (src1 & SLJIT_IMM) {
+ if (src1 == SLJIT_IMM) {
inst = emit_x86_instruction(compiler, 1, dst_r, 0, SLJIT_MEM1(src2), src1w);
#endif
FAIL_IF(!inst);
@@ -1998,16 +2173,16 @@ static sljit_s32 emit_cmp_binary(struct sljit_compiler *compiler,
sljit_u8* inst;
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
- if (src1 == SLJIT_R0 && (src2 & SLJIT_IMM) && (src2w > 127 || src2w < -128) && (compiler->mode32 || IS_HALFWORD(src2w))) {
+ if (src1 == SLJIT_R0 && src2 == SLJIT_IMM && (src2w > 127 || src2w < -128) && (compiler->mode32 || IS_HALFWORD(src2w))) {
#else
- if (src1 == SLJIT_R0 && (src2 & SLJIT_IMM) && (src2w > 127 || src2w < -128)) {
+ if (src1 == SLJIT_R0 && src2 == SLJIT_IMM && (src2w > 127 || src2w < -128)) {
#endif
BINARY_EAX_IMM(CMP_EAX_i32, src2w);
return SLJIT_SUCCESS;
}
if (FAST_IS_REG(src1)) {
- if (src2 & SLJIT_IMM) {
+ if (src2 == SLJIT_IMM) {
BINARY_IMM(CMP, CMP_rm_r, src2w, src1, 0);
}
else {
@@ -2018,15 +2193,15 @@ static sljit_s32 emit_cmp_binary(struct sljit_compiler *compiler,
return SLJIT_SUCCESS;
}
- if (FAST_IS_REG(src2) && !(src1 & SLJIT_IMM)) {
+ if (FAST_IS_REG(src2) && src1 != SLJIT_IMM) {
inst = emit_x86_instruction(compiler, 1, src2, 0, src1, src1w);
FAIL_IF(!inst);
*inst = CMP_rm_r;
return SLJIT_SUCCESS;
}
- if (src2 & SLJIT_IMM) {
- if (src1 & SLJIT_IMM) {
+ if (src2 == SLJIT_IMM) {
+ if (src1 == SLJIT_IMM) {
EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
src1 = TMP_REG1;
src1w = 0;
@@ -2049,25 +2224,25 @@ static sljit_s32 emit_test_binary(struct sljit_compiler *compiler,
sljit_u8* inst;
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
- if (src1 == SLJIT_R0 && (src2 & SLJIT_IMM) && (src2w > 127 || src2w < -128) && (compiler->mode32 || IS_HALFWORD(src2w))) {
+ if (src1 == SLJIT_R0 && src2 == SLJIT_IMM && (src2w > 127 || src2w < -128) && (compiler->mode32 || IS_HALFWORD(src2w))) {
#else
- if (src1 == SLJIT_R0 && (src2 & SLJIT_IMM) && (src2w > 127 || src2w < -128)) {
+ if (src1 == SLJIT_R0 && src2 == SLJIT_IMM && (src2w > 127 || src2w < -128)) {
#endif
BINARY_EAX_IMM(TEST_EAX_i32, src2w);
return SLJIT_SUCCESS;
}
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
- if (src2 == SLJIT_R0 && (src1 & SLJIT_IMM) && (src1w > 127 || src1w < -128) && (compiler->mode32 || IS_HALFWORD(src1w))) {
+ if (src2 == SLJIT_R0 && src1 == SLJIT_IMM && (src1w > 127 || src1w < -128) && (compiler->mode32 || IS_HALFWORD(src1w))) {
#else
- if (src2 == SLJIT_R0 && (src1 & SLJIT_IMM) && (src1w > 127 || src1w < -128)) {
+ if (src2 == SLJIT_R0 && src1 == SLJIT_IMM && (src1w > 127 || src1w < -128)) {
#endif
BINARY_EAX_IMM(TEST_EAX_i32, src1w);
return SLJIT_SUCCESS;
}
- if (!(src1 & SLJIT_IMM)) {
- if (src2 & SLJIT_IMM) {
+ if (src1 != SLJIT_IMM) {
+ if (src2 == SLJIT_IMM) {
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
if (IS_HALFWORD(src2w) || compiler->mode32) {
inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, src2w, src1, src1w);
@@ -2095,8 +2270,8 @@ static sljit_s32 emit_test_binary(struct sljit_compiler *compiler,
}
}
- if (!(src2 & SLJIT_IMM)) {
- if (src1 & SLJIT_IMM) {
+ if (src2 != SLJIT_IMM) {
+ if (src1 == SLJIT_IMM) {
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
if (IS_HALFWORD(src1w) || compiler->mode32) {
inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, src1w, src2, src2w);
@@ -2125,7 +2300,7 @@ static sljit_s32 emit_test_binary(struct sljit_compiler *compiler,
}
EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
- if (src2 & SLJIT_IMM) {
+ if (src2 == SLJIT_IMM) {
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
if (IS_HALFWORD(src2w) || compiler->mode32) {
inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, src2w, TMP_REG1, 0);
@@ -2158,27 +2333,23 @@ static sljit_s32 emit_shift(struct sljit_compiler *compiler,
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w)
{
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ sljit_s32 mode32;
+#endif
sljit_u8* inst;
- if ((src2 & SLJIT_IMM) || (src2 == SLJIT_PREF_SHIFT_REG)) {
+ if (src2 == SLJIT_IMM || src2 == SLJIT_PREF_SHIFT_REG) {
if (dst == src1 && dstw == src1w) {
inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, src2, src2w, dst, dstw);
FAIL_IF(!inst);
- *inst |= mode;
- return SLJIT_SUCCESS;
- }
- if (dst == SLJIT_UNUSED) {
- EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
- inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, src2, src2w, TMP_REG1, 0);
- FAIL_IF(!inst);
- *inst |= mode;
+ inst[1] |= mode;
return SLJIT_SUCCESS;
}
if (dst == SLJIT_PREF_SHIFT_REG && src2 == SLJIT_PREF_SHIFT_REG) {
EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
FAIL_IF(!inst);
- *inst |= mode;
+ inst[1] |= mode;
EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
return SLJIT_SUCCESS;
}
@@ -2186,14 +2357,14 @@ static sljit_s32 emit_shift(struct sljit_compiler *compiler,
EMIT_MOV(compiler, dst, 0, src1, src1w);
inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, src2, src2w, dst, 0);
FAIL_IF(!inst);
- *inst |= mode;
+ inst[1] |= mode;
return SLJIT_SUCCESS;
}
EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, src2, src2w, TMP_REG1, 0);
FAIL_IF(!inst);
- *inst |= mode;
+ inst[1] |= mode;
EMIT_MOV(compiler, dst, dstw, TMP_REG1, 0);
return SLJIT_SUCCESS;
}
@@ -2203,41 +2374,62 @@ static sljit_s32 emit_shift(struct sljit_compiler *compiler,
EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w);
inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
FAIL_IF(!inst);
- *inst |= mode;
- EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
+ inst[1] |= mode;
+ return emit_mov(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
}
- else if (SLOW_IS_REG(dst) && dst != src2 && !ADDRESSING_DEPENDS_ON(src2, dst)) {
+
+ if (FAST_IS_REG(dst) && dst != src2 && dst != TMP_REG1 && !ADDRESSING_DEPENDS_ON(src2, dst)) {
if (src1 != dst)
EMIT_MOV(compiler, dst, 0, src1, src1w);
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ mode32 = compiler->mode32;
+ compiler->mode32 = 0;
+#endif
EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_PREF_SHIFT_REG, 0);
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ compiler->mode32 = mode32;
+#endif
EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w);
inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, dst, 0);
FAIL_IF(!inst);
- *inst |= mode;
+ inst[1] |= mode;
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ compiler->mode32 = 0;
+#endif
EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ compiler->mode32 = mode32;
+#endif
+ return SLJIT_SUCCESS;
}
- else {
- /* This case is complex since ecx itself may be used for
- addressing, and this case must be supported as well. */
- EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
+
+	/* This case is complex since ecx itself may be used for
+	   addressing, and that case must be supported as well. */
+ EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
- EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), 0, SLJIT_PREF_SHIFT_REG, 0);
- EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w);
- inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
- FAIL_IF(!inst);
- *inst |= mode;
- EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, SLJIT_MEM1(SLJIT_SP), 0);
+ EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), 0, SLJIT_PREF_SHIFT_REG, 0);
+#else /* !SLJIT_CONFIG_X86_32 */
+ mode32 = compiler->mode32;
+ compiler->mode32 = 0;
+ EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_PREF_SHIFT_REG, 0);
+ compiler->mode32 = mode32;
+#endif /* SLJIT_CONFIG_X86_32 */
+
+ EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w);
+ inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
+ FAIL_IF(!inst);
+ inst[1] |= mode;
+
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
+ EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, SLJIT_MEM1(SLJIT_SP), 0);
#else
- EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_PREF_SHIFT_REG, 0);
- EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w);
- inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
- FAIL_IF(!inst);
- *inst |= mode;
- EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG2, 0);
-#endif
- if (dst != SLJIT_UNUSED)
- return emit_mov(compiler, dst, dstw, TMP_REG1, 0);
- }
+ compiler->mode32 = 0;
+ EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG2, 0);
+ compiler->mode32 = mode32;
+#endif /* SLJIT_CONFIG_X86_32 */
+
+ if (dst != TMP_REG1)
+ return emit_mov(compiler, dst, dstw, TMP_REG1, 0);
return SLJIT_SUCCESS;
}
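
All of the shuffling in emit_shift() follows from one ISA constraint: variable x86 shifts take their count only in CL, so whenever ECX is the destination, an operand, or part of an address, its old value must be parked (TMP_REG2 on x86-64, the stack on x86-32) and restored afterwards. A GCC-flavoured illustration of the constraint, assuming an x86 target and GNU inline asm; the portable branch has the same semantics:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t shl_var(uint32_t x, uint8_t count)
    {
    #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
        /* The "c" constraint pins the count to ECX/CL: SHL accepts no other register. */
        __asm__("shll %%cl, %0" : "+r"(x) : "c"(count) : "cc");
        return x;
    #else
        return x << (count & 0x1f);
    #endif
    }

    int main(void)
    {
        assert(shl_var(3, 4) == 48);
        return 0;
    }
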
@@ -2249,14 +2441,15 @@ static sljit_s32 emit_shift_with_flags(struct sljit_compiler *compiler,
sljit_s32 src2, sljit_sw src2w)
{
/* The CPU does not set flags if the shift count is 0. */
- if (src2 & SLJIT_IMM) {
+ if (src2 == SLJIT_IMM) {
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
- if ((src2w & 0x3f) != 0 || (compiler->mode32 && (src2w & 0x1f) != 0))
- return emit_shift(compiler, mode, dst, dstw, src1, src1w, src2, src2w);
-#else
- if ((src2w & 0x1f) != 0)
+ src2w &= compiler->mode32 ? 0x1f : 0x3f;
+#else /* !SLJIT_CONFIG_X86_64 */
+ src2w &= 0x1f;
+#endif /* SLJIT_CONFIG_X86_64 */
+ if (src2w != 0)
return emit_shift(compiler, mode, dst, dstw, src1, src1w, src2, src2w);
-#endif
+
if (!set_flags)
return emit_mov(compiler, dst, dstw, src1, src1w);
/* OR dst, src, 0 */
@@ -2273,7 +2466,7 @@ static sljit_s32 emit_shift_with_flags(struct sljit_compiler *compiler,
FAIL_IF(emit_shift(compiler, mode, dst, dstw, src1, src1w, src2, src2w));
if (FAST_IS_REG(dst))
- return emit_cmp_binary(compiler, (dst == SLJIT_UNUSED) ? TMP_REG1 : dst, dstw, SLJIT_IMM, 0);
+ return emit_cmp_binary(compiler, dst, dstw, SLJIT_IMM, 0);
return SLJIT_SUCCESS;
}
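
The rewritten immediate handling leans on two architectural facts: the CPU reduces the count mod 32 (mod 64 with REX.W) before shifting, and a shift whose effective count is 0 leaves the flags untouched, which is why that case falls back to a plain MOV plus an explicit flag-setting OR. A compilable model (function name is ours):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t shl32(uint32_t x, unsigned count)
    {
        count &= 0x1f;                 /* what the hardware does with the count */
        return count ? x << count : x; /* count 0: value (and flags) unchanged */
    }

    int main(void)
    {
        assert(shl32(0x1234, 32) == 0x1234); /* 32 & 0x1f == 0: a no-op */
        assert(shl32(1, 33) == 2);           /* 33 & 0x1f == 1 */
        return 0;
    }
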
@@ -2283,7 +2476,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
sljit_s32 src2, sljit_sw src2w)
{
CHECK_ERROR();
- CHECK(check_sljit_emit_op2(compiler, op, dst, dstw, src1, src1w, src2, src2w));
+ CHECK(check_sljit_emit_op2(compiler, op, 0, dst, dstw, src1, src1w, src2, src2w));
ADJUST_LOCAL_OFFSET(dst, dstw);
ADJUST_LOCAL_OFFSET(src1, src1w);
ADJUST_LOCAL_OFFSET(src2, src2w);
@@ -2292,11 +2485,10 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
CHECK_EXTRA_REGS(src1, src1w, (void)0);
CHECK_EXTRA_REGS(src2, src2w, (void)0);
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
- compiler->mode32 = op & SLJIT_I32_OP;
+ compiler->mode32 = op & SLJIT_32;
#endif
- if (dst == SLJIT_UNUSED && !HAS_FLAGS(op))
- return SLJIT_SUCCESS;
+ SLJIT_ASSERT(dst != TMP_REG1 || HAS_FLAGS(op));
switch (GET_OPCODE(op)) {
case SLJIT_ADD:
@@ -2310,17 +2502,18 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
return emit_cum_binary(compiler, BINARY_OPCODE(ADC),
dst, dstw, src1, src1w, src2, src2w);
case SLJIT_SUB:
+ if (src1 == SLJIT_IMM && src1w == 0)
+ return emit_unary(compiler, NEG_rm, dst, dstw, src2, src2w);
+
if (!HAS_FLAGS(op)) {
- if ((src2 & SLJIT_IMM) && emit_lea_binary(compiler, dst, dstw, src1, src1w, SLJIT_IMM, -src2w) != SLJIT_ERR_UNSUPPORTED)
+ if (src2 == SLJIT_IMM && emit_lea_binary(compiler, dst, dstw, src1, src1w, SLJIT_IMM, -src2w) != SLJIT_ERR_UNSUPPORTED)
return compiler->error;
- if (SLOW_IS_REG(dst) && src2 == dst) {
+ if (FAST_IS_REG(dst) && src2 == dst) {
FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB), dst, 0, dst, 0, src1, src1w));
return emit_unary(compiler, NEG_rm, dst, 0, dst, 0);
}
}
- if (dst == SLJIT_UNUSED)
- return emit_cmp_binary(compiler, src1, src1w, src2, src2w);
return emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
dst, dstw, src1, src1w, src2, src2w);
case SLJIT_SUBC:
@@ -2329,27 +2522,261 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
case SLJIT_MUL:
return emit_mul(compiler, dst, dstw, src1, src1w, src2, src2w);
case SLJIT_AND:
- if (dst == SLJIT_UNUSED)
- return emit_test_binary(compiler, src1, src1w, src2, src2w);
return emit_cum_binary(compiler, BINARY_OPCODE(AND),
dst, dstw, src1, src1w, src2, src2w);
case SLJIT_OR:
return emit_cum_binary(compiler, BINARY_OPCODE(OR),
dst, dstw, src1, src1w, src2, src2w);
case SLJIT_XOR:
+ if (!HAS_FLAGS(op)) {
+ if (src2 == SLJIT_IMM && src2w == -1)
+ return emit_unary(compiler, NOT_rm, dst, dstw, src1, src1w);
+ if (src1 == SLJIT_IMM && src1w == -1)
+ return emit_unary(compiler, NOT_rm, dst, dstw, src2, src2w);
+ }
+
return emit_cum_binary(compiler, BINARY_OPCODE(XOR),
dst, dstw, src1, src1w, src2, src2w);
case SLJIT_SHL:
+ case SLJIT_MSHL:
return emit_shift_with_flags(compiler, SHL, HAS_FLAGS(op),
dst, dstw, src1, src1w, src2, src2w);
case SLJIT_LSHR:
+ case SLJIT_MLSHR:
return emit_shift_with_flags(compiler, SHR, HAS_FLAGS(op),
dst, dstw, src1, src1w, src2, src2w);
case SLJIT_ASHR:
+ case SLJIT_MASHR:
return emit_shift_with_flags(compiler, SAR, HAS_FLAGS(op),
dst, dstw, src1, src1w, src2, src2w);
+ case SLJIT_ROTL:
+ return emit_shift_with_flags(compiler, ROL, 0,
+ dst, dstw, src1, src1w, src2, src2w);
+ case SLJIT_ROTR:
+ return emit_shift_with_flags(compiler, ROR, 0,
+ dst, dstw, src1, src1w, src2, src2w);
+ }
+
+ return SLJIT_SUCCESS;
+}
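
Two of the new special cases are classic strength reductions, valid whenever no flags are requested: subtracting from an immediate 0 is NEG, and XOR with an all-ones immediate is NOT. In plain C:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t x = 0x0f0f0f0fu;
        assert((x ^ 0xffffffffu) == ~x); /* XOR with -1 is NOT */
        assert((0u - x) == ~x + 1u);     /* SUB from 0 is NEG (two's complement) */
        return 0;
    }
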
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2u(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2, sljit_sw src2w)
+{
+ sljit_s32 opcode = GET_OPCODE(op);
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_op2(compiler, op, 1, 0, 0, src1, src1w, src2, src2w));
+
+ if (opcode != SLJIT_SUB && opcode != SLJIT_AND) {
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_op2(compiler, op, TMP_REG1, 0, src1, src1w, src2, src2w);
+ }
+
+ ADJUST_LOCAL_OFFSET(src1, src1w);
+ ADJUST_LOCAL_OFFSET(src2, src2w);
+
+ CHECK_EXTRA_REGS(src1, src1w, (void)0);
+ CHECK_EXTRA_REGS(src2, src2w, (void)0);
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ compiler->mode32 = op & SLJIT_32;
+#endif
+
+ if (opcode == SLJIT_SUB) {
+ return emit_cmp_binary(compiler, src1, src1w, src2, src2w);
+ }
+ return emit_test_binary(compiler, src1, src1w, src2, src2w);
+}
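
sljit_emit_op2u() keeps only the flags, so the two opcodes with dedicated result-discarding encodings are routed specially: SUB becomes CMP and AND becomes TEST, each computing the same flag-relevant value without writing a destination. A trivial model of the zero-flag condition each one produces:

    #include <assert.h>
    #include <stdint.h>

    static int cmp_sets_zf(uint32_t a, uint32_t b)  { return (a - b) == 0; } /* CMP = flags of SUB */
    static int test_sets_zf(uint32_t a, uint32_t b) { return (a & b) == 0; } /* TEST = flags of AND */

    int main(void)
    {
        assert(cmp_sets_zf(7, 7));
        assert(test_sets_zf(0xf0, 0x0f));
        return 0;
    }
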
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_shift_into(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst_reg,
+ sljit_s32 src1_reg,
+ sljit_s32 src2_reg,
+ sljit_s32 src3, sljit_sw src3w)
+{
+ sljit_s32 is_rotate, is_left, move_src1;
+ sljit_u8* inst;
+ sljit_sw src1w = 0;
+ sljit_sw dstw = 0;
+	/* The whole register must be saved even for 32-bit operations. */
+ sljit_u8 restore_ecx = 0;
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
+ sljit_sw src2w = 0;
+ sljit_s32 restore_sp4 = 0;
+#endif /* SLJIT_CONFIG_X86_32 */
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_shift_into(compiler, op, dst_reg, src1_reg, src2_reg, src3, src3w));
+ ADJUST_LOCAL_OFFSET(src3, src3w);
+
+ CHECK_EXTRA_REGS(dst_reg, dstw, (void)0);
+ CHECK_EXTRA_REGS(src3, src3w, (void)0);
+
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ compiler->mode32 = op & SLJIT_32;
+#endif /* SLJIT_CONFIG_X86_64 */
+
+ if (src3 == SLJIT_IMM) {
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
+ src3w &= 0x1f;
+#else /* !SLJIT_CONFIG_X86_32 */
+ src3w &= (op & SLJIT_32) ? 0x1f : 0x3f;
+#endif /* SLJIT_CONFIG_X86_32 */
+
+ if (src3w == 0)
+ return SLJIT_SUCCESS;
+ }
+
+ is_left = (GET_OPCODE(op) == SLJIT_SHL || GET_OPCODE(op) == SLJIT_MSHL);
+
+ is_rotate = (src1_reg == src2_reg);
+ CHECK_EXTRA_REGS(src1_reg, src1w, (void)0);
+ CHECK_EXTRA_REGS(src2_reg, src2w, (void)0);
+
+ if (is_rotate)
+ return emit_shift(compiler, is_left ? ROL : ROR, dst_reg, dstw, src1_reg, src1w, src3, src3w);
+
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
+ if (src2_reg & SLJIT_MEM) {
+ EMIT_MOV(compiler, TMP_REG1, 0, src2_reg, src2w);
+ src2_reg = TMP_REG1;
+ }
+#endif /* SLJIT_CONFIG_X86_32 */
+
+ if (dst_reg == SLJIT_PREF_SHIFT_REG && src3 != SLJIT_IMM && (src3 != SLJIT_PREF_SHIFT_REG || src1_reg != SLJIT_PREF_SHIFT_REG)) {
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ EMIT_MOV(compiler, TMP_REG1, 0, src1_reg, src1w);
+ src1_reg = TMP_REG1;
+ src1w = 0;
+#else /* !SLJIT_CONFIG_X86_64 */
+ if (src2_reg != TMP_REG1) {
+ EMIT_MOV(compiler, TMP_REG1, 0, src1_reg, src1w);
+ src1_reg = TMP_REG1;
+ src1w = 0;
+ } else if ((src1_reg & SLJIT_MEM) || src1_reg == SLJIT_PREF_SHIFT_REG) {
+ restore_sp4 = (src3 == SLJIT_R0) ? SLJIT_R1 : SLJIT_R0;
+ EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), sizeof(sljit_s32), restore_sp4, 0);
+ EMIT_MOV(compiler, restore_sp4, 0, src1_reg, src1w);
+ src1_reg = restore_sp4;
+ src1w = 0;
+ } else {
+ EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), sizeof(sljit_s32), src1_reg, 0);
+ restore_sp4 = src1_reg;
+ }
+#endif /* SLJIT_CONFIG_X86_64 */
+
+ if (src3 != SLJIT_PREF_SHIFT_REG)
+ EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src3, src3w);
+ } else {
+ if (src2_reg == SLJIT_PREF_SHIFT_REG && src3 != SLJIT_IMM && src3 != SLJIT_PREF_SHIFT_REG) {
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ compiler->mode32 = 0;
+#endif /* SLJIT_CONFIG_X86_64 */
+ EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_PREF_SHIFT_REG, 0);
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ compiler->mode32 = op & SLJIT_32;
+#endif /* SLJIT_CONFIG_X86_64 */
+ src2_reg = TMP_REG1;
+ restore_ecx = 1;
+ }
+
+ move_src1 = 0;
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ if (dst_reg != src1_reg) {
+ if (dst_reg != src3) {
+ EMIT_MOV(compiler, dst_reg, 0, src1_reg, src1w);
+ src1_reg = dst_reg;
+ src1w = 0;
+ } else
+ move_src1 = 1;
+ }
+#else /* !SLJIT_CONFIG_X86_64 */
+ if (dst_reg & SLJIT_MEM) {
+ if (src2_reg != TMP_REG1) {
+ EMIT_MOV(compiler, TMP_REG1, 0, src1_reg, src1w);
+ src1_reg = TMP_REG1;
+ src1w = 0;
+ } else if ((src1_reg & SLJIT_MEM) || src1_reg == SLJIT_PREF_SHIFT_REG) {
+ restore_sp4 = (src3 == SLJIT_R0) ? SLJIT_R1 : SLJIT_R0;
+ EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), sizeof(sljit_s32), restore_sp4, 0);
+ EMIT_MOV(compiler, restore_sp4, 0, src1_reg, src1w);
+ src1_reg = restore_sp4;
+ src1w = 0;
+ } else {
+ EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), sizeof(sljit_s32), src1_reg, 0);
+ restore_sp4 = src1_reg;
+ }
+ } else if (dst_reg != src1_reg) {
+ if (dst_reg != src3) {
+ EMIT_MOV(compiler, dst_reg, 0, src1_reg, src1w);
+ src1_reg = dst_reg;
+ src1w = 0;
+ } else
+ move_src1 = 1;
+ }
+#endif /* SLJIT_CONFIG_X86_64 */
+
+ if (src3 != SLJIT_IMM && src3 != SLJIT_PREF_SHIFT_REG) {
+ if (!restore_ecx) {
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ compiler->mode32 = 0;
+ EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_PREF_SHIFT_REG, 0);
+ compiler->mode32 = op & SLJIT_32;
+ restore_ecx = 1;
+#else /* !SLJIT_CONFIG_X86_64 */
+ if (src1_reg != TMP_REG1 && src2_reg != TMP_REG1) {
+ EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_PREF_SHIFT_REG, 0);
+ restore_ecx = 1;
+ } else {
+ EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), 0, SLJIT_PREF_SHIFT_REG, 0);
+ restore_ecx = 2;
+ }
+#endif /* SLJIT_CONFIG_X86_64 */
+ }
+ EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src3, src3w);
+ }
+
+ if (move_src1) {
+ EMIT_MOV(compiler, dst_reg, 0, src1_reg, src1w);
+ src1_reg = dst_reg;
+ src1w = 0;
+ }
}
+ inst = emit_x86_instruction(compiler, 2, src2_reg, 0, src1_reg, src1w);
+ FAIL_IF(!inst);
+ inst[0] = GROUP_0F;
+
+ if (src3 == SLJIT_IMM) {
+ inst[1] = U8((is_left ? SHLD : SHRD) - 1);
+
+ /* Immediate argument is added separately. */
+ FAIL_IF(emit_byte(compiler, U8(src3w)));
+ } else
+ inst[1] = U8(is_left ? SHLD : SHRD);
+
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ if (restore_ecx) {
+ compiler->mode32 = 0;
+ EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
+ }
+
+ if (src1_reg != dst_reg) {
+ compiler->mode32 = op & SLJIT_32;
+ return emit_mov(compiler, dst_reg, dstw, src1_reg, 0);
+ }
+#else /* !SLJIT_CONFIG_X86_64 */
+ if (restore_ecx)
+ EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, restore_ecx == 1 ? TMP_REG1 : SLJIT_MEM1(SLJIT_SP), 0);
+
+ if (src1_reg != dst_reg)
+ EMIT_MOV(compiler, dst_reg, dstw, src1_reg, 0);
+
+ if (restore_sp4)
+ return emit_mov(compiler, restore_sp4, 0, SLJIT_MEM1(SLJIT_SP), sizeof(sljit_s32));
+#endif /* SLJIT_CONFIG_X86_64 */
+
return SLJIT_SUCCESS;
}
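
The instruction at the heart of sljit_emit_shift_into() is SHLD/SHRD, a funnel shift: src1 is shifted and the vacated bits are filled from the top (or bottom) of src2. The CL-counted forms are 0F A5 / 0F AD, and the immediate forms are one opcode lower (0F A4 / 0F AC), which is the "- 1" above. A portable model of the left variant, including the count-0 case that leaves the destination unchanged:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t shld32(uint32_t src1, uint32_t src2, unsigned n)
    {
        n &= 0x1f; /* the count is reduced mod 32, as the hardware does */
        return n ? (src1 << n) | (src2 >> (32 - n)) : src1;
    }

    int main(void)
    {
        assert(shld32(0x00000001, 0x80000000u, 1) == 0x00000003);
        assert(shld32(0x12345678, 0x9abcdef0u, 0) == 0x12345678);
        return 0;
    }
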
@@ -2369,7 +2796,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *comp
/* Don't adjust shadow stack if it isn't enabled. */
if (!cpu_has_shadow_stack ())
return SLJIT_SUCCESS;
- return adjust_shadow_stack(compiler, src, srcw, SLJIT_UNUSED, 0);
+ return adjust_shadow_stack(compiler, src, srcw);
case SLJIT_PREFETCH_L1:
case SLJIT_PREFETCH_L2:
case SLJIT_PREFETCH_L3:
@@ -2380,28 +2807,45 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *comp
return SLJIT_SUCCESS;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_dst(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw)
{
- CHECK_REG_INDEX(check_sljit_get_register_index(reg));
-#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
- if (reg >= SLJIT_R3 && reg <= SLJIT_R8)
- return -1;
-#endif
- return reg_map[reg];
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_op_dst(compiler, op, dst, dstw));
+ ADJUST_LOCAL_OFFSET(dst, dstw);
+
+ CHECK_EXTRA_REGS(dst, dstw, (void)0);
+
+ switch (op) {
+ case SLJIT_FAST_ENTER:
+ return emit_fast_enter(compiler, dst, dstw);
+ case SLJIT_GET_RETURN_ADDRESS:
+ return sljit_emit_get_return_address(compiler, dst, dstw);
+ }
+
+ return SLJIT_SUCCESS;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 type, sljit_s32 reg)
{
- CHECK_REG_INDEX(check_sljit_get_float_register_index(reg));
+ CHECK_REG_INDEX(check_sljit_get_register_index(type, reg));
+
+ if (type == SLJIT_GP_REGISTER) {
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
- return reg;
-#else
+ if (reg >= SLJIT_R3 && reg <= SLJIT_R8)
+ return -1;
+#endif /* SLJIT_CONFIG_X86_32 */
+ return reg_map[reg];
+ }
+
+ if (type != SLJIT_FLOAT_REGISTER && type != SLJIT_SIMD_REG_128 && type != SLJIT_SIMD_REG_256 && type != SLJIT_SIMD_REG_512)
+ return -1;
+
return freg_map[reg];
-#endif
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler,
- void *instruction, sljit_s32 size)
+ void *instruction, sljit_u32 size)
{
sljit_u8 *inst;
@@ -2420,13 +2864,15 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *c
/* --------------------------------------------------------------------- */
/* Alignment(3) + 4 * 16 bytes. */
-static sljit_s32 sse2_data[3 + (4 * 4)];
-static sljit_s32 *sse2_buffer;
+static sljit_u32 sse2_data[3 + (4 * 4)];
+static sljit_u32 *sse2_buffer;
static void init_compiler(void)
{
+ get_cpu_features();
+
/* Align to 16 bytes. */
- sse2_buffer = (sljit_s32*)(((sljit_uw)sse2_data + 15) & ~0xf);
+ sse2_buffer = (sljit_u32*)(((sljit_uw)sse2_data + 15) & ~(sljit_uw)0xf);
	/* Single precision constants (each constant is 16 bytes long). */
sse2_buffer[0] = 0x80000000;
@@ -2438,58 +2884,60 @@ static void init_compiler(void)
sse2_buffer[13] = 0x7fffffff;
}
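
The constants are IEEE-754 sign masks, 16 bytes each so they can feed 128-bit XORPS/XORPD and ANDPS/ANDPD directly: XOR with the sign bit (the 0x80000000 store above) negates a value, AND with the complementary 0x7fffffff mask takes the absolute value. A scalar sketch of both, using memcpy for well-defined type punning (helper names are ours):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    static float f32_neg(float f)
    {
        uint32_t u;
        memcpy(&u, &f, sizeof(u));
        u ^= 0x80000000u; /* flip the sign bit */
        memcpy(&f, &u, sizeof(f));
        return f;
    }

    static float f32_abs(float f)
    {
        uint32_t u;
        memcpy(&u, &f, sizeof(u));
        u &= 0x7fffffffu; /* clear the sign bit */
        memcpy(&f, &u, sizeof(f));
        return f;
    }

    int main(void)
    {
        assert(f32_neg(1.5f) == -1.5f);
        assert(f32_abs(-2.25f) == 2.25f);
        return 0;
    }
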
-static sljit_s32 emit_sse2(struct sljit_compiler *compiler, sljit_u8 opcode,
- sljit_s32 single, sljit_s32 xmm1, sljit_s32 xmm2, sljit_sw xmm2w)
+static sljit_s32 emit_groupf(struct sljit_compiler *compiler,
+ sljit_uw op,
+ sljit_s32 dst, sljit_s32 src, sljit_sw srcw)
{
- sljit_u8 *inst;
-
- inst = emit_x86_instruction(compiler, 2 | (single ? EX86_PREF_F3 : EX86_PREF_F2) | EX86_SSE2, xmm1, 0, xmm2, xmm2w);
+ sljit_u8 *inst = emit_x86_instruction(compiler, 2 | (op & ~(sljit_uw)0xff), dst, 0, src, srcw);
FAIL_IF(!inst);
- *inst++ = GROUP_0F;
- *inst = opcode;
+ inst[0] = GROUP_0F;
+ inst[1] = op & 0xff;
return SLJIT_SUCCESS;
}
-static sljit_s32 emit_sse2_logic(struct sljit_compiler *compiler, sljit_u8 opcode,
- sljit_s32 pref66, sljit_s32 xmm1, sljit_s32 xmm2, sljit_sw xmm2w)
+static sljit_s32 emit_groupf_ext(struct sljit_compiler *compiler,
+ sljit_uw op,
+ sljit_s32 dst, sljit_s32 src, sljit_sw srcw)
{
sljit_u8 *inst;
- inst = emit_x86_instruction(compiler, 2 | (pref66 ? EX86_PREF_66 : 0) | EX86_SSE2, xmm1, 0, xmm2, xmm2w);
+ SLJIT_ASSERT((op & EX86_SSE2) && ((op & VEX_OP_0F38) || (op & VEX_OP_0F3A)));
+
+ inst = emit_x86_instruction(compiler, 3 | (op & ~((sljit_uw)0xff | VEX_OP_0F38 | VEX_OP_0F3A)), dst, 0, src, srcw);
FAIL_IF(!inst);
- *inst++ = GROUP_0F;
- *inst = opcode;
+ inst[0] = GROUP_0F;
+ inst[1] = U8((op & VEX_OP_0F38) ? 0x38 : 0x3A);
+ inst[2] = op & 0xff;
return SLJIT_SUCCESS;
}
static SLJIT_INLINE sljit_s32 emit_sse2_load(struct sljit_compiler *compiler,
sljit_s32 single, sljit_s32 dst, sljit_s32 src, sljit_sw srcw)
{
- return emit_sse2(compiler, MOVSD_x_xm, single, dst, src, srcw);
+ return emit_groupf(compiler, MOVSD_x_xm | (single ? EX86_PREF_F3 : EX86_PREF_F2) | EX86_SSE2, dst, src, srcw);
}
static SLJIT_INLINE sljit_s32 emit_sse2_store(struct sljit_compiler *compiler,
sljit_s32 single, sljit_s32 dst, sljit_sw dstw, sljit_s32 src)
{
- return emit_sse2(compiler, MOVSD_xm_x, single, src, dst, dstw);
+ return emit_groupf(compiler, MOVSD_xm_x | (single ? EX86_PREF_F3 : EX86_PREF_F2) | EX86_SSE2, src, dst, dstw);
}
static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
{
- sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
- sljit_u8 *inst;
+ sljit_s32 dst_r;
+
+ CHECK_EXTRA_REGS(dst, dstw, (void)0);
+ dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
if (GET_OPCODE(op) == SLJIT_CONV_SW_FROM_F64)
compiler->mode32 = 0;
#endif
- inst = emit_x86_instruction(compiler, 2 | ((op & SLJIT_F32_OP) ? EX86_PREF_F3 : EX86_PREF_F2) | EX86_SSE2_OP2, dst_r, 0, src, srcw);
- FAIL_IF(!inst);
- *inst++ = GROUP_0F;
- *inst = CVTTSD2SI_r_xm;
+ FAIL_IF(emit_groupf(compiler, CVTTSD2SI_r_xm | EX86_SELECT_F2_F3(op) | EX86_SSE2_OP2, dst_r, src, srcw));
if (dst & SLJIT_MEM)
return emit_mov(compiler, dst, dstw, TMP_REG1, 0);
@@ -2501,14 +2949,15 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_comp
sljit_s32 src, sljit_sw srcw)
{
sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG;
- sljit_u8 *inst;
+
+ CHECK_EXTRA_REGS(src, srcw, (void)0);
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_SW)
compiler->mode32 = 0;
#endif
- if (src & SLJIT_IMM) {
+ if (src == SLJIT_IMM) {
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32)
srcw = (sljit_s32)srcw;
@@ -2518,16 +2967,13 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_comp
srcw = 0;
}
- inst = emit_x86_instruction(compiler, 2 | ((op & SLJIT_F32_OP) ? EX86_PREF_F3 : EX86_PREF_F2) | EX86_SSE2_OP1, dst_r, 0, src, srcw);
- FAIL_IF(!inst);
- *inst++ = GROUP_0F;
- *inst = CVTSI2SD_x_rm;
+ FAIL_IF(emit_groupf(compiler, CVTSI2SD_x_rm | EX86_SELECT_F2_F3(op) | EX86_SSE2_OP1, dst_r, src, srcw));
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
compiler->mode32 = 1;
#endif
if (dst_r == TMP_FREG)
- return emit_sse2_store(compiler, op & SLJIT_F32_OP, dst, dstw, TMP_FREG);
+ return emit_sse2_store(compiler, op & SLJIT_32, dst, dstw, TMP_FREG);
return SLJIT_SUCCESS;
}
@@ -2535,12 +2981,37 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compile
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w)
{
+ switch (GET_FLAG_TYPE(op)) {
+ case SLJIT_ORDERED_EQUAL:
+ /* Also: SLJIT_UNORDERED_OR_NOT_EQUAL */
+ FAIL_IF(emit_sse2_load(compiler, op & SLJIT_32, TMP_FREG, src1, src1w));
+ FAIL_IF(emit_groupf(compiler, CMPS_x_xm | EX86_SELECT_F2_F3(op) | EX86_SSE2, TMP_FREG, src2, src2w));
+
+ /* EQ */
+ FAIL_IF(emit_byte(compiler, 0));
+
+ src1 = TMP_FREG;
+ src2 = TMP_FREG;
+ src2w = 0;
+ break;
+
+ case SLJIT_ORDERED_LESS:
+ case SLJIT_UNORDERED_OR_GREATER:
+ /* Also: SLJIT_UNORDERED_OR_GREATER_EQUAL, SLJIT_ORDERED_LESS_EQUAL */
+ if (!FAST_IS_REG(src2)) {
+ FAIL_IF(emit_sse2_load(compiler, op & SLJIT_32, TMP_FREG, src2, src2w));
+ src2 = TMP_FREG;
+ }
+
+ return emit_groupf(compiler, UCOMISD_x_xm | EX86_SELECT_66(op) | EX86_SSE2, src2, src1, src1w);
+ }
+
if (!FAST_IS_REG(src1)) {
- FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, TMP_FREG, src1, src1w));
+ FAIL_IF(emit_sse2_load(compiler, op & SLJIT_32, TMP_FREG, src1, src1w));
src1 = TMP_FREG;
}
- return emit_sse2_logic(compiler, UCOMISD_x_xm, !(op & SLJIT_F32_OP), src1, src2, src2w);
+ return emit_groupf(compiler, UCOMISD_x_xm | EX86_SELECT_66(op) | EX86_SSE2, src1, src2, src2w);
}
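
The new dispatch reflects how UCOMISD reports comparisons: unordered operands set ZF = PF = CF = 1, while ordered-equal ones set only ZF, so "equal and ordered" is not expressible as a single condition code; the SLJIT_ORDERED_EQUAL path therefore materializes the predicate as a CMPEQSD all-ones/all-zeros mask first. A model of the two flag patterns that collide on ZF:

    #include <assert.h>
    #include <math.h>

    /* ZF and PF as UCOMISD would set them for a, b. */
    static int ucomis_zf(double a, double b) { return (a != a || b != b) || a == b; }
    static int ucomis_pf(double a, double b) { return a != a || b != b; }

    int main(void)
    {
        double qnan = NAN;
        assert(ucomis_zf(qnan, 1.0) && ucomis_pf(qnan, 1.0)); /* unordered: ZF = PF = 1 */
        assert(ucomis_zf(1.0, 1.0) && !ucomis_pf(1.0, 1.0)); /* equal: ZF = 1, PF = 0 */
        return 0;
    }
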
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op,
@@ -2548,6 +3019,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
sljit_s32 src, sljit_sw srcw)
{
sljit_s32 dst_r;
+ sljit_u8 *inst;
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
compiler->mode32 = 1;
@@ -2558,11 +3030,11 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
if (GET_OPCODE(op) == SLJIT_MOV_F64) {
if (FAST_IS_REG(dst))
- return emit_sse2_load(compiler, op & SLJIT_F32_OP, dst, src, srcw);
+ return emit_sse2_load(compiler, op & SLJIT_32, dst, src, srcw);
if (FAST_IS_REG(src))
- return emit_sse2_store(compiler, op & SLJIT_F32_OP, dst, dstw, src);
- FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, TMP_FREG, src, srcw));
- return emit_sse2_store(compiler, op & SLJIT_F32_OP, dst, dstw, TMP_FREG);
+ return emit_sse2_store(compiler, op & SLJIT_32, dst, dstw, src);
+ FAIL_IF(emit_sse2_load(compiler, op & SLJIT_32, TMP_FREG, src, srcw));
+ return emit_sse2_store(compiler, op & SLJIT_32, dst, dstw, TMP_FREG);
}
if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32) {
@@ -2571,42 +3043,57 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
		/* We overwrite the high bits of the source. From SLJIT's point of view,
this is not an issue.
Note: In SSE3, we could also use MOVDDUP and MOVSLDUP. */
- FAIL_IF(emit_sse2_logic(compiler, UNPCKLPD_x_xm, op & SLJIT_F32_OP, src, src, 0));
- }
- else {
- FAIL_IF(emit_sse2_load(compiler, !(op & SLJIT_F32_OP), TMP_FREG, src, srcw));
+ FAIL_IF(emit_groupf(compiler, UNPCKLPD_x_xm | ((op & SLJIT_32) ? EX86_PREF_66 : 0) | EX86_SSE2, src, src, 0));
+ } else {
+ FAIL_IF(emit_sse2_load(compiler, !(op & SLJIT_32), TMP_FREG, src, srcw));
src = TMP_FREG;
}
- FAIL_IF(emit_sse2_logic(compiler, CVTPD2PS_x_xm, op & SLJIT_F32_OP, dst_r, src, 0));
+ FAIL_IF(emit_groupf(compiler, CVTPD2PS_x_xm | ((op & SLJIT_32) ? EX86_PREF_66 : 0) | EX86_SSE2, dst_r, src, 0));
if (dst_r == TMP_FREG)
- return emit_sse2_store(compiler, op & SLJIT_F32_OP, dst, dstw, TMP_FREG);
+ return emit_sse2_store(compiler, op & SLJIT_32, dst, dstw, TMP_FREG);
return SLJIT_SUCCESS;
}
if (FAST_IS_REG(dst)) {
- dst_r = dst;
- if (dst != src)
- FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, dst_r, src, srcw));
- }
- else {
- dst_r = TMP_FREG;
- FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, dst_r, src, srcw));
+ dst_r = (dst == src) ? TMP_FREG : dst;
+
+ if (src & SLJIT_MEM)
+ FAIL_IF(emit_sse2_load(compiler, op & SLJIT_32, TMP_FREG, src, srcw));
+
+ FAIL_IF(emit_groupf(compiler, PCMPEQD_x_xm | EX86_PREF_66 | EX86_SSE2, dst_r, dst_r, 0));
+
+	inst = emit_x86_instruction(compiler, 2 | EX86_PREF_66 | EX86_SSE2_OP2, 0, 0, dst_r, 0);
+	FAIL_IF(!inst);
+	inst[0] = GROUP_0F;
+ /* Same as PSRLD_x / PSRLQ_x */
+ inst[1] = (op & SLJIT_32) ? PSLLD_x_i8 : PSLLQ_x_i8;
+
+ if (GET_OPCODE(op) == SLJIT_ABS_F64) {
+ inst[2] |= 2 << 3;
+ FAIL_IF(emit_byte(compiler, 1));
+ } else {
+ inst[2] |= 6 << 3;
+ FAIL_IF(emit_byte(compiler, ((op & SLJIT_32) ? 31 : 63)));
+ }
+
+ if (dst_r != TMP_FREG)
+ dst_r = (src & SLJIT_MEM) ? TMP_FREG : src;
+ return emit_groupf(compiler, (GET_OPCODE(op) == SLJIT_NEG_F64 ? XORPD_x_xm : ANDPD_x_xm) | EX86_SSE2, dst, dst_r, 0);
}
+ FAIL_IF(emit_sse2_load(compiler, op & SLJIT_32, TMP_FREG, src, srcw));
+
switch (GET_OPCODE(op)) {
case SLJIT_NEG_F64:
- FAIL_IF(emit_sse2_logic(compiler, XORPD_x_xm, 1, dst_r, SLJIT_MEM0(), (sljit_sw)(op & SLJIT_F32_OP ? sse2_buffer : sse2_buffer + 8)));
+ FAIL_IF(emit_groupf(compiler, XORPD_x_xm | EX86_SELECT_66(op) | EX86_SSE2, TMP_FREG, SLJIT_MEM0(), (sljit_sw)((op & SLJIT_32) ? sse2_buffer : sse2_buffer + 8)));
break;
case SLJIT_ABS_F64:
- FAIL_IF(emit_sse2_logic(compiler, ANDPD_x_xm, 1, dst_r, SLJIT_MEM0(), (sljit_sw)(op & SLJIT_F32_OP ? sse2_buffer + 4 : sse2_buffer + 12)));
+ FAIL_IF(emit_groupf(compiler, ANDPD_x_xm | EX86_SELECT_66(op) | EX86_SSE2, TMP_FREG, SLJIT_MEM0(), (sljit_sw)((op & SLJIT_32) ? sse2_buffer + 4 : sse2_buffer + 12)));
break;
}
- if (dst_r == TMP_FREG)
- return emit_sse2_store(compiler, op & SLJIT_F32_OP, dst, dstw, TMP_FREG);
- return SLJIT_SUCCESS;
+ return emit_sse2_store(compiler, op & SLJIT_32, dst, dstw, TMP_FREG);
}
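
The register-only NEG/ABS path above builds its mask without loading a constant: PCMPEQD x,x sets every bit, then an immediate PSLL by 31 (or 63) leaves just the sign bit for the XOR, while PSRL by 1 leaves everything but the sign bit for the AND; /6 and /2 in the ModRM reg field select PSLL and PSRL respectively. In scalar terms:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t ones = 0xffffffffu;         /* pcmpeqd x, x */
        assert((ones << 31) == 0x80000000u); /* pslld x, 31: the NEG (sign) mask */
        assert((ones >> 1) == 0x7fffffffu);  /* psrld x, 1:  the ABS mask */
        return 0;
    }
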
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compiler, sljit_s32 op,
@@ -2636,40 +3123,79 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compil
src2w = src1w;
}
else if (dst != src2)
- FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, dst_r, src1, src1w));
+ FAIL_IF(emit_sse2_load(compiler, op & SLJIT_32, dst_r, src1, src1w));
else {
dst_r = TMP_FREG;
- FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, TMP_FREG, src1, src1w));
+ FAIL_IF(emit_sse2_load(compiler, op & SLJIT_32, TMP_FREG, src1, src1w));
}
}
else {
dst_r = TMP_FREG;
- FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, TMP_FREG, src1, src1w));
+ FAIL_IF(emit_sse2_load(compiler, op & SLJIT_32, TMP_FREG, src1, src1w));
}
switch (GET_OPCODE(op)) {
case SLJIT_ADD_F64:
- FAIL_IF(emit_sse2(compiler, ADDSD_x_xm, op & SLJIT_F32_OP, dst_r, src2, src2w));
+ FAIL_IF(emit_groupf(compiler, ADDSD_x_xm | EX86_SELECT_F2_F3(op) | EX86_SSE2, dst_r, src2, src2w));
break;
case SLJIT_SUB_F64:
- FAIL_IF(emit_sse2(compiler, SUBSD_x_xm, op & SLJIT_F32_OP, dst_r, src2, src2w));
+ FAIL_IF(emit_groupf(compiler, SUBSD_x_xm | EX86_SELECT_F2_F3(op) | EX86_SSE2, dst_r, src2, src2w));
break;
case SLJIT_MUL_F64:
- FAIL_IF(emit_sse2(compiler, MULSD_x_xm, op & SLJIT_F32_OP, dst_r, src2, src2w));
+ FAIL_IF(emit_groupf(compiler, MULSD_x_xm | EX86_SELECT_F2_F3(op) | EX86_SSE2, dst_r, src2, src2w));
break;
case SLJIT_DIV_F64:
- FAIL_IF(emit_sse2(compiler, DIVSD_x_xm, op & SLJIT_F32_OP, dst_r, src2, src2w));
+ FAIL_IF(emit_groupf(compiler, DIVSD_x_xm | EX86_SELECT_F2_F3(op) | EX86_SSE2, dst_r, src2, src2w));
break;
}
if (dst_r == TMP_FREG)
- return emit_sse2_store(compiler, op & SLJIT_F32_OP, dst, dstw, TMP_FREG);
+ return emit_sse2_store(compiler, op & SLJIT_32, dst, dstw, TMP_FREG);
return SLJIT_SUCCESS;
}
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2r(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst_freg,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2, sljit_sw src2w)
+{
+ sljit_uw pref;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fop2r(compiler, op, dst_freg, src1, src1w, src2, src2w));
+ ADJUST_LOCAL_OFFSET(src1, src1w);
+ ADJUST_LOCAL_OFFSET(src2, src2w);
+
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ compiler->mode32 = 1;
+#endif
+
+ if (dst_freg == src1) {
+ FAIL_IF(emit_sse2_load(compiler, op & SLJIT_32, TMP_FREG, src2, src2w));
+ pref = EX86_SELECT_66(op) | EX86_SSE2;
+ FAIL_IF(emit_groupf(compiler, XORPD_x_xm | pref, TMP_FREG, src1, src1w));
+ FAIL_IF(emit_groupf(compiler, ANDPD_x_xm | pref, TMP_FREG, SLJIT_MEM0(), (sljit_sw)((op & SLJIT_32) ? sse2_buffer : sse2_buffer + 8)));
+ return emit_groupf(compiler, XORPD_x_xm | pref, dst_freg, TMP_FREG, 0);
+ }
+
+ if (src1 & SLJIT_MEM) {
+ FAIL_IF(emit_sse2_load(compiler, op & SLJIT_32, TMP_FREG, src1, src1w));
+ src1 = TMP_FREG;
+ src1w = 0;
+ }
+
+ if (dst_freg != src2)
+ FAIL_IF(emit_sse2_load(compiler, op & SLJIT_32, dst_freg, src2, src2w));
+
+ pref = EX86_SELECT_66(op) | EX86_SSE2;
+ FAIL_IF(emit_groupf(compiler, XORPD_x_xm | pref, dst_freg, src1, src1w));
+ FAIL_IF(emit_groupf(compiler, ANDPD_x_xm | pref, dst_freg, SLJIT_MEM0(), (sljit_sw)((op & SLJIT_32) ? sse2_buffer : sse2_buffer + 8)));
+ return emit_groupf(compiler, XORPD_x_xm | pref, dst_freg, src1, src1w);
+}
+
/* --------------------------------------------------------------------- */
/* Conditional instructions */
/* --------------------------------------------------------------------- */
@@ -2691,9 +3217,8 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compi
inst = (sljit_u8*)ensure_buf(compiler, 2);
PTR_FAIL_IF(!inst);
-
- *inst++ = 0;
- *inst++ = 0;
+ inst[0] = 0;
+ inst[1] = 0;
return label;
}
@@ -2708,7 +3233,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile
jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
PTR_FAIL_IF_NULL(jump);
- set_jump(jump, compiler, (type & SLJIT_REWRITABLE_JUMP) | ((type & 0xff) << TYPE_SHIFT));
+ set_jump(jump, compiler, (sljit_u32)((type & SLJIT_REWRITABLE_JUMP) | ((type & 0xff) << TYPE_SHIFT)));
type &= 0xff;
/* Worst case size. */
@@ -2721,8 +3246,8 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile
inst = (sljit_u8*)ensure_buf(compiler, 2);
PTR_FAIL_IF_NULL(inst);
- *inst++ = 0;
- *inst++ = 1;
+ inst[0] = 0;
+ inst[1] = 1;
return jump;
}
@@ -2740,8 +3265,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi
if (src == SLJIT_IMM) {
jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
FAIL_IF_NULL(jump);
- set_jump(jump, compiler, JUMP_ADDR | (type << TYPE_SHIFT));
- jump->u.target = srcw;
+ set_jump(jump, compiler, (sljit_u32)(JUMP_ADDR | (type << TYPE_SHIFT)));
+ jump->u.target = (sljit_uw)srcw;
/* Worst case size. */
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
@@ -2753,8 +3278,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi
inst = (sljit_u8*)ensure_buf(compiler, 2);
FAIL_IF_NULL(inst);
- *inst++ = 0;
- *inst++ = 1;
+ inst[0] = 0;
+ inst[1] = 1;
}
else {
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
@@ -2763,8 +3288,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi
#endif
inst = emit_x86_instruction(compiler, 1, 0, 0, src, srcw);
FAIL_IF(!inst);
- *inst++ = GROUP_FF;
- *inst |= (type >= SLJIT_FAST_CALL) ? CALL_rm : JMP_rm;
+ inst[0] = GROUP_FF;
+ inst[1] = U8(inst[1] | ((type >= SLJIT_FAST_CALL) ? CALL_rm : JMP_rm));
}
return SLJIT_SUCCESS;
}
@@ -2774,10 +3299,10 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
sljit_s32 type)
{
sljit_u8 *inst;
- sljit_u8 cond_set = 0;
+ sljit_u8 cond_set;
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
sljit_s32 reg;
-#endif
+#endif /* SLJIT_CONFIG_X86_64 */
/* ADJUST_LOCAL_OFFSET and CHECK_EXTRA_REGS might overwrite these values. */
sljit_s32 dst_save = dst;
sljit_sw dstw_save = dstw;
@@ -2788,9 +3313,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
ADJUST_LOCAL_OFFSET(dst, dstw);
CHECK_EXTRA_REGS(dst, dstw, (void)0);
- type &= 0xff;
/* setcc = jcc + 0x10. */
- cond_set = get_jump_code(type) + 0x10;
+ cond_set = U8(get_jump_code((sljit_uw)type) + 0x10);
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
if (GET_OPCODE(op) == SLJIT_OR && !GET_ALL_FLAGS(op) && FAST_IS_REG(dst)) {
@@ -2798,13 +3322,13 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
FAIL_IF(!inst);
INC_SIZE(4 + 3);
/* Set low register to conditional flag. */
- *inst++ = (reg_map[TMP_REG1] <= 7) ? REX : REX_B;
- *inst++ = GROUP_0F;
- *inst++ = cond_set;
- *inst++ = MOD_REG | reg_lmap[TMP_REG1];
- *inst++ = REX | (reg_map[TMP_REG1] <= 7 ? 0 : REX_R) | (reg_map[dst] <= 7 ? 0 : REX_B);
- *inst++ = OR_rm8_r8;
- *inst++ = MOD_REG | (reg_lmap[TMP_REG1] << 3) | reg_lmap[dst];
+ inst[0] = (reg_map[TMP_REG1] <= 7) ? REX : REX_B;
+ inst[1] = GROUP_0F;
+ inst[2] = cond_set;
+ inst[3] = MOD_REG | reg_lmap[TMP_REG1];
+ inst[4] = U8(REX | (reg_map[TMP_REG1] <= 7 ? 0 : REX_R) | (reg_map[dst] <= 7 ? 0 : REX_B));
+ inst[5] = OR_rm8_r8;
+ inst[6] = U8(MOD_REG | (reg_lmap[TMP_REG1] << 3) | reg_lmap[dst]);
return SLJIT_SUCCESS;
}
@@ -2814,15 +3338,15 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
FAIL_IF(!inst);
INC_SIZE(4 + 4);
/* Set low register to conditional flag. */
- *inst++ = (reg_map[reg] <= 7) ? REX : REX_B;
- *inst++ = GROUP_0F;
- *inst++ = cond_set;
- *inst++ = MOD_REG | reg_lmap[reg];
- *inst++ = REX_W | (reg_map[reg] <= 7 ? 0 : (REX_B | REX_R));
+ inst[0] = (reg_map[reg] <= 7) ? REX : REX_B;
+ inst[1] = GROUP_0F;
+ inst[2] = cond_set;
+ inst[3] = MOD_REG | reg_lmap[reg];
+ inst[4] = REX_W | (reg_map[reg] <= 7 ? 0 : (REX_B | REX_R));
/* The movzx instruction does not affect flags. */
- *inst++ = GROUP_0F;
- *inst++ = MOVZX_r_rm8;
- *inst = MOD_REG | (reg_lmap[reg] << 3) | reg_lmap[reg];
+ inst[5] = GROUP_0F;
+ inst[6] = MOVZX_r_rm8;
+ inst[7] = U8(MOD_REG | (reg_lmap[reg] << 3) | reg_lmap[reg]);
if (reg != TMP_REG1)
return SLJIT_SUCCESS;
@@ -2832,165 +3356,1314 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
return emit_mov(compiler, dst, dstw, TMP_REG1, 0);
}
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
- || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->skip_checks = 1;
-#endif
+ SLJIT_SKIP_CHECKS(compiler);
return sljit_emit_op2(compiler, op, dst_save, dstw_save, dst_save, dstw_save, TMP_REG1, 0);
-#else
+#else /* !SLJIT_CONFIG_X86_64 */
+ SLJIT_ASSERT(reg_map[TMP_REG1] < 4);
+
/* The SLJIT_CONFIG_X86_32 code path starts here. */
- if (GET_OPCODE(op) < SLJIT_ADD && FAST_IS_REG(dst)) {
- if (reg_map[dst] <= 4) {
- /* Low byte is accessible. */
- inst = (sljit_u8*)ensure_buf(compiler, 1 + 3 + 3);
- FAIL_IF(!inst);
- INC_SIZE(3 + 3);
- /* Set low byte to conditional flag. */
- *inst++ = GROUP_0F;
- *inst++ = cond_set;
- *inst++ = MOD_REG | reg_map[dst];
-
- *inst++ = GROUP_0F;
- *inst++ = MOVZX_r_rm8;
- *inst = MOD_REG | (reg_map[dst] << 3) | reg_map[dst];
+ if (GET_OPCODE(op) < SLJIT_ADD && FAST_IS_REG(dst) && reg_map[dst] <= 4) {
+ /* Low byte is accessible. */
+ inst = (sljit_u8*)ensure_buf(compiler, 1 + 3 + 3);
+ FAIL_IF(!inst);
+ INC_SIZE(3 + 3);
+ /* Set low byte to conditional flag. */
+ inst[0] = GROUP_0F;
+ inst[1] = cond_set;
+ inst[2] = U8(MOD_REG | reg_map[dst]);
+
+ inst[3] = GROUP_0F;
+ inst[4] = MOVZX_r_rm8;
+ inst[5] = U8(MOD_REG | (reg_map[dst] << 3) | reg_map[dst]);
+ return SLJIT_SUCCESS;
+ }
+
+ if (GET_OPCODE(op) == SLJIT_OR && !GET_ALL_FLAGS(op) && FAST_IS_REG(dst) && reg_map[dst] <= 4) {
+ inst = (sljit_u8*)ensure_buf(compiler, 1 + 3 + 2);
+ FAIL_IF(!inst);
+ INC_SIZE(3 + 2);
+
+ /* Set low byte to conditional flag. */
+ inst[0] = GROUP_0F;
+ inst[1] = cond_set;
+ inst[2] = U8(MOD_REG | reg_map[TMP_REG1]);
+
+ inst[3] = OR_rm8_r8;
+ inst[4] = U8(MOD_REG | (reg_map[TMP_REG1] << 3) | reg_map[dst]);
+ return SLJIT_SUCCESS;
+ }
+
+ inst = (sljit_u8*)ensure_buf(compiler, 1 + 3 + 3);
+ FAIL_IF(!inst);
+ INC_SIZE(3 + 3);
+ /* Set low byte to conditional flag. */
+ inst[0] = GROUP_0F;
+ inst[1] = cond_set;
+ inst[2] = U8(MOD_REG | reg_map[TMP_REG1]);
+
+ inst[3] = GROUP_0F;
+ inst[4] = MOVZX_r_rm8;
+ inst[5] = U8(MOD_REG | (reg_map[TMP_REG1] << 3) | reg_map[TMP_REG1]);
+
+ if (GET_OPCODE(op) < SLJIT_ADD)
+ return emit_mov(compiler, dst, dstw, TMP_REG1, 0);
+
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_op2(compiler, op, dst_save, dstw_save, dst_save, dstw_save, TMP_REG1, 0);
+#endif /* SLJIT_CONFIG_X86_64 */
+}
+
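+/* Integer conditional move. sljit condition codes come in even/odd
+   pairs, so 'type ^= 0x1' inverts the condition whenever the two
+   sources are swapped; CMOVcc (the Jcc opcode minus 0x40) is used
+   when available, otherwise emit_cmov_generic emits a branch-based
+   replacement. */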
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_select(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 dst_reg,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2_reg)
+{
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
+ sljit_s32 dst = dst_reg;
+ sljit_sw dstw = 0;
+#endif /* SLJIT_CONFIG_X86_32 */
+ sljit_sw src2w = 0;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_select(compiler, type, dst_reg, src1, src1w, src2_reg));
+
+ ADJUST_LOCAL_OFFSET(src1, src1w);
+
+ CHECK_EXTRA_REGS(dst, dstw, (void)0);
+ CHECK_EXTRA_REGS(src1, src1w, (void)0);
+ CHECK_EXTRA_REGS(src2_reg, src2w, (void)0);
+
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ compiler->mode32 = type & SLJIT_32;
+#endif /* SLJIT_CONFIG_X86_64 */
+ type &= ~SLJIT_32;
+
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
+ if (dst & SLJIT_MEM) {
+ if (src1 == SLJIT_IMM || (!(src1 & SLJIT_MEM) && (src2_reg & SLJIT_MEM))) {
+ EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
+ src1 = src2_reg;
+ src1w = src2w;
+ type ^= 0x1;
+ } else
+ EMIT_MOV(compiler, TMP_REG1, 0, src2_reg, src2w);
+
+ dst_reg = TMP_REG1;
+ } else {
+#endif /* SLJIT_CONFIG_X86_32 */
+ if (dst_reg != src2_reg) {
+ if (dst_reg == src1) {
+ src1 = src2_reg;
+ src1w = src2w;
+ type ^= 0x1;
+ } else {
+ if (ADDRESSING_DEPENDS_ON(src1, dst_reg)) {
+ EMIT_MOV(compiler, dst_reg, 0, src1, src1w);
+ src1 = src2_reg;
+ src1w = src2w;
+ type ^= 0x1;
+ } else
+ EMIT_MOV(compiler, dst_reg, 0, src2_reg, src2w);
+ }
+ }
+
+ if (SLJIT_UNLIKELY(src1 == SLJIT_IMM)) {
+ SLJIT_ASSERT(dst_reg != TMP_REG1);
+ EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
+ src1 = TMP_REG1;
+ src1w = 0;
+ }
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
+ }
+#endif /* SLJIT_CONFIG_X86_32 */
+
+ if (sljit_has_cpu_feature(SLJIT_HAS_CMOV))
+ FAIL_IF(emit_groupf(compiler, U8(get_jump_code((sljit_uw)type) - 0x40), dst_reg, src1, src1w));
+ else
+ FAIL_IF(emit_cmov_generic(compiler, type, dst_reg, src1, src1w));
+
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
+ if (dst_reg == TMP_REG1)
+ return emit_mov(compiler, dst, dstw, TMP_REG1, 0);
+#endif /* SLJIT_CONFIG_X86_32 */
+ return SLJIT_SUCCESS;
+}
+
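+/* Float conditional move, emulated with a short branch: a Jcc rel8
+   with the inverted condition (jump code minus 0x10) skips the load
+   of src1, and its 8-bit displacement is patched afterwards with the
+   size of the emitted load. */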
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fselect(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 dst_freg,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2_freg)
+{
+ sljit_u8* inst;
+ sljit_uw size;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fselect(compiler, type, dst_freg, src1, src1w, src2_freg));
+
+ ADJUST_LOCAL_OFFSET(src1, src1w);
+
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ compiler->mode32 = 1;
+#endif /* SLJIT_CONFIG_X86_64 */
+
+ if (dst_freg != src2_freg) {
+ if (dst_freg == src1) {
+ src1 = src2_freg;
+ src1w = 0;
+ type ^= 0x1;
+ } else
+ FAIL_IF(emit_sse2_load(compiler, type & SLJIT_32, dst_freg, src2_freg, 0));
+ }
+
+ inst = (sljit_u8*)ensure_buf(compiler, 1 + 2);
+ FAIL_IF(!inst);
+ INC_SIZE(2);
+ inst[0] = U8(get_jump_code((sljit_uw)(type & ~SLJIT_32) ^ 0x1) - 0x10);
+
+ size = compiler->size;
+ FAIL_IF(emit_sse2_load(compiler, type & SLJIT_32, dst_freg, src1, src1w));
+
+ inst[1] = U8(compiler->size - size);
+ return SLJIT_SUCCESS;
+}
+
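+/* Full vector register load/store. reg_size is the log2 of the width
+   in bytes: 4 selects a 16 byte XMM register, 5 a 32 byte YMM register
+   (AVX2 only). Aligned forms are MOVAPS/MOVAPD/MOVDQA, unaligned forms
+   MOVUPS/MOVUPD/MOVDQU; for the float forms '+ 1' converts the load
+   opcode into the matching store opcode. */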
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_mov(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 srcdst, sljit_sw srcdstw)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_s32 alignment = SLJIT_SIMD_GET_ELEM2_SIZE(type);
+ sljit_uw op;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_mov(compiler, type, freg, srcdst, srcdstw));
+
+ ADJUST_LOCAL_OFFSET(srcdst, srcdstw);
+
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ compiler->mode32 = 1;
+#endif /* SLJIT_CONFIG_X86_64 */
+
+ switch (reg_size) {
+ case 4:
+ op = EX86_SSE2;
+ break;
+ case 5:
+ if (!(cpu_feature_list & CPU_FEATURE_AVX2))
+ return SLJIT_ERR_UNSUPPORTED;
+ op = EX86_SSE2 | VEX_256;
+ break;
+ default:
+ return SLJIT_ERR_UNSUPPORTED;
+ }
+
+ if (!(srcdst & SLJIT_MEM))
+ alignment = reg_size;
+
+ if (type & SLJIT_SIMD_FLOAT) {
+ if (elem_size == 2 || elem_size == 3) {
+ op |= alignment >= reg_size ? MOVAPS_x_xm : MOVUPS_x_xm;
+
+ if (elem_size == 3)
+ op |= EX86_PREF_66;
+
+ if (type & SLJIT_SIMD_STORE)
+ op += 1;
+ } else
+ return SLJIT_ERR_UNSUPPORTED;
+ } else {
+ op |= ((type & SLJIT_SIMD_STORE) ? MOVDQA_xm_x : MOVDQA_x_xm)
+ | (alignment >= reg_size ? EX86_PREF_66 : EX86_PREF_F3);
+ }
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ if (op & VEX_256)
+ return emit_vex_instruction(compiler, op, freg, 0, srcdst, srcdstw);
+
+ return emit_groupf(compiler, op, freg, srcdst, srcdstw);
+}
+
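+/* Broadcast a scalar (register, memory or immediate) to every lane.
+   AVX2 maps this to VPBROADCASTB/W/D/Q or VBROADCASTSS/SD. The SSE2
+   fallback first widens immediates to a 32 bit pattern (0 becomes
+   PXOR, -1 becomes PCMPEQD), inserts the value with MOVD or PINSRB/W,
+   then spreads it with PSHUFB, PSHUFLW or PSHUFD. */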
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_replicate(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 src, sljit_sw srcw)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_u8 *inst;
+ sljit_u8 opcode = 0;
+ sljit_uw size;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_replicate(compiler, type, freg, src, srcw));
+
+ ADJUST_LOCAL_OFFSET(src, srcw);
+
+ if (!(type & SLJIT_SIMD_FLOAT)) {
+ CHECK_EXTRA_REGS(src, srcw, (void)0);
+ }
+
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
+ if ((type & SLJIT_SIMD_FLOAT) ? (elem_size < 2 || elem_size > 3) : (elem_size > 2))
+ return SLJIT_ERR_UNSUPPORTED;
+#else /* !SLJIT_CONFIG_X86_32 */
+ compiler->mode32 = 1;
+
+ if (elem_size > 3 || ((type & SLJIT_SIMD_FLOAT) && elem_size < 2))
+ return SLJIT_ERR_UNSUPPORTED;
+#endif /* SLJIT_CONFIG_X86_32 */
+
+ if (cpu_feature_list & CPU_FEATURE_AVX2) {
+ if (reg_size < 4 || reg_size > 5)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (src != SLJIT_IMM && (reg_size == 5 || elem_size < 3 || !(type & SLJIT_SIMD_FLOAT))) {
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ if (!(src & SLJIT_MEM) && !(type & SLJIT_SIMD_FLOAT)) {
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ if (elem_size >= 3)
+ compiler->mode32 = 0;
+#endif /* SLJIT_CONFIG_X86_64 */
+ FAIL_IF(emit_groupf(compiler, MOVD_x_rm | EX86_PREF_66 | EX86_SSE2_OP1, freg, src, srcw));
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ compiler->mode32 = 1;
+#endif /* SLJIT_CONFIG_X86_64 */
+ src = freg;
+ srcw = 0;
+ }
+
+ switch (elem_size) {
+ case 0:
+ size = VPBROADCASTB_x_xm | EX86_PREF_66 | VEX_OP_0F38 | EX86_SSE2;
+ break;
+ case 1:
+ size = VPBROADCASTW_x_xm | EX86_PREF_66 | VEX_OP_0F38 | EX86_SSE2;
+ break;
+ case 2:
+ size = ((type & SLJIT_SIMD_FLOAT) ? VBROADCASTSS_x_xm : VPBROADCASTD_x_xm) | EX86_PREF_66 | VEX_OP_0F38 | EX86_SSE2;
+ break;
+ default:
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
+ size = VBROADCASTSD_x_xm | EX86_PREF_66 | VEX_OP_0F38 | EX86_SSE2;
+#else /* !SLJIT_CONFIG_X86_32 */
+ size = ((type & SLJIT_SIMD_FLOAT) ? VBROADCASTSD_x_xm : VPBROADCASTQ_x_xm) | EX86_PREF_66 | VEX_OP_0F38 | EX86_SSE2;
+#endif /* SLJIT_CONFIG_X86_32 */
+ break;
+ }
+
+ if (reg_size == 5)
+ size |= VEX_256;
+
+ return emit_vex_instruction(compiler, size, freg, 0, src, srcw);
+ }
+ } else if (reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ if (type & SLJIT_SIMD_FLOAT) {
+ if (src == SLJIT_IMM) {
+ if (reg_size == 5)
+ return emit_vex_instruction(compiler, XORPD_x_xm | VEX_256 | (elem_size == 3 ? EX86_PREF_66 : 0) | EX86_SSE2 | VEX_SSE2_OPV, freg, freg, freg, 0);
+
+ return emit_groupf(compiler, XORPD_x_xm | (elem_size == 3 ? EX86_PREF_66 : 0) | EX86_SSE2, freg, freg, 0);
+ }
+
+ if (elem_size == 2 && freg != src) {
+ FAIL_IF(emit_sse2_load(compiler, 1, freg, src, srcw));
+ src = freg;
+ srcw = 0;
+ }
+
+ FAIL_IF(emit_groupf(compiler, (elem_size == 2 ? SHUFPS_x_xm : MOVDDUP_x_xm) | (elem_size == 2 ? 0 : EX86_PREF_F2) | EX86_SSE2, freg, src, srcw));
+
+ if (elem_size == 2)
+ return emit_byte(compiler, 0);
+ return SLJIT_SUCCESS;
+ }
+
+ if (src == SLJIT_IMM) {
+ if (elem_size == 0) {
+ srcw = (sljit_u8)srcw;
+ srcw |= srcw << 8;
+ srcw |= srcw << 16;
+ elem_size = 2;
+ } else if (elem_size == 1) {
+ srcw = (sljit_u16)srcw;
+ srcw |= srcw << 16;
+ elem_size = 2;
+ }
+
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ if (elem_size == 2 && (sljit_s32)srcw == -1)
+ srcw = -1;
+#endif /* SLJIT_CONFIG_X86_64 */
+
+ if (srcw == 0 || srcw == -1) {
+ if (reg_size == 5)
+ return emit_vex_instruction(compiler, (srcw == 0 ? PXOR_x_xm : PCMPEQD_x_xm) | VEX_256 | EX86_PREF_66 | EX86_SSE2 | VEX_SSE2_OPV, freg, freg, freg, 0);
+
+ return emit_groupf(compiler, (srcw == 0 ? PXOR_x_xm : PCMPEQD_x_xm) | EX86_PREF_66 | EX86_SSE2, freg, freg, 0);
+ }
+
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ if (elem_size == 3)
+ FAIL_IF(emit_load_imm64(compiler, TMP_REG1, srcw));
+ else
+#endif /* SLJIT_CONFIG_X86_64 */
+ EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, srcw);
+
+ src = TMP_REG1;
+ srcw = 0;
+ }
+
+ size = 2;
+ opcode = MOVD_x_rm;
+
+ switch (elem_size) {
+ case 0:
+ if (!FAST_IS_REG(src)) {
+ opcode = 0x3a /* Prefix of PINSRB_x_rm_i8. */;
+ size = 3;
+ }
+ break;
+ case 1:
+ if (!FAST_IS_REG(src))
+ opcode = PINSRW_x_rm_i8;
+ break;
+ case 2:
+ break;
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ case 3:
+ /* MOVQ */
+ compiler->mode32 = 0;
+ break;
+#endif /* SLJIT_CONFIG_X86_64 */
+ }
+
+ inst = emit_x86_instruction(compiler, size | EX86_PREF_66 | EX86_SSE2_OP1, freg, 0, src, srcw);
+ FAIL_IF(!inst);
+ inst[0] = GROUP_0F;
+ inst[1] = opcode;
+
+ if (reg_size == 5) {
+ SLJIT_ASSERT(opcode == MOVD_x_rm);
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
+ size = VPBROADCASTD_x_xm;
+#else /* !SLJIT_CONFIG_X86_32 */
+ size = (elem_size == 3) ? VPBROADCASTQ_x_xm : VPBROADCASTD_x_xm;
+#endif /* SLJIT_CONFIG_X86_32 */
+ return emit_vex_instruction(compiler, size | VEX_256 | EX86_PREF_66 | VEX_OP_0F38 | EX86_SSE2, freg, 0, freg, 0);
+ }
+
+ if (size == 3) {
+ SLJIT_ASSERT(opcode == 0x3a);
+ inst[2] = PINSRB_x_rm_i8;
+ }
+
+ if (opcode != MOVD_x_rm)
+ FAIL_IF(emit_byte(compiler, 0));
+
+ switch (elem_size) {
+ case 0:
+ FAIL_IF(emit_groupf(compiler, PXOR_x_xm | EX86_PREF_66 | EX86_SSE2, TMP_FREG, TMP_FREG, 0));
+ return emit_groupf_ext(compiler, PSHUFB_x_xm | EX86_PREF_66 | VEX_OP_0F38 | EX86_SSE2, freg, TMP_FREG, 0);
+ case 1:
+ FAIL_IF(emit_groupf(compiler, PSHUFLW_x_xm | EX86_PREF_F2 | EX86_SSE2, freg, freg, 0));
+ FAIL_IF(emit_byte(compiler, 0));
+ /* fallthrough */
+ default:
+ FAIL_IF(emit_groupf(compiler, PSHUFD_x_xm | EX86_PREF_66 | EX86_SSE2, freg, freg, 0));
+ return emit_byte(compiler, 0);
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ case 3:
+ compiler->mode32 = 1;
+ FAIL_IF(emit_groupf(compiler, PSHUFD_x_xm | EX86_PREF_66 | EX86_SSE2, freg, freg, 0));
+ return emit_byte(compiler, 0x44);
+#endif /* SLJIT_CONFIG_X86_64 */
+ }
+}
+
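+/* Transfer a single lane between a vector register and a register or
+   memory operand. Integer lanes use the PINSRx/PEXTRx family (plus
+   MOVSX/MOVSXD when SLJIT_SIMD_LANE_SIGNED is set), float lanes use
+   INSERTPS/EXTRACTPS and MOVLPD/MOVHPD/MOVHLPS. Lanes in the upper
+   half of a YMM register are staged through TMP_FREG with
+   VEXTRACTI128/VINSERTI128 or their float variants. */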
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_lane_mov(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg, sljit_s32 lane_index,
+ sljit_s32 srcdst, sljit_sw srcdstw)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_u8 *inst;
+ sljit_u8 opcode = 0;
+ sljit_uw size;
+ sljit_s32 freg_orig = freg;
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
+ sljit_s32 srcdst_is_ereg = 0;
+ sljit_s32 srcdst_orig = 0;
+ sljit_sw srcdstw_orig = 0;
+#endif /* SLJIT_CONFIG_X86_32 */
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_lane_mov(compiler, type, freg, lane_index, srcdst, srcdstw));
+
+ ADJUST_LOCAL_OFFSET(srcdst, srcdstw);
+
+ if (reg_size == 5) {
+ if (!(cpu_feature_list & CPU_FEATURE_AVX2))
+ return SLJIT_ERR_UNSUPPORTED;
+ } else if (reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
+ if ((type & SLJIT_SIMD_FLOAT) ? (elem_size < 2 || elem_size > 3) : elem_size > 2)
+ return SLJIT_ERR_UNSUPPORTED;
+#else /* !SLJIT_CONFIG_X86_32 */
+ if (elem_size > 3 || ((type & SLJIT_SIMD_FLOAT) && elem_size < 2))
+ return SLJIT_ERR_UNSUPPORTED;
+#endif /* SLJIT_CONFIG_X86_32 */
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ compiler->mode32 = 1;
+#else /* !SLJIT_CONFIG_X86_64 */
+ if (!(type & SLJIT_SIMD_FLOAT)) {
+ CHECK_EXTRA_REGS(srcdst, srcdstw, srcdst_is_ereg = 1);
+
+ if ((type & SLJIT_SIMD_STORE) && ((srcdst_is_ereg && elem_size < 2) || (elem_size == 0 && (type & SLJIT_SIMD_LANE_SIGNED) && FAST_IS_REG(srcdst) && reg_map[srcdst] >= 4))) {
+ srcdst_orig = srcdst;
+ srcdstw_orig = srcdstw;
+ srcdst = TMP_REG1;
+ srcdstw = 0;
+ }
+ }
+#endif /* SLJIT_CONFIG_X86_64 */
+
+ if (type & SLJIT_SIMD_LANE_ZERO) {
+ if (lane_index == 0) {
+ if (!(type & SLJIT_SIMD_FLOAT)) {
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ if (elem_size == 3) {
+ compiler->mode32 = 0;
+ elem_size = 2;
+ }
+#endif /* SLJIT_CONFIG_X86_64 */
+ if (srcdst == SLJIT_IMM) {
+ if (elem_size == 0)
+ srcdstw = (sljit_u8)srcdstw;
+ else if (elem_size == 1)
+ srcdstw = (sljit_u16)srcdstw;
+
+ EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, srcdstw);
+ srcdst = TMP_REG1;
+ srcdstw = 0;
+ elem_size = 2;
+ }
+
+ if (elem_size == 2) {
+ if (reg_size == 4)
+ return emit_groupf(compiler, MOVD_x_rm | EX86_PREF_66 | EX86_SSE2_OP1, freg, srcdst, srcdstw);
+ return emit_vex_instruction(compiler, MOVD_x_rm | VEX_AUTO_W | EX86_PREF_66 | EX86_SSE2_OP1, freg, 0, srcdst, srcdstw);
+ }
+ } else if (srcdst & SLJIT_MEM) {
+ SLJIT_ASSERT(elem_size == 2 || elem_size == 3);
+
+ if (reg_size == 4)
+ return emit_groupf(compiler, MOVSD_x_xm | (elem_size == 2 ? EX86_PREF_F3 : EX86_PREF_F2) | EX86_SSE2, freg, srcdst, srcdstw);
+ return emit_vex_instruction(compiler, MOVSD_x_xm | (elem_size == 2 ? EX86_PREF_F3 : EX86_PREF_F2) | EX86_SSE2, freg, 0, srcdst, srcdstw);
+ } else if (elem_size == 3) {
+ if (reg_size == 4)
+ return emit_groupf(compiler, MOVQ_x_xm | EX86_PREF_F3 | EX86_SSE2, freg, srcdst, 0);
+ return emit_vex_instruction(compiler, MOVQ_x_xm | EX86_PREF_F3 | EX86_SSE2, freg, 0, srcdst, 0);
+ }
+ }
+
+ if (reg_size == 5 && lane_index >= (1 << (4 - elem_size))) {
+ freg = TMP_FREG;
+ lane_index -= (1 << (4 - elem_size));
+ } else if ((type & SLJIT_SIMD_FLOAT) && freg == srcdst) {
+ FAIL_IF(emit_sse2_load(compiler, elem_size == 2, TMP_FREG, srcdst, srcdstw));
+ srcdst = TMP_FREG;
+ srcdstw = 0;
+ }
+
+ size = ((!(type & SLJIT_SIMD_FLOAT) || elem_size != 2) ? EX86_PREF_66 : 0)
+ | ((type & SLJIT_SIMD_FLOAT) ? XORPD_x_xm : PXOR_x_xm) | EX86_SSE2;
+
+ if (reg_size == 5)
+ FAIL_IF(emit_vex_instruction(compiler, size | VEX_256 | VEX_SSE2_OPV, freg, freg, freg, 0));
+ else
+ FAIL_IF(emit_groupf(compiler, size, freg, freg, 0));
+ } else if (reg_size == 5 && lane_index >= (1 << (4 - elem_size))) {
+ FAIL_IF(emit_vex_instruction(compiler, ((type & SLJIT_SIMD_FLOAT) ? VEXTRACTF128_x_ym : VEXTRACTI128_x_ym) | VEX_256 | EX86_PREF_66 | VEX_OP_0F3A | EX86_SSE2, freg, 0, TMP_FREG, 0));
+ FAIL_IF(emit_byte(compiler, 1));
+
+ freg = TMP_FREG;
+ lane_index -= (1 << (4 - elem_size));
+ }
+
+ if (type & SLJIT_SIMD_FLOAT) {
+ if (elem_size == 3) {
+ if (srcdst & SLJIT_MEM) {
+ if (type & SLJIT_SIMD_STORE)
+ size = lane_index == 0 ? MOVLPD_m_x : MOVHPD_m_x;
+ else
+ size = lane_index == 0 ? MOVLPD_x_m : MOVHPD_x_m;
+
+ FAIL_IF(emit_groupf(compiler, size | EX86_PREF_66 | EX86_SSE2, freg, srcdst, srcdstw));
+
+ /* In case of store, freg is not TMP_FREG. */
+ } else if (type & SLJIT_SIMD_STORE) {
+ if (lane_index == 1)
+ return emit_groupf(compiler, MOVHLPS_x_x | EX86_SSE2, srcdst, freg, 0);
+ return emit_sse2_load(compiler, 0, srcdst, freg, 0);
+ } else {
+ if (lane_index == 1)
+ FAIL_IF(emit_groupf(compiler, MOVLHPS_x_x | EX86_SSE2, freg, srcdst, 0));
+ else
+ FAIL_IF(emit_sse2_store(compiler, 0, freg, 0, srcdst));
+ }
+ } else if (type & SLJIT_SIMD_STORE) {
+ if (lane_index == 0)
+ return emit_sse2_store(compiler, 1, srcdst, srcdstw, freg);
+
+ if (srcdst & SLJIT_MEM) {
+ FAIL_IF(emit_groupf_ext(compiler, EXTRACTPS_x_xm | EX86_PREF_66 | VEX_OP_0F3A | EX86_SSE2, freg, srcdst, srcdstw));
+ return emit_byte(compiler, U8(lane_index));
+ }
+
+ if (srcdst == freg)
+ size = SHUFPS_x_xm | EX86_SSE2;
+ else {
+ if (cpu_feature_list & CPU_FEATURE_AVX) {
+ FAIL_IF(emit_vex_instruction(compiler, SHUFPS_x_xm | EX86_SSE2 | VEX_SSE2_OPV, srcdst, freg, freg, 0));
+ return emit_byte(compiler, U8(lane_index));
+ }
+
+ switch (lane_index) {
+ case 1:
+ size = MOVSHDUP_x_xm | EX86_PREF_F3 | EX86_SSE2;
+ break;
+ case 2:
+ size = MOVHLPS_x_x | EX86_SSE2;
+ break;
+ default:
+ SLJIT_ASSERT(lane_index == 3);
+ size = PSHUFD_x_xm | EX86_PREF_66 | EX86_SSE2;
+ break;
+ }
+ }
+
+ FAIL_IF(emit_groupf(compiler, size, srcdst, freg, 0));
+
+ size &= 0xff;
+ if (size == SHUFPS_x_xm || size == PSHUFD_x_xm)
+ return emit_byte(compiler, U8(lane_index));
+
return SLJIT_SUCCESS;
+ } else {
+ if (lane_index != 0 || (srcdst & SLJIT_MEM)) {
+ FAIL_IF(emit_groupf_ext(compiler, INSERTPS_x_xm | EX86_PREF_66 | VEX_OP_0F3A | EX86_SSE2, freg, srcdst, srcdstw));
+ FAIL_IF(emit_byte(compiler, U8(lane_index << 4)));
+ } else
+ FAIL_IF(emit_sse2_store(compiler, 1, freg, 0, srcdst));
}
- /* Low byte is not accessible. */
- if (cpu_has_cmov == -1)
- get_cpu_features();
+ if (freg != TMP_FREG || (type & SLJIT_SIMD_STORE))
+ return SLJIT_SUCCESS;
- if (cpu_has_cmov) {
- EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, 1);
- /* a xor reg, reg operation would overwrite the flags. */
- EMIT_MOV(compiler, dst, 0, SLJIT_IMM, 0);
+ SLJIT_ASSERT(reg_size == 5);
- inst = (sljit_u8*)ensure_buf(compiler, 1 + 3);
- FAIL_IF(!inst);
- INC_SIZE(3);
+ if (type & SLJIT_SIMD_LANE_ZERO) {
+ FAIL_IF(emit_vex_instruction(compiler, VPERMPD_y_ym | VEX_256 | EX86_PREF_66 | VEX_OP_0F3A | VEX_W | EX86_SSE2, freg_orig, 0, TMP_FREG, 0));
+ return emit_byte(compiler, 0x4e);
+ }
+
+ FAIL_IF(emit_vex_instruction(compiler, VINSERTF128_y_y_xm | VEX_256 | EX86_PREF_66 | VEX_OP_0F3A | EX86_SSE2 | VEX_SSE2_OPV, freg_orig, freg_orig, TMP_FREG, 0));
+ return emit_byte(compiler, 1);
+ }
+
+ if (srcdst == SLJIT_IMM) {
+ EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, srcdstw);
+ srcdst = TMP_REG1;
+ srcdstw = 0;
+ }
+
+ size = 3;
+
+ switch (elem_size) {
+ case 0:
+ opcode = (type & SLJIT_SIMD_STORE) ? PEXTRB_rm_x_i8 : PINSRB_x_rm_i8;
+ break;
+ case 1:
+ if (!(type & SLJIT_SIMD_STORE)) {
+ size = 2;
+ opcode = PINSRW_x_rm_i8;
+ } else
+ opcode = PEXTRW_rm_x_i8;
+ break;
+ case 2:
+ opcode = (type & SLJIT_SIMD_STORE) ? PEXTRD_rm_x_i8 : PINSRD_x_rm_i8;
+ break;
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ case 3:
+ /* PINSRQ / PEXTRQ */
+ opcode = (type & SLJIT_SIMD_STORE) ? PEXTRD_rm_x_i8 : PINSRD_x_rm_i8;
+ compiler->mode32 = 0;
+ break;
+#endif /* SLJIT_CONFIG_X86_64 */
+ }
- *inst++ = GROUP_0F;
- /* cmovcc = setcc - 0x50. */
- *inst++ = cond_set - 0x50;
- *inst++ = MOD_REG | (reg_map[dst] << 3) | reg_map[TMP_REG1];
+ inst = emit_x86_instruction(compiler, size | EX86_PREF_66 | EX86_SSE2_OP1, freg, 0, srcdst, srcdstw);
+ FAIL_IF(!inst);
+ inst[0] = GROUP_0F;
+
+ if (size == 3) {
+ inst[1] = 0x3a;
+ inst[2] = opcode;
+ } else
+ inst[1] = opcode;
+
+ FAIL_IF(emit_byte(compiler, U8(lane_index)));
+
+ if (!(type & SLJIT_SIMD_LANE_SIGNED) || (srcdst & SLJIT_MEM)) {
+ if (freg == TMP_FREG && !(type & SLJIT_SIMD_STORE)) {
+ SLJIT_ASSERT(reg_size == 5);
+
+ if (type & SLJIT_SIMD_LANE_ZERO) {
+ FAIL_IF(emit_vex_instruction(compiler, VPERMQ_y_ym | VEX_256 | EX86_PREF_66 | VEX_OP_0F3A | VEX_W | EX86_SSE2, freg_orig, 0, TMP_FREG, 0));
+ return emit_byte(compiler, 0x4e);
+ }
+
+ FAIL_IF(emit_vex_instruction(compiler, VINSERTI128_y_y_xm | VEX_256 | EX86_PREF_66 | VEX_OP_0F3A | EX86_SSE2 | VEX_SSE2_OPV, freg_orig, freg_orig, TMP_FREG, 0));
+ return emit_byte(compiler, 1);
+ }
+
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
+ if (srcdst_orig & SLJIT_MEM)
+ return emit_mov(compiler, srcdst_orig, srcdstw_orig, TMP_REG1, 0);
+#endif /* SLJIT_CONFIG_X86_32 */
+ return SLJIT_SUCCESS;
+ }
+
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ if (elem_size >= 3)
+ return SLJIT_SUCCESS;
+
+ compiler->mode32 = (type & SLJIT_32);
+
+ size = 2;
+
+ if (elem_size == 0)
+ size |= EX86_REX;
+
+ if (elem_size == 2) {
+ if (type & SLJIT_32)
return SLJIT_SUCCESS;
+
+ SLJIT_ASSERT(!(compiler->mode32));
+ size = 1;
+ }
+
+ inst = emit_x86_instruction(compiler, size, srcdst, 0, srcdst, 0);
+ FAIL_IF(!inst);
+
+ if (size != 1) {
+ inst[0] = GROUP_0F;
+ inst[1] = U8((elem_size == 0) ? MOVSX_r_rm8 : MOVSX_r_rm16);
+ } else
+ inst[0] = MOVSXD_r_rm;
+#else /* !SLJIT_CONFIG_X86_64 */
+ if (elem_size >= 2)
+ return SLJIT_SUCCESS;
+
+ FAIL_IF(emit_groupf(compiler, (elem_size == 0) ? MOVSX_r_rm8 : MOVSX_r_rm16,
+ (srcdst_orig != 0 && FAST_IS_REG(srcdst_orig)) ? srcdst_orig : srcdst, srcdst, 0));
+
+ if (srcdst_orig & SLJIT_MEM)
+ return emit_mov(compiler, srcdst_orig, srcdstw_orig, TMP_REG1, 0);
+#endif /* SLJIT_CONFIG_X86_64 */
+ return SLJIT_SUCCESS;
+}
+
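+/* Broadcast one lane of src into every lane of freg. The PSHUFD and
+   PSHUFLW immediates are built by replicating the 2 bit lane index
+   (byte | byte << 2, then | byte << 4); moving data across the 128 bit
+   halves of a YMM register requires the AVX2 VPERMQ/VPERMPD forms. */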
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_lane_replicate(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 src, sljit_s32 src_lane_index)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_uw pref;
+ sljit_u8 byte;
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
+ sljit_s32 opcode3 = TMP_REG1;
+#else /* !SLJIT_CONFIG_X86_32 */
+ sljit_s32 opcode3 = SLJIT_S0;
+#endif /* SLJIT_CONFIG_X86_32 */
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_lane_replicate(compiler, type, freg, src, src_lane_index));
+
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ compiler->mode32 = 1;
+#endif /* SLJIT_CONFIG_X86_64 */
+ SLJIT_ASSERT(reg_map[opcode3] == 3);
+
+ if (reg_size == 5) {
+ if (!(cpu_feature_list & CPU_FEATURE_AVX2))
+ return SLJIT_ERR_UNSUPPORTED;
+ } else if (reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_SIMD_FLOAT) {
+ pref = 0;
+ byte = U8(src_lane_index);
+
+ if (elem_size == 3) {
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ if (reg_size == 5) {
+ if (src_lane_index == 0)
+ return emit_vex_instruction(compiler, VBROADCASTSD_x_xm | VEX_256 | EX86_PREF_66 | VEX_OP_0F38 | EX86_SSE2, freg, 0, src, 0);
+
+ FAIL_IF(emit_vex_instruction(compiler, VPERMPD_y_ym | VEX_256 | EX86_PREF_66 | VEX_OP_0F3A | VEX_W | EX86_SSE2, freg, 0, src, 0));
+
+ byte = U8(byte | (byte << 2));
+ return emit_byte(compiler, U8(byte | (byte << 4)));
+ }
+
+ if (src_lane_index == 0)
+ return emit_groupf(compiler, MOVDDUP_x_xm | EX86_PREF_F2 | EX86_SSE2, freg, src, 0);
+
+ /* Changes it to SHUFPD_x_xm. */
+ pref = EX86_PREF_66;
+ } else if (elem_size != 2)
+ return SLJIT_ERR_UNSUPPORTED;
+ else if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ if (reg_size == 5) {
+ SLJIT_ASSERT(elem_size == 2);
+
+ if (src_lane_index == 0)
+ return emit_vex_instruction(compiler, VBROADCASTSS_x_xm | VEX_256 | EX86_PREF_66 | VEX_OP_0F38 | EX86_SSE2, freg, 0, src, 0);
+
+ FAIL_IF(emit_vex_instruction(compiler, VPERMPD_y_ym | VEX_256 | EX86_PREF_66 | VEX_OP_0F3A | VEX_W | EX86_SSE2, freg, 0, src, 0));
+
+ byte = 0x44;
+ if (src_lane_index >= 4) {
+ byte = 0xee;
+ src_lane_index -= 4;
+ }
+
+ FAIL_IF(emit_byte(compiler, byte));
+ FAIL_IF(emit_vex_instruction(compiler, SHUFPS_x_xm | VEX_256 | pref | EX86_SSE2 | VEX_SSE2_OPV, freg, freg, freg, 0));
+ byte = U8(src_lane_index);
+ } else if (freg != src && (cpu_feature_list & CPU_FEATURE_AVX)) {
+ FAIL_IF(emit_vex_instruction(compiler, SHUFPS_x_xm | pref | EX86_SSE2 | VEX_SSE2_OPV, freg, src, src, 0));
+ } else {
+ if (freg != src)
+ FAIL_IF(emit_groupf(compiler, MOVAPS_x_xm | pref | EX86_SSE2, freg, src, 0));
+
+ FAIL_IF(emit_groupf(compiler, SHUFPS_x_xm | pref | EX86_SSE2, freg, freg, 0));
}
- inst = (sljit_u8*)ensure_buf(compiler, 1 + 1 + 3 + 3 + 1);
- FAIL_IF(!inst);
- INC_SIZE(1 + 3 + 3 + 1);
- *inst++ = XCHG_EAX_r + reg_map[TMP_REG1];
- /* Set al to conditional flag. */
- *inst++ = GROUP_0F;
- *inst++ = cond_set;
- *inst++ = MOD_REG | 0 /* eax */;
-
- *inst++ = GROUP_0F;
- *inst++ = MOVZX_r_rm8;
- *inst++ = MOD_REG | (reg_map[dst] << 3) | 0 /* eax */;
- *inst++ = XCHG_EAX_r + reg_map[TMP_REG1];
+ if (elem_size == 2) {
+ byte = U8(byte | (byte << 2));
+ byte = U8(byte | (byte << 4));
+ } else
+ byte = U8(byte | (byte << 1));
+
+ return emit_byte(compiler, U8(byte));
+ }
+
+ if (type & SLJIT_SIMD_TEST)
return SLJIT_SUCCESS;
+
+ if (elem_size == 0) {
+ if (reg_size == 5 && src_lane_index >= 16) {
+ FAIL_IF(emit_vex_instruction(compiler, VPERMQ_y_ym | VEX_256 | EX86_PREF_66 | VEX_OP_0F3A | VEX_W | EX86_SSE2, freg, 0, src, 0));
+ FAIL_IF(emit_byte(compiler, src_lane_index >= 24 ? 0xff : 0xaa));
+ src_lane_index &= 0x7;
+ src = freg;
+ }
+
+ if ((freg != src && !(cpu_feature_list & CPU_FEATURE_AVX2)) || src_lane_index != 0) {
+ pref = 0;
+
+ if ((src_lane_index & 0x3) == 0) {
+ pref = EX86_PREF_66;
+ byte = U8(src_lane_index >> 2);
+ } else if (src_lane_index < 8 && (src_lane_index & 0x1) == 0) {
+ pref = EX86_PREF_F2;
+ byte = U8(src_lane_index >> 1);
+ } else {
+ if (freg == src || !(cpu_feature_list & CPU_FEATURE_AVX2)) {
+ if (freg != src)
+ FAIL_IF(emit_groupf(compiler, MOVDQA_x_xm | EX86_PREF_66 | EX86_SSE2, freg, src, 0));
+
+ FAIL_IF(emit_groupf(compiler, PSRLDQ_x | EX86_PREF_66 | EX86_SSE2_OP2, opcode3, freg, 0));
+ } else
+ FAIL_IF(emit_vex_instruction(compiler, PSRLDQ_x | EX86_PREF_66 | EX86_SSE2_OP2 | VEX_SSE2_OPV, opcode3, freg, src, 0));
+
+ FAIL_IF(emit_byte(compiler, U8(src_lane_index)));
+ }
+
+ if (pref != 0) {
+ FAIL_IF(emit_groupf(compiler, PSHUFLW_x_xm | pref | EX86_SSE2, freg, src, 0));
+ FAIL_IF(emit_byte(compiler, byte));
+ }
+
+ src = freg;
+ }
+
+ if (cpu_feature_list & CPU_FEATURE_AVX2)
+ return emit_vex_instruction(compiler, VPBROADCASTB_x_xm | (reg_size == 5 ? VEX_256 : 0) | EX86_PREF_66 | VEX_OP_0F38 | EX86_SSE2, freg, 0, src, 0);
+
+ SLJIT_ASSERT(reg_size == 4);
+ FAIL_IF(emit_groupf(compiler, PXOR_x_xm | EX86_PREF_66 | EX86_SSE2, TMP_FREG, TMP_FREG, 0));
+ return emit_groupf_ext(compiler, PSHUFB_x_xm | EX86_PREF_66 | VEX_OP_0F38 | EX86_SSE2, freg, TMP_FREG, 0);
}
- if (GET_OPCODE(op) == SLJIT_OR && !GET_ALL_FLAGS(op) && FAST_IS_REG(dst) && reg_map[dst] <= 4) {
- SLJIT_ASSERT(reg_map[SLJIT_R0] == 0);
+ if ((cpu_feature_list & CPU_FEATURE_AVX2) && src_lane_index == 0 && elem_size <= 3) {
+ switch (elem_size) {
+ case 1:
+ pref = VPBROADCASTW_x_xm | EX86_PREF_66 | VEX_OP_0F38 | EX86_SSE2;
+ break;
+ case 2:
+ pref = VPBROADCASTD_x_xm | EX86_PREF_66 | VEX_OP_0F38 | EX86_SSE2;
+ break;
+ default:
+ pref = VPBROADCASTQ_x_xm | EX86_PREF_66 | VEX_OP_0F38 | EX86_SSE2;
+ break;
+ }
- if (dst != SLJIT_R0) {
- inst = (sljit_u8*)ensure_buf(compiler, 1 + 1 + 3 + 2 + 1);
- FAIL_IF(!inst);
- INC_SIZE(1 + 3 + 2 + 1);
- /* Set low register to conditional flag. */
- *inst++ = XCHG_EAX_r + reg_map[TMP_REG1];
- *inst++ = GROUP_0F;
- *inst++ = cond_set;
- *inst++ = MOD_REG | 0 /* eax */;
- *inst++ = OR_rm8_r8;
- *inst++ = MOD_REG | (0 /* eax */ << 3) | reg_map[dst];
- *inst++ = XCHG_EAX_r + reg_map[TMP_REG1];
+ if (reg_size == 5)
+ pref |= VEX_256;
+
+ return emit_vex_instruction(compiler, pref, freg, 0, src, 0);
+ }
+
+ if (reg_size == 5) {
+ switch (elem_size) {
+ case 1:
+ byte = U8(src_lane_index & 0x3);
+ src_lane_index >>= 2;
+ pref = PSHUFLW_x_xm | VEX_256 | ((src_lane_index & 1) == 0 ? EX86_PREF_F2 : EX86_PREF_F3) | EX86_SSE2;
+ break;
+ case 2:
+ byte = U8(src_lane_index & 0x3);
+ src_lane_index >>= 1;
+ pref = PSHUFD_x_xm | VEX_256 | EX86_PREF_66 | EX86_SSE2;
+ break;
+ case 3:
+ pref = 0;
+ break;
+ default:
+ FAIL_IF(emit_vex_instruction(compiler, VPERMQ_y_ym | VEX_256 | EX86_PREF_66 | VEX_OP_0F3A | VEX_W | EX86_SSE2, freg, 0, src, 0));
+ return emit_byte(compiler, U8(src_lane_index == 0 ? 0x44 : 0xee));
}
- else {
- inst = (sljit_u8*)ensure_buf(compiler, 1 + 2 + 3 + 2 + 2);
+
+ if (pref != 0) {
+ FAIL_IF(emit_vex_instruction(compiler, pref, freg, 0, src, 0));
+ byte = U8(byte | (byte << 2));
+ FAIL_IF(emit_byte(compiler, U8(byte | (byte << 4))));
+
+ if (src_lane_index == 0)
+ return emit_vex_instruction(compiler, VPBROADCASTQ_x_xm | VEX_256 | EX86_PREF_66 | VEX_OP_0F38 | EX86_SSE2, freg, 0, freg, 0);
+
+ src = freg;
+ }
+
+ FAIL_IF(emit_vex_instruction(compiler, VPERMQ_y_ym | VEX_256 | EX86_PREF_66 | VEX_OP_0F3A | VEX_W | EX86_SSE2, freg, 0, src, 0));
+ byte = U8(src_lane_index);
+ byte = U8(byte | (byte << 2));
+ return emit_byte(compiler, U8(byte | (byte << 4)));
+ }
+
+ switch (elem_size) {
+ case 1:
+ byte = U8(src_lane_index & 0x3);
+ src_lane_index >>= 1;
+ pref = (src_lane_index & 2) == 0 ? EX86_PREF_F2 : EX86_PREF_F3;
+
+ FAIL_IF(emit_groupf(compiler, PSHUFLW_x_xm | pref | EX86_SSE2, freg, src, 0));
+ byte = U8(byte | (byte << 2));
+ FAIL_IF(emit_byte(compiler, U8(byte | (byte << 4))));
+
+ if ((cpu_feature_list & CPU_FEATURE_AVX2) && pref == EX86_PREF_F2)
+ return emit_vex_instruction(compiler, VPBROADCASTD_x_xm | EX86_PREF_66 | VEX_OP_0F38 | EX86_SSE2, freg, 0, freg, 0);
+
+ src = freg;
+ /* fallthrough */
+ case 2:
+ byte = U8(src_lane_index);
+ byte = U8(byte | (byte << 2));
+ break;
+ default:
+ byte = U8(src_lane_index << 1);
+ byte = U8(byte | (byte << 2) | 0x4);
+ break;
+ }
+
+ FAIL_IF(emit_groupf(compiler, PSHUFD_x_xm | EX86_PREF_66 | EX86_SSE2, freg, src, 0));
+ return emit_byte(compiler, U8(byte | (byte << 4)));
+}
+
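+/* Widen each element to a larger element type: floats via CVTPS2PD,
+   integers via the PMOVSX/PMOVZX family, with the signedness chosen
+   by SLJIT_SIMD_EXTEND_SIGNED. */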
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_extend(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 src, sljit_sw srcw)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_s32 elem2_size = SLJIT_SIMD_GET_ELEM2_SIZE(type);
+ sljit_u8 opcode;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_extend(compiler, type, freg, src, srcw));
+
+ ADJUST_LOCAL_OFFSET(src, srcw);
+
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ compiler->mode32 = 1;
+#endif /* SLJIT_CONFIG_X86_64 */
+
+ if (reg_size == 5) {
+ if (!(cpu_feature_list & CPU_FEATURE_AVX2))
+ return SLJIT_ERR_UNSUPPORTED;
+ } else if (reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_SIMD_FLOAT) {
+ if (elem_size != 2 || elem2_size != 3)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ if (reg_size == 4)
+ return emit_groupf(compiler, CVTPS2PD_x_xm | EX86_SSE2, freg, src, srcw);
+ return emit_vex_instruction(compiler, CVTPS2PD_x_xm | VEX_256 | EX86_SSE2, freg, 0, src, srcw);
+ }
+
+ switch (elem_size) {
+ case 0:
+ if (elem2_size == 1)
+ opcode = (type & SLJIT_SIMD_EXTEND_SIGNED) ? PMOVSXBW_x_xm : PMOVZXBW_x_xm;
+ else if (elem2_size == 2)
+ opcode = (type & SLJIT_SIMD_EXTEND_SIGNED) ? PMOVSXBD_x_xm : PMOVZXBD_x_xm;
+ else if (elem2_size == 3)
+ opcode = (type & SLJIT_SIMD_EXTEND_SIGNED) ? PMOVSXBQ_x_xm : PMOVZXBQ_x_xm;
+ else
+ return SLJIT_ERR_UNSUPPORTED;
+ break;
+ case 1:
+ if (elem2_size == 2)
+ opcode = (type & SLJIT_SIMD_EXTEND_SIGNED) ? PMOVSXWD_x_xm : PMOVZXWD_x_xm;
+ else if (elem2_size == 3)
+ opcode = (type & SLJIT_SIMD_EXTEND_SIGNED) ? PMOVSXWQ_x_xm : PMOVZXWQ_x_xm;
+ else
+ return SLJIT_ERR_UNSUPPORTED;
+ break;
+ case 2:
+ if (elem2_size == 3)
+ opcode = (type & SLJIT_SIMD_EXTEND_SIGNED) ? PMOVSXDQ_x_xm : PMOVZXDQ_x_xm;
+ else
+ return SLJIT_ERR_UNSUPPORTED;
+ break;
+ default:
+ return SLJIT_ERR_UNSUPPORTED;
+ }
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ if (reg_size == 4)
+ return emit_groupf_ext(compiler, opcode | EX86_PREF_66 | VEX_OP_0F38 | EX86_SSE2, freg, src, srcw);
+ return emit_vex_instruction(compiler, opcode | VEX_256 | EX86_PREF_66 | VEX_OP_0F38 | EX86_SSE2, freg, 0, src, srcw);
+}
+
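+/* Collect the sign bit of every lane into a general purpose register:
+   PMOVMSKB for byte lanes, MOVMSKPS/MOVMSKPD for 32/64 bit lanes.
+   16 bit lanes have no direct instruction, so they are narrowed with
+   PACKSSWB (signed saturation preserves the sign) and the mask bits
+   coming from the unused half are shifted out. */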
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_sign(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 dst, sljit_sw dstw)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_s32 dst_r;
+ sljit_uw pref;
+ sljit_u8 *inst;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_sign(compiler, type, freg, dst, dstw));
+
+ ADJUST_LOCAL_OFFSET(dst, dstw);
+
+ CHECK_EXTRA_REGS(dst, dstw, (void)0);
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ compiler->mode32 = 1;
+#endif /* SLJIT_CONFIG_X86_64 */
+
+ if (elem_size > 3 || ((type & SLJIT_SIMD_FLOAT) && elem_size < 2))
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (reg_size == 4) {
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ pref = EX86_PREF_66 | EX86_SSE2_OP2;
+
+ switch (elem_size) {
+ case 1:
+ FAIL_IF(emit_groupf(compiler, PACKSSWB_x_xm | EX86_PREF_66 | EX86_SSE2, TMP_FREG, freg, 0));
+ freg = TMP_FREG;
+ break;
+ case 2:
+ pref = EX86_SSE2_OP2;
+ break;
+ }
+
+ dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
+ FAIL_IF(emit_groupf(compiler, (elem_size < 2 ? PMOVMSKB_r_x : MOVMSKPS_r_x) | pref, dst_r, freg, 0));
+
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ compiler->mode32 = type & SLJIT_32;
+#endif /* SLJIT_CONFIG_X86_64 */
+
+ if (elem_size == 1) {
+ inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_IMM, 8, dst_r, 0);
FAIL_IF(!inst);
- INC_SIZE(2 + 3 + 2 + 2);
- /* Set low register to conditional flag. */
- *inst++ = XCHG_r_rm;
- *inst++ = MOD_REG | (1 /* ecx */ << 3) | reg_map[TMP_REG1];
- *inst++ = GROUP_0F;
- *inst++ = cond_set;
- *inst++ = MOD_REG | 1 /* ecx */;
- *inst++ = OR_rm8_r8;
- *inst++ = MOD_REG | (1 /* ecx */ << 3) | 0 /* eax */;
- *inst++ = XCHG_r_rm;
- *inst++ = MOD_REG | (1 /* ecx */ << 3) | reg_map[TMP_REG1];
+ inst[1] |= SHR;
}
+
+ if (dst_r == TMP_REG1)
+ return emit_mov(compiler, dst, dstw, TMP_REG1, 0);
+
return SLJIT_SUCCESS;
}
- /* Set TMP_REG1 to the bit. */
- inst = (sljit_u8*)ensure_buf(compiler, 1 + 1 + 3 + 3 + 1);
- FAIL_IF(!inst);
- INC_SIZE(1 + 3 + 3 + 1);
- *inst++ = XCHG_EAX_r + reg_map[TMP_REG1];
- /* Set al to conditional flag. */
- *inst++ = GROUP_0F;
- *inst++ = cond_set;
- *inst++ = MOD_REG | 0 /* eax */;
+ if (reg_size != 5 || !(cpu_feature_list & CPU_FEATURE_AVX2))
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
- *inst++ = GROUP_0F;
- *inst++ = MOVZX_r_rm8;
- *inst++ = MOD_REG | (0 << 3) /* eax */ | 0 /* eax */;
+ dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
- *inst++ = XCHG_EAX_r + reg_map[TMP_REG1];
+ if (elem_size == 1) {
+ FAIL_IF(emit_vex_instruction(compiler, VEXTRACTI128_x_ym | VEX_256 | EX86_PREF_66 | VEX_OP_0F3A | EX86_SSE2, freg, 0, TMP_FREG, 0));
+ FAIL_IF(emit_byte(compiler, 1));
+ FAIL_IF(emit_vex_instruction(compiler, PACKSSWB_x_xm | VEX_256 | EX86_PREF_66 | EX86_SSE2 | VEX_SSE2_OPV, TMP_FREG, freg, TMP_FREG, 0));
+ FAIL_IF(emit_groupf(compiler, PMOVMSKB_r_x | EX86_PREF_66 | EX86_SSE2_OP2, dst_r, TMP_FREG, 0));
+ } else {
+ pref = MOVMSKPS_r_x | VEX_256 | EX86_SSE2_OP2;
- if (GET_OPCODE(op) < SLJIT_ADD)
+ if (elem_size == 0)
+ pref = PMOVMSKB_r_x | VEX_256 | EX86_PREF_66 | EX86_SSE2_OP2;
+ else if (elem_size == 3)
+ pref |= EX86_PREF_66;
+
+ FAIL_IF(emit_vex_instruction(compiler, pref, dst_r, 0, freg, 0));
+ }
+
+ if (dst_r == TMP_REG1) {
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ compiler->mode32 = type & SLJIT_32;
+#endif /* SLJIT_CONFIG_X86_64 */
return emit_mov(compiler, dst, dstw, TMP_REG1, 0);
+ }
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
- || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->skip_checks = 1;
-#endif
- return sljit_emit_op2(compiler, op, dst_save, dstw_save, dst_save, dstw_save, TMP_REG1, 0);
+ return SLJIT_SUCCESS;
+}
+
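+/* Bitwise AND/OR/XOR of two vector registers. The PAND/POR/PXOR or
+   ANDPD/ORPD/XORPD form is selected by the element type, and the VEX
+   three-operand encoding is preferred when the destination differs
+   from both sources, saving the extra move done by emit_simd_mov()
+   in the SSE2 path. */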
+static sljit_s32 emit_simd_mov(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 dst_freg, sljit_s32 src_freg)
+{
+ sljit_uw op = ((type & SLJIT_SIMD_FLOAT) ? MOVAPS_x_xm : MOVDQA_x_xm) | EX86_SSE2;
+
+ SLJIT_ASSERT(SLJIT_SIMD_GET_REG_SIZE(type) == 4);
+
+ if (!(type & SLJIT_SIMD_FLOAT) || SLJIT_SIMD_GET_ELEM_SIZE(type) == 3)
+ op |= EX86_PREF_66;
+
+ return emit_groupf(compiler, op, dst_freg, src_freg, 0);
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_op2(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 dst_freg, sljit_s32 src1_freg, sljit_s32 src2_freg)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_s32 needs_move = 0;
+ sljit_uw op = 0;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_op2(compiler, type, dst_freg, src1_freg, src2_freg));
+
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ compiler->mode32 = 1;
#endif /* SLJIT_CONFIG_X86_64 */
+
+ if (reg_size == 5) {
+ if (!(cpu_feature_list & CPU_FEATURE_AVX2))
+ return SLJIT_ERR_UNSUPPORTED;
+ } else if (reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if ((type & SLJIT_SIMD_FLOAT) && (elem_size < 2 || elem_size > 3))
+ return SLJIT_ERR_UNSUPPORTED;
+
+ switch (SLJIT_SIMD_GET_OPCODE(type)) {
+ case SLJIT_SIMD_OP2_AND:
+ op = (type & SLJIT_SIMD_FLOAT) ? ANDPD_x_xm : PAND_x_xm;
+
+ if (!(type & SLJIT_SIMD_FLOAT) || elem_size == 3)
+ op |= EX86_PREF_66;
+ break;
+ case SLJIT_SIMD_OP2_OR:
+ op = (type & SLJIT_SIMD_FLOAT) ? ORPD_x_xm : POR_x_xm;
+
+ if (!(type & SLJIT_SIMD_FLOAT) || elem_size == 3)
+ op |= EX86_PREF_66;
+ break;
+ case SLJIT_SIMD_OP2_XOR:
+ op = (type & SLJIT_SIMD_FLOAT) ? XORPD_x_xm : PXOR_x_xm;
+
+ if (!(type & SLJIT_SIMD_FLOAT) || elem_size == 3)
+ op |= EX86_PREF_66;
+ break;
+ }
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ needs_move = dst_freg != src1_freg && dst_freg != src2_freg;
+
+ if (reg_size == 5 || (needs_move && (cpu_feature_list & CPU_FEATURE_AVX2))) {
+ if (reg_size == 5)
+ op |= VEX_256;
+
+ return emit_vex_instruction(compiler, op | EX86_SSE2 | VEX_SSE2_OPV, dst_freg, src1_freg, src2_freg, 0);
+ }
+
+ if (needs_move) {
+ FAIL_IF(emit_simd_mov(compiler, type, dst_freg, src1_freg));
+ } else if (dst_freg != src1_freg) {
+ SLJIT_ASSERT(dst_freg == src2_freg);
+ src2_freg = src1_freg;
+ }
+
+ FAIL_IF(emit_groupf(compiler, op | EX86_SSE2, dst_freg, src2_freg, 0));
+ return SLJIT_SUCCESS;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compiler, sljit_s32 type,
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_atomic_load(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst_reg,
- sljit_s32 src, sljit_sw srcw)
+ sljit_s32 mem_reg)
{
- sljit_u8* inst;
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_atomic_load(compiler, op, dst_reg, mem_reg));
+
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_op1(compiler, op, dst_reg, 0, SLJIT_MEM1(mem_reg), 0);
+}
+
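+/* The store half of the CAS pair is LOCK CMPXCHG, which implicitly
+   compares against and updates EAX/RAX: temp_reg (the expected value)
+   is moved into SLJIT_R0 first, any operand occupying R0 is relocated
+   to a spare register, and the saved R0 value is restored at the end. */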
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_atomic_store(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 src_reg,
+ sljit_s32 mem_reg,
+ sljit_s32 temp_reg)
+{
+ sljit_uw pref;
+ sljit_s32 free_reg = TMP_REG1;
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
+ sljit_sw srcw = 0;
+ sljit_sw tempw = 0;
+#endif /* SLJIT_CONFIG_X86_32 */
CHECK_ERROR();
- CHECK(check_sljit_emit_cmov(compiler, type, dst_reg, src, srcw));
+ CHECK(check_sljit_emit_atomic_store(compiler, op, src_reg, mem_reg, temp_reg));
+ CHECK_EXTRA_REGS(src_reg, srcw, (void)0);
+ CHECK_EXTRA_REGS(temp_reg, tempw, (void)0);
+
+ SLJIT_ASSERT(FAST_IS_REG(src_reg) || src_reg == SLJIT_MEM1(SLJIT_SP));
+ SLJIT_ASSERT(FAST_IS_REG(temp_reg) || temp_reg == SLJIT_MEM1(SLJIT_SP));
+ op = GET_OPCODE(op);
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
- dst_reg &= ~SLJIT_I32_OP;
+ if ((src_reg & SLJIT_MEM) || (op == SLJIT_MOV_U8 && reg_map[src_reg] >= 4)) {
+ /* Src is a virtual register or its low byte is not accessible. */
+ SLJIT_ASSERT(src_reg != SLJIT_R1);
+ free_reg = src_reg;
- if (!sljit_has_cpu_feature(SLJIT_HAS_CMOV) || (dst_reg >= SLJIT_R3 && dst_reg <= SLJIT_S3))
- return sljit_emit_cmov_generic(compiler, type, dst_reg, src, srcw);
-#else
- if (!sljit_has_cpu_feature(SLJIT_HAS_CMOV))
- return sljit_emit_cmov_generic(compiler, type, dst_reg, src, srcw);
-#endif
+ EMIT_MOV(compiler, TMP_REG1, 0, src_reg, srcw);
+ src_reg = TMP_REG1;
- /* ADJUST_LOCAL_OFFSET is not needed. */
- CHECK_EXTRA_REGS(src, srcw, (void)0);
+ if (mem_reg == src_reg)
+ mem_reg = TMP_REG1;
+ }
+#endif /* SLJIT_CONFIG_X86_32 */
+ if (temp_reg != SLJIT_R0) {
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
- compiler->mode32 = dst_reg & SLJIT_I32_OP;
- dst_reg &= ~SLJIT_I32_OP;
-#endif
+ compiler->mode32 = 0;
- if (SLJIT_UNLIKELY(src & SLJIT_IMM)) {
- EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, srcw);
- src = TMP_REG1;
- srcw = 0;
+ EMIT_MOV(compiler, free_reg, 0, SLJIT_R0, 0);
+ EMIT_MOV(compiler, SLJIT_R0, 0, temp_reg, 0);
+
+ if (src_reg == SLJIT_R0)
+ src_reg = free_reg;
+ if (mem_reg == SLJIT_R0)
+ mem_reg = free_reg;
+#else /* !SLJIT_CONFIG_X86_64 */
+ if (src_reg == TMP_REG1 && mem_reg == SLJIT_R0 && (free_reg & SLJIT_MEM)) {
+ EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), 0, SLJIT_R1, 0);
+ EMIT_MOV(compiler, SLJIT_R1, 0, SLJIT_R0, 0);
+ EMIT_MOV(compiler, SLJIT_R0, 0, temp_reg, tempw);
+
+ mem_reg = SLJIT_R1;
+ free_reg = SLJIT_R1;
+ } else {
+ EMIT_MOV(compiler, free_reg, 0, SLJIT_R0, 0);
+ EMIT_MOV(compiler, SLJIT_R0, 0, temp_reg, tempw);
+
+ if (src_reg == SLJIT_R0)
+ src_reg = free_reg;
+ if (mem_reg == SLJIT_R0)
+ mem_reg = free_reg;
+ }
+#endif /* SLJIT_CONFIG_X86_64 */
}
- inst = emit_x86_instruction(compiler, 2, dst_reg, 0, src, srcw);
- FAIL_IF(!inst);
- *inst++ = GROUP_0F;
- *inst = get_jump_code(type & 0xff) - 0x40;
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ compiler->mode32 = op != SLJIT_MOV && op != SLJIT_MOV_P;
+#endif /* SLJIT_CONFIG_X86_64 */
+
+ /* Lock prefix. */
+ FAIL_IF(emit_byte(compiler, GROUP_LOCK));
+
+ pref = 0;
+ if (op == SLJIT_MOV_U16)
+ pref = EX86_HALF_ARG | EX86_PREF_66;
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ if (op == SLJIT_MOV_U8)
+ pref = EX86_REX;
+#endif /* SLJIT_CONFIG_X86_64 */
+
+ FAIL_IF(emit_groupf(compiler, (op == SLJIT_MOV_U8 ? CMPXCHG_rm8_r : CMPXCHG_rm_r) | pref, src_reg, SLJIT_MEM1(mem_reg), 0));
+
+ if (temp_reg != SLJIT_R0) {
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ compiler->mode32 = 0;
+ return emit_mov(compiler, SLJIT_R0, 0, TMP_REG1, 0);
+#else /* !SLJIT_CONFIG_X86_64 */
+ EMIT_MOV(compiler, SLJIT_R0, 0, free_reg, 0);
+ if (free_reg != TMP_REG1)
+ return emit_mov(compiler, free_reg, 0, (free_reg == SLJIT_R1) ? SLJIT_MEM1(SLJIT_SP) : TMP_REG1, 0);
+#endif /* SLJIT_CONFIG_X86_64 */
+ }
return SLJIT_SUCCESS;
}
@@ -3057,8 +4730,8 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compi
inst = (sljit_u8*)ensure_buf(compiler, 2);
PTR_FAIL_IF(!inst);
- *inst++ = 0;
- *inst++ = 2;
+ inst[0] = 0;
+ inst[1] = 2;
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
if (dst & SLJIT_MEM)
@@ -3111,8 +4784,8 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_put_label* sljit_emit_put_label(struct slj
inst = (sljit_u8*)ensure_buf(compiler, 2);
PTR_FAIL_IF(!inst);
- *inst++ = 0;
- *inst++ = 3;
+ inst[0] = 0;
+ inst[1] = 3;
return put_label;
}
@@ -3123,9 +4796,9 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_ta
SLJIT_UPDATE_WX_FLAGS((void*)addr, (void*)(addr + sizeof(sljit_uw)), 0);
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
- sljit_unaligned_store_sw((void*)addr, new_target - (addr + 4) - (sljit_uw)executable_offset);
+ sljit_unaligned_store_sw((void*)addr, (sljit_sw)(new_target - (addr + 4) - (sljit_uw)executable_offset));
#else
- sljit_unaligned_store_sw((void*)addr, (sljit_sw) new_target);
+ sljit_unaligned_store_sw((void*)addr, (sljit_sw)new_target);
#endif
SLJIT_UPDATE_WX_FLAGS((void*)addr, (void*)(addr + sizeof(sljit_uw)), 1);
}
diff --git a/src/3rdparty/pcre2/src/sljit/sljitProtExecAllocator.c b/src/3rdparty/pcre2/src/sljit/sljitProtExecAllocator.c
index 147175afa6..915411fbed 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitProtExecAllocator.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitProtExecAllocator.c
@@ -66,7 +66,7 @@
/* --------------------------------------------------------------------- */
/* 64 KByte. */
-#define CHUNK_SIZE 0x10000
+#define CHUNK_SIZE (sljit_uw)0x10000
struct chunk_header {
void *executable;
@@ -194,7 +194,7 @@ static SLJIT_INLINE struct chunk_header* alloc_chunk(sljit_uw size)
if (fd == -1)
return NULL;
- if (ftruncate(fd, size)) {
+ if (ftruncate(fd, (off_t)size)) {
close(fd);
return NULL;
}
@@ -281,7 +281,7 @@ struct free_block {
#define AS_FREE_BLOCK(base, offset) \
((struct free_block*)(((sljit_u8*)base) + offset))
#define MEM_START(base) ((void*)((base) + 1))
-#define ALIGN_SIZE(size) (((size) + sizeof(struct block_header) + 7) & ~7)
+#define ALIGN_SIZE(size) (((size) + sizeof(struct block_header) + 7u) & ~(sljit_uw)7)
static struct free_block* free_blocks;
static sljit_uw allocated_size;
diff --git a/src/3rdparty/pcre2/src/sljit/sljitUtils.c b/src/3rdparty/pcre2/src/sljit/sljitUtils.c
index 9bce714735..967593b157 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitUtils.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitUtils.c
@@ -131,12 +131,12 @@ static SLJIT_INLINE int open_dev_zero(void)
#ifdef _WIN32
-static SLJIT_INLINE sljit_sw get_page_alignment(void) {
+static SLJIT_INLINE sljit_uw get_page_alignment(void) {
SYSTEM_INFO si;
- static sljit_sw sljit_page_align;
+ static sljit_uw sljit_page_align = 0;
if (!sljit_page_align) {
GetSystemInfo(&si);
- sljit_page_align = si.dwPageSize - 1;
+ sljit_page_align = (sljit_uw)si.dwPageSize - 1;
}
return sljit_page_align;
}
@@ -145,18 +145,21 @@ static SLJIT_INLINE sljit_sw get_page_alignment(void) {
#include <unistd.h>
-static SLJIT_INLINE sljit_sw get_page_alignment(void) {
- static sljit_sw sljit_page_align = -1;
- if (sljit_page_align < 0) {
+static SLJIT_INLINE sljit_uw get_page_alignment(void) {
+ static sljit_uw sljit_page_align = 0;
+
+ sljit_sw align;
+
+ if (!sljit_page_align) {
#ifdef _SC_PAGESIZE
- sljit_page_align = sysconf(_SC_PAGESIZE);
+ align = sysconf(_SC_PAGESIZE);
#else
- sljit_page_align = getpagesize();
+ align = getpagesize();
#endif
/* Should never happen. */
- if (sljit_page_align < 0)
- sljit_page_align = 4096;
- sljit_page_align--;
+ if (align < 0)
+ align = 4096;
+ sljit_page_align = (sljit_uw)align - 1;
}
return sljit_page_align;
}
@@ -227,7 +230,7 @@ SLJIT_API_FUNC_ATTRIBUTE void SLJIT_FUNC sljit_free_stack(struct sljit_stack *st
SLJIT_API_FUNC_ATTRIBUTE void SLJIT_FUNC sljit_free_stack(struct sljit_stack *stack, void *allocator_data)
{
SLJIT_UNUSED_ARG(allocator_data);
- munmap((void*)stack->min_start, stack->end - stack->min_start);
+ munmap((void*)stack->min_start, (size_t)(stack->end - stack->min_start));
SLJIT_FREE(stack, allocator_data);
}
@@ -237,7 +240,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_stack* SLJIT_FUNC sljit_allocate_stack(slj
{
struct sljit_stack *stack;
void *ptr;
- sljit_sw page_align;
+ sljit_uw page_align;
SLJIT_UNUSED_ARG(allocator_data);
@@ -295,7 +298,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_u8 *SLJIT_FUNC sljit_stack_resize(struct sljit_st
#if defined _WIN32 || defined(POSIX_MADV_DONTNEED)
sljit_uw aligned_old_start;
sljit_uw aligned_new_start;
- sljit_sw page_align;
+ sljit_uw page_align;
#endif
if ((new_start < stack->min_start) || (new_start >= stack->end))
diff --git a/src/3rdparty/pcre2/src/sljit/sljitWXExecAllocator.c b/src/3rdparty/pcre2/src/sljit/sljitWXExecAllocator.c
index 72d5b8dd2b..6893813155 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitWXExecAllocator.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitWXExecAllocator.c
@@ -59,38 +59,15 @@
#include <sys/mman.h>
#ifdef __NetBSD__
-#if defined(PROT_MPROTECT)
-#define check_se_protected(ptr, size) (0)
#define SLJIT_PROT_WX PROT_MPROTECT(PROT_EXEC)
-#else /* !PROT_MPROTECT */
-#ifdef _NETBSD_SOURCE
-#include <sys/param.h>
-#else /* !_NETBSD_SOURCE */
-typedef unsigned int u_int;
-#define devmajor_t sljit_s32
-#endif /* _NETBSD_SOURCE */
-#include <sys/sysctl.h>
-#include <unistd.h>
-
-#define check_se_protected(ptr, size) netbsd_se_protected()
-
-static SLJIT_INLINE int netbsd_se_protected(void)
-{
- int mib[3];
- int paxflags;
- size_t len = sizeof(paxflags);
-
- mib[0] = CTL_PROC;
- mib[1] = getpid();
- mib[2] = PROC_PID_PAXFLAGS;
-
- if (SLJIT_UNLIKELY(sysctl(mib, 3, &paxflags, &len, NULL, 0) < 0))
- return -1;
-
- return (paxflags & CTL_PROC_PAXFLAGS_MPROTECT) ? -1 : 0;
-}
-#endif /* PROT_MPROTECT */
+#define check_se_protected(ptr, size) (0)
#else /* POSIX */
+#if !(defined SLJIT_SINGLE_THREADED && SLJIT_SINGLE_THREADED)
+#include <pthread.h>
+#define SLJIT_SE_LOCK() pthread_mutex_lock(&se_lock)
+#define SLJIT_SE_UNLOCK() pthread_mutex_unlock(&se_lock)
+#endif /* !SLJIT_SINGLE_THREADED */
+
#define check_se_protected(ptr, size) generic_se_protected(ptr, size)
static SLJIT_INLINE int generic_se_protected(void *ptr, sljit_uw size)
@@ -102,22 +79,20 @@ static SLJIT_INLINE int generic_se_protected(void *ptr, sljit_uw size)
}
#endif /* NetBSD */
-#if defined SLJIT_SINGLE_THREADED && SLJIT_SINGLE_THREADED
+#ifndef SLJIT_SE_LOCK
#define SLJIT_SE_LOCK()
+#endif
+#ifndef SLJIT_SE_UNLOCK
#define SLJIT_SE_UNLOCK()
-#else /* !SLJIT_SINGLE_THREADED */
-#include <pthread.h>
-#define SLJIT_SE_LOCK() pthread_mutex_lock(&se_lock)
-#define SLJIT_SE_UNLOCK() pthread_mutex_unlock(&se_lock)
-#endif /* SLJIT_SINGLE_THREADED */
-
+#endif
#ifndef SLJIT_PROT_WX
#define SLJIT_PROT_WX 0
-#endif /* !SLJIT_PROT_WX */
+#endif
SLJIT_API_FUNC_ATTRIBUTE void* sljit_malloc_exec(sljit_uw size)
{
-#if !(defined SLJIT_SINGLE_THREADED && SLJIT_SINGLE_THREADED)
+#if !(defined SLJIT_SINGLE_THREADED && SLJIT_SINGLE_THREADED) \
+ && !defined(__NetBSD__)
static pthread_mutex_t se_lock = PTHREAD_MUTEX_INITIALIZER;
#endif
static int se_protected = !SLJIT_PROT_WX;