Diffstat (limited to 'src/3rdparty/pcre2/src/sljit/sljitExecAllocator.c')
-rw-r--r--  src/3rdparty/pcre2/src/sljit/sljitExecAllocator.c | 136
1 file changed, 81 insertions(+), 55 deletions(-)
diff --git a/src/3rdparty/pcre2/src/sljit/sljitExecAllocator.c b/src/3rdparty/pcre2/src/sljit/sljitExecAllocator.c
index 61a32f23e9..92d940ddc2 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitExecAllocator.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitExecAllocator.c
@@ -66,7 +66,7 @@
/* --------------------------------------------------------------------- */
/* 64 KByte. */
-#define CHUNK_SIZE 0x10000
+#define CHUNK_SIZE (sljit_uw)0x10000u
/*
alloc_chunk / free_chunk :
@@ -79,6 +79,7 @@
*/
#ifdef _WIN32
+#define SLJIT_UPDATE_WX_FLAGS(from, to, enable_exec)
static SLJIT_INLINE void* alloc_chunk(sljit_uw size)
{
@@ -91,95 +92,115 @@ static SLJIT_INLINE void free_chunk(void *chunk, sljit_uw size)
VirtualFree(chunk, 0, MEM_RELEASE);
}
-#else
-
-#ifdef __APPLE__
-#ifdef MAP_ANON
-/* Configures TARGET_OS_OSX when appropriate */
-#include <TargetConditionals.h>
-
-#if TARGET_OS_OSX && defined(MAP_JIT)
-#include <sys/utsname.h>
-#endif /* TARGET_OS_OSX && MAP_JIT */
-
-#ifdef MAP_JIT
+#else /* POSIX */
+#if defined(__APPLE__) && defined(MAP_JIT)
/*
On macOS systems, returns MAP_JIT if it is defined _and_ we're running on a
- version where it's OK to have more than one JIT block.
+ version where it's OK to have more than one JIT block or where MAP_JIT is
+ required.
On non-macOS systems, returns MAP_JIT if it is defined.
*/
+#include <TargetConditionals.h>
+#if TARGET_OS_OSX
+#if defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86
+#ifdef MAP_ANON
+#include <sys/utsname.h>
+#include <stdlib.h>
+
+#define SLJIT_MAP_JIT (get_map_jit_flag())
+
static SLJIT_INLINE int get_map_jit_flag()
{
-#if TARGET_OS_OSX
- sljit_sw page_size = get_page_alignment() + 1;
+ size_t page_size;
void *ptr;
+ struct utsname name;
static int map_jit_flag = -1;
- /*
- The following code is thread safe because multiple initialization
- sets map_jit_flag to the same value and the code has no side-effects.
- Changing the kernel version witout system restart is (very) unlikely.
- */
- if (map_jit_flag == -1) {
- struct utsname name;
-
+ if (map_jit_flag < 0) {
map_jit_flag = 0;
uname(&name);
- /* Kernel version for 10.14.0 (Mojave) */
+ /* Kernel version for 10.14.0 (Mojave) or later */
if (atoi(name.release) >= 18) {
+ page_size = get_page_alignment() + 1;
/* Only use MAP_JIT if a hardened runtime is used */
+ ptr = mmap(NULL, page_size, PROT_WRITE | PROT_EXEC,
+ MAP_PRIVATE | MAP_ANON, -1, 0);
- ptr = mmap(NULL, page_size, PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANON, -1, 0);
-
- if (ptr == MAP_FAILED) {
- map_jit_flag = MAP_JIT;
- } else {
+ if (ptr != MAP_FAILED)
munmap(ptr, page_size);
- }
+ else
+ map_jit_flag = MAP_JIT;
}
}
-
return map_jit_flag;
-#else /* !TARGET_OS_OSX */
- return MAP_JIT;
-#endif /* TARGET_OS_OSX */
}
-
-#endif /* MAP_JIT */
#endif /* MAP_ANON */
-#endif /* __APPLE__ */
+#else /* !SLJIT_CONFIG_X86 */
+#if !(defined SLJIT_CONFIG_ARM && SLJIT_CONFIG_ARM)
+#error "Unsupported architecture"
+#endif /* SLJIT_CONFIG_ARM */
+#include <AvailabilityMacros.h>
+#include <pthread.h>
+
+#define SLJIT_MAP_JIT (MAP_JIT)
+#define SLJIT_UPDATE_WX_FLAGS(from, to, enable_exec) \
+ apple_update_wx_flags(enable_exec)
+
+static SLJIT_INLINE void apple_update_wx_flags(sljit_s32 enable_exec)
+{
+#if MAC_OS_X_VERSION_MIN_REQUIRED >= 110000
+ pthread_jit_write_protect_np(enable_exec);
+#else
+#error "Must target Big Sur or newer"
+#endif /* BigSur */
+}
+#endif /* SLJIT_CONFIG_X86 */
+#else /* !TARGET_OS_OSX */
+#define SLJIT_MAP_JIT (MAP_JIT)
+#endif /* TARGET_OS_OSX */
+#endif /* __APPLE__ && MAP_JIT */
+#ifndef SLJIT_UPDATE_WX_FLAGS
+#define SLJIT_UPDATE_WX_FLAGS(from, to, enable_exec)
+#endif /* !SLJIT_UPDATE_WX_FLAGS */
+#ifndef SLJIT_MAP_JIT
+#define SLJIT_MAP_JIT (0)
+#endif /* !SLJIT_MAP_JIT */
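Note on the macro block above: SLJIT_UPDATE_WX_FLAGS(from, to, enable_exec) is the portability seam for the W^X handling. On Windows and on POSIX targets without MAP_JIT it expands to nothing; on Apple Silicon it forwards to pthread_jit_write_protect_np(), which flips the calling thread between "MAP_JIT pages writable" (enable_exec == 0) and "MAP_JIT pages executable" (enable_exec == 1). A minimal sketch of the intended calling pattern, assuming the macros defined above; emit_code() is a hypothetical stand-in for a code generator:

    /* Sketch only: assumes the SLJIT_* macros above; emit_code() is hypothetical. */
    static void compile_into(void *chunk, sljit_uw size)
    {
        SLJIT_UPDATE_WX_FLAGS(chunk, (uint8_t *)chunk + size, 0); /* make the region writable */
        emit_code(chunk, size);                                   /* write the generated machine code */
        SLJIT_UPDATE_WX_FLAGS(chunk, (uint8_t *)chunk + size, 1); /* switch back to executable */
    }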
static SLJIT_INLINE void* alloc_chunk(sljit_uw size)
{
void *retval;
- const int prot = PROT_READ | PROT_WRITE | PROT_EXEC;
-
-#ifdef MAP_ANON
+ int prot = PROT_READ | PROT_WRITE | PROT_EXEC;
+ int flags = MAP_PRIVATE;
+ int fd = -1;
- int flags = MAP_PRIVATE | MAP_ANON;
-
-#ifdef MAP_JIT
- flags |= get_map_jit_flag();
+#ifdef PROT_MAX
+ prot |= PROT_MAX(prot);
#endif
- retval = mmap(NULL, size, prot, flags, -1, 0);
+#ifdef MAP_ANON
+ flags |= MAP_ANON | SLJIT_MAP_JIT;
#else /* !MAP_ANON */
if (SLJIT_UNLIKELY((dev_zero < 0) && open_dev_zero()))
return NULL;
- retval = mmap(NULL, size, prot, MAP_PRIVATE, dev_zero, 0);
+ fd = dev_zero;
#endif /* MAP_ANON */
+ retval = mmap(NULL, size, prot, flags, fd, 0);
if (retval == MAP_FAILED)
- retval = NULL;
- else {
- if (mprotect(retval, size, prot) < 0) {
- munmap(retval, size);
- retval = NULL;
- }
+ return NULL;
+
+#ifdef __FreeBSD__
+ /* HardenedBSD's mmap lies, so check permissions again */
+ if (mprotect(retval, size, PROT_READ | PROT_WRITE | PROT_EXEC) < 0) {
+ munmap(retval, size);
+ return NULL;
}
+#endif /* FreeBSD */
+
+ SLJIT_UPDATE_WX_FLAGS(retval, (uint8_t *)retval + size, 0);
return retval;
}
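Two portability details in the rewritten alloc_chunk() are worth spelling out. PROT_MAX() on FreeBSD declares the maximum protections the mapping may ever hold, so OR-ing PROT_MAX(prot) into an RWX prot is meant to keep later mprotect() upgrades legal under hardening; and on HardenedBSD an mmap() asking for RWX can appear to succeed while silently dropping PROT_EXEC, which is why the result is re-checked with mprotect(). A self-contained probe built on the same POSIX calls, independent of sljit and assuming MAP_ANON is available:

    #include <stddef.h>
    #include <sys/mman.h>

    /* Returns 1 if the OS really grants a writable and executable anonymous mapping. */
    static int rwx_mapping_allowed(size_t size)
    {
        int prot = PROT_READ | PROT_WRITE | PROT_EXEC;
    #ifdef PROT_MAX
        prot |= PROT_MAX(prot); /* FreeBSD: allow RWX for the lifetime of the mapping */
    #endif
        void *p = mmap(NULL, size, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
        if (p == MAP_FAILED)
            return 0;
        /* HardenedBSD may have quietly stripped PROT_EXEC; ask for it again explicitly. */
        int ok = (mprotect(p, size, PROT_READ | PROT_WRITE | PROT_EXEC) == 0);
        munmap(p, size);
        return ok;
    }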
@@ -189,7 +210,7 @@ static SLJIT_INLINE void free_chunk(void *chunk, sljit_uw size)
munmap(chunk, size);
}
-#endif
+#endif /* windows */
/* --------------------------------------------------------------------- */
/* Common functions */
@@ -214,7 +235,7 @@ struct free_block {
#define AS_FREE_BLOCK(base, offset) \
((struct free_block*)(((sljit_u8*)base) + offset))
#define MEM_START(base) ((void*)(((sljit_u8*)base) + sizeof(struct block_header)))
-#define ALIGN_SIZE(size) (((size) + sizeof(struct block_header) + 7) & ~7)
+#define ALIGN_SIZE(size) (((size) + sizeof(struct block_header) + 7u) & ~(sljit_uw)7)
static struct free_block* free_blocks;
static sljit_uw allocated_size;
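The ALIGN_SIZE() change is a type fix rather than a behavioural one: the rounding constant becomes unsigned and the mask is widened to sljit_uw, so the expression no longer mixes a signed int mask with a word-sized unsigned operand. The rounding itself is unchanged: the requested size plus the block header is rounded up to the next multiple of 8. For instance, if sizeof(struct block_header) were 8, ALIGN_SIZE(13) would evaluate to (13 + 8 + 7) & ~7 = 24.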
@@ -261,6 +282,7 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_malloc_exec(sljit_uw size)
while (free_block) {
if (free_block->size >= size) {
chunk_size = free_block->size;
+ SLJIT_UPDATE_WX_FLAGS(NULL, NULL, 0);
if (chunk_size > size + 64) {
/* We just cut a block from the end of the free block. */
chunk_size -= size;
@@ -326,6 +348,7 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_free_exec(void* ptr)
allocated_size -= header->size;
/* Connecting free blocks together if possible. */
+ SLJIT_UPDATE_WX_FLAGS(NULL, NULL, 0);
/* If header->prev_size == 0, free_block will equal to header.
In this case, free_block->header.size will be > 0. */
@@ -358,6 +381,7 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_free_exec(void* ptr)
}
}
+ SLJIT_UPDATE_WX_FLAGS(NULL, NULL, 1);
SLJIT_ALLOCATOR_UNLOCK();
}
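The NULL, NULL arguments passed to SLJIT_UPDATE_WX_FLAGS() in sljit_free_exec(), and in sljit_malloc_exec() above, make sense once you recall that the Apple Silicon implementation ignores the range: pthread_jit_write_protect_np() toggles all MAP_JIT pages for the calling thread. The free_block headers live inside the executable chunks themselves, so the allocator flips the region writable before editing them and flips it back to executable before dropping the lock. Condensed, the pattern the mutating entry points now follow looks like this, assuming the macros defined earlier in this file:

    SLJIT_ALLOCATOR_LOCK();
    SLJIT_UPDATE_WX_FLAGS(NULL, NULL, 0); /* region writable: free_block headers may be edited */
    /* ... split, unlink or merge struct free_block entries stored in the chunks ... */
    SLJIT_UPDATE_WX_FLAGS(NULL, NULL, 1); /* region executable again */
    SLJIT_ALLOCATOR_UNLOCK();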
@@ -367,6 +391,7 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_free_unused_memory_exec(void)
struct free_block* next_free_block;
SLJIT_ALLOCATOR_LOCK();
+ SLJIT_UPDATE_WX_FLAGS(NULL, NULL, 0);
free_block = free_blocks;
while (free_block) {
@@ -381,5 +406,6 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_free_unused_memory_exec(void)
}
SLJIT_ASSERT((total_size && free_blocks) || (!total_size && !free_blocks));
+ SLJIT_UPDATE_WX_FLAGS(NULL, NULL, 1);
SLJIT_ALLOCATOR_UNLOCK();
}
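For completeness, a minimal, hypothetical usage sketch of the three public entry points this diff touches (error handling elided; the generated code and its length are placeholders):

    void *code = sljit_malloc_exec(code_size);   /* hand out a block from an executable chunk */
    if (code) {
        /* ... generate machine code into the block ... */
        sljit_free_exec(code);                   /* return the block to the free list */
    }
    sljit_free_unused_memory_exec();             /* munmap() chunks that are now entirely free */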