author     Simon Hausmann <simon.hausmann@digia.com>   2013-04-15 11:34:00 +0200
committer  Simon Hausmann <simon.hausmann@digia.com>   2013-04-15 12:47:53 +0200
commit     2cd8a90bd4d171ed2404822b6046455a94d4b6ed (patch)
tree       ed8eda56d54cb13d777be1cfa294c85ddd76e583 /src/3rdparty/masm/wtf/OSAllocatorPosix.cpp
parent     5bf33901429e64ab91f30037e25ec04aab4b4c11 (diff)
parent     bec019b5fe35e1701c944eb340df458d5e3d1cdb (diff)

Merge branch 'master' of ssh://codereview.qt-project.org:29418/playground/v4vm into v4

This is the initial merge of the v4vm JS engine, designed specifically for QML.
The engine is tested on Linux and Mac OS X, works on x86, x86-64 and ARM.

Change-Id: I826b72cfa3d3575007b70d78604080582db568db
Reviewed-by: Lars Knoll <lars.knoll@digia.com>
Diffstat (limited to 'src/3rdparty/masm/wtf/OSAllocatorPosix.cpp')
-rw-r--r--   src/3rdparty/masm/wtf/OSAllocatorPosix.cpp   193
1 file changed, 193 insertions, 0 deletions
diff --git a/src/3rdparty/masm/wtf/OSAllocatorPosix.cpp b/src/3rdparty/masm/wtf/OSAllocatorPosix.cpp
new file mode 100644
index 0000000000..7b2a55c6b6
--- /dev/null
+++ b/src/3rdparty/masm/wtf/OSAllocatorPosix.cpp
@@ -0,0 +1,193 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "OSAllocator.h"
+
+#if OS(UNIX)
+
+#include "PageAllocation.h"
+#include <errno.h>
+#include <sys/mman.h>
+#include <wtf/Assertions.h>
+#include <wtf/UnusedParam.h>
+
+namespace WTF {
+
+void* OSAllocator::reserveUncommitted(size_t bytes, Usage usage, bool writable, bool executable, bool includesGuardPages)
+{
+#if OS(QNX)
+ // Reserve memory with PROT_NONE and MAP_LAZY so it isn't committed now.
+ void* result = mmap(0, bytes, PROT_NONE, MAP_LAZY | MAP_PRIVATE | MAP_ANON, -1, 0);
+ if (result == MAP_FAILED)
+ CRASH();
+#elif OS(LINUX)
+ UNUSED_PARAM(usage);
+ UNUSED_PARAM(writable);
+ UNUSED_PARAM(executable);
+ UNUSED_PARAM(includesGuardPages);
+
+ void* result = mmap(0, bytes, PROT_NONE, MAP_NORESERVE | MAP_PRIVATE | MAP_ANON, -1, 0);
+ if (result == MAP_FAILED)
+ CRASH();
+ madvise(result, bytes, MADV_DONTNEED);
+#else
+ void* result = reserveAndCommit(bytes, usage, writable, executable, includesGuardPages);
+#if HAVE(MADV_FREE_REUSE)
+ // To support the "reserve then commit" model, we have to initially decommit.
+ while (madvise(result, bytes, MADV_FREE_REUSABLE) == -1 && errno == EAGAIN) { }
+#endif
+
+#endif // OS(QNX)
+
+ return result;
+}
+
+void* OSAllocator::reserveAndCommit(size_t bytes, Usage usage, bool writable, bool executable, bool includesGuardPages)
+{
+ // All POSIX reservations start out logically committed.
+ int protection = PROT_READ;
+ if (writable)
+ protection |= PROT_WRITE;
+ if (executable)
+ protection |= PROT_EXEC;
+
+ int flags = MAP_PRIVATE | MAP_ANON;
+#if PLATFORM(IOS)
+ if (executable)
+ flags |= MAP_JIT;
+#endif
+
+#if OS(DARWIN)
+ int fd = usage;
+#else
+ UNUSED_PARAM(usage);
+ int fd = -1;
+#endif
+
+ void* result = 0;
+#if (OS(DARWIN) && CPU(X86_64))
+ if (executable) {
+ ASSERT(includesGuardPages);
+ // Cook up an address to allocate at, using the following recipe:
+ // 17 bits of zero, stay in userspace kids.
+ // 26 bits of randomness for ASLR.
+ // 21 bits of zero, at least stay aligned within one level of the pagetables.
+ //
+ // But! - as a temporary workaround for some plugin problems (rdar://problem/6812854),
+ // for now instead of 2^26 bits of ASLR lets stick with 25 bits of randomization plus
+ // 2^24, which should put up somewhere in the middle of userspace (in the address range
+ // 0x200000000000 .. 0x5fffffffffff).
+ intptr_t randomLocation = 0;
+ randomLocation = arc4random() & ((1 << 25) - 1);
+ randomLocation += (1 << 24);
+ randomLocation <<= 21;
+ result = reinterpret_cast<void*>(randomLocation);
+ }
+#endif
+
+ result = mmap(result, bytes, protection, flags, fd, 0);
+ if (result == MAP_FAILED) {
+#if ENABLE(LLINT)
+ if (executable)
+ result = 0;
+ else
+#endif
+ CRASH();
+ }
+ if (result && includesGuardPages) {
+ // We use mmap to remap the guardpages rather than using mprotect as
+ // mprotect results in multiple references to the code region. This
+ // breaks the madvise based mechanism we use to return physical memory
+ // to the OS.
+ mmap(result, pageSize(), PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, fd, 0);
+ mmap(static_cast<char*>(result) + bytes - pageSize(), pageSize(), PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, fd, 0);
+ }
+ return result;
+}
+
+void OSAllocator::commit(void* address, size_t bytes, bool writable, bool executable)
+{
+#if OS(QNX)
+ int protection = PROT_READ;
+ if (writable)
+ protection |= PROT_WRITE;
+ if (executable)
+ protection |= PROT_EXEC;
+ if (MAP_FAILED == mmap(address, bytes, protection, MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0))
+ CRASH();
+#elif OS(LINUX)
+ int protection = PROT_READ;
+ if (writable)
+ protection |= PROT_WRITE;
+ if (executable)
+ protection |= PROT_EXEC;
+ if (mprotect(address, bytes, protection))
+ CRASH();
+ madvise(address, bytes, MADV_WILLNEED);
+#elif HAVE(MADV_FREE_REUSE)
+ UNUSED_PARAM(writable);
+ UNUSED_PARAM(executable);
+ while (madvise(address, bytes, MADV_FREE_REUSE) == -1 && errno == EAGAIN) { }
+#else
+ // Non-MADV_FREE_REUSE reservations automatically commit on demand.
+ UNUSED_PARAM(address);
+ UNUSED_PARAM(bytes);
+ UNUSED_PARAM(writable);
+ UNUSED_PARAM(executable);
+#endif
+}
+
+void OSAllocator::decommit(void* address, size_t bytes)
+{
+#if OS(QNX)
+ // Use PROT_NONE and MAP_LAZY to decommit the pages.
+ mmap(address, bytes, PROT_NONE, MAP_FIXED | MAP_LAZY | MAP_PRIVATE | MAP_ANON, -1, 0);
+#elif OS(LINUX)
+ madvise(address, bytes, MADV_DONTNEED);
+ if (mprotect(address, bytes, PROT_NONE))
+ CRASH();
+#elif HAVE(MADV_FREE_REUSE)
+ while (madvise(address, bytes, MADV_FREE_REUSABLE) == -1 && errno == EAGAIN) { }
+#elif HAVE(MADV_FREE)
+ while (madvise(address, bytes, MADV_FREE) == -1 && errno == EAGAIN) { }
+#elif HAVE(MADV_DONTNEED)
+ while (madvise(address, bytes, MADV_DONTNEED) == -1 && errno == EAGAIN) { }
+#else
+ UNUSED_PARAM(address);
+ UNUSED_PARAM(bytes);
+#endif
+}
+
+void OSAllocator::releaseDecommitted(void* address, size_t bytes)
+{
+ int result = munmap(address, bytes);
+ if (result == -1)
+ CRASH();
+}
+
+} // namespace WTF
+
+#endif // OS(UNIX)
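
The Linux path in the patch above follows the usual POSIX reserve-then-commit pattern: reserve address space with PROT_NONE and MAP_NORESERVE, commit by raising the protection with mprotect (plus an MADV_WILLNEED hint), decommit with MADV_DONTNEED, and release the range with munmap. The standalone sketch below is not part of the patch; the file and variable names are illustrative, and it only mirrors the Linux branches with raw POSIX calls.

// reserve_commit_demo.cpp -- illustrative sketch of the reserve/commit/decommit/release
// sequence used by the Linux code paths above. Linux-only; names are hypothetical.
#include <sys/mman.h>
#include <unistd.h>
#include <cstdio>
#include <cstdlib>
#include <cstring>

int main()
{
    const size_t bytes = 16 * static_cast<size_t>(sysconf(_SC_PAGESIZE));

    // Reserve: address space only; PROT_NONE + MAP_NORESERVE keeps it uncommitted.
    void* base = mmap(nullptr, bytes, PROT_NONE,
                      MAP_NORESERVE | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (base == MAP_FAILED) { perror("mmap"); return EXIT_FAILURE; }

    // Commit: make the range accessible and hint that it will be touched soon.
    if (mprotect(base, bytes, PROT_READ | PROT_WRITE)) { perror("mprotect"); return EXIT_FAILURE; }
    madvise(base, bytes, MADV_WILLNEED);
    memset(base, 0xab, bytes); // touching the pages faults them in

    // Decommit: drop the physical pages and make the range inaccessible again.
    madvise(base, bytes, MADV_DONTNEED);
    if (mprotect(base, bytes, PROT_NONE)) { perror("mprotect"); return EXIT_FAILURE; }

    // Release: return the address space to the OS.
    if (munmap(base, bytes) == -1) { perror("munmap"); return EXIT_FAILURE; }
    return EXIT_SUCCESS;
}

On Darwin the commit/decommit roles are instead filled by the MADV_FREE_REUSE / MADV_FREE_REUSABLE pair, as in the HAVE(MADV_FREE_REUSE) branches of the patch.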