author     Edward Welbourne <edward.welbourne@qt.io>  2018-10-03 14:05:24 +0200
committer  Edward Welbourne <edward.welbourne@qt.io>  2018-10-08 14:18:54 +0000
commit     55d640483c1100d9340418c3fb96390087f5e7c4 (patch)
tree       bda61d15f600cecd87037b9e6406b4383df03f07
parent     bc099f33ddc03af48fcef226a6c95e7dd79a64d4 (diff)
Update 3rdparty valgrind headers to Valgrind 3.13.0
The version we're using is at least seven years old. Let's try staying
up to date ...

Change-Id: I450a2fc37c22740fe554a81b471bf0fad340498e
Reviewed-by: Thiago Macieira <thiago.macieira@intel.com>
-rw-r--r--  src/testlib/3rdparty/VALGRIND_LICENSE.txt |    4
-rw-r--r--  src/testlib/3rdparty/callgrind_p.h        |   60
-rw-r--r--  src/testlib/3rdparty/qt_attribution.json  |    6
-rw-r--r--  src/testlib/3rdparty/valgrind_p.h         | 5643
4 files changed, 4203 insertions(+), 1510 deletions(-)
diff --git a/src/testlib/3rdparty/VALGRIND_LICENSE.txt b/src/testlib/3rdparty/VALGRIND_LICENSE.txt
index 714b75e6d1..d17836a47d 100644
--- a/src/testlib/3rdparty/VALGRIND_LICENSE.txt
+++ b/src/testlib/3rdparty/VALGRIND_LICENSE.txt
@@ -1,5 +1,5 @@
- Copyright (C) 2000-2007 Julian Seward
- Copyright (C) 2003-2007 Josef Weidendorfer. All rights reserved.
+ Copyright (C) 2000-2017 Julian Seward. All rights reserved.
+ Copyright (C) 2003-2017 Josef Weidendorfer. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
diff --git a/src/testlib/3rdparty/callgrind_p.h b/src/testlib/3rdparty/callgrind_p.h
index 10d79318c4..e971283616 100644
--- a/src/testlib/3rdparty/callgrind_p.h
+++ b/src/testlib/3rdparty/callgrind_p.h
@@ -13,7 +13,7 @@
This file is part of callgrind, a valgrind tool for cache simulation
and call tree tracing.
- Copyright (C) 2003-2007 Josef Weidendorfer. All rights reserved.
+ Copyright (C) 2003-2017 Josef Weidendorfer. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
@@ -49,7 +49,7 @@
----------------------------------------------------------------
Notice that the above BSD-style license applies to this one file
- (vgprof.h) only. The entire rest of Valgrind is licensed under
+ (callgrind.h) only. The entire rest of Valgrind is licensed under
the terms of the GNU General Public License, version 2. See the
COPYING file in the source distribution for details.
@@ -82,53 +82,38 @@ typedef
} Vg_CallgrindClientRequest;
/* Dump current state of cost centers, and zero them afterwards */
-#define CALLGRIND_DUMP_STATS \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
- VG_USERREQ__DUMP_STATS, \
- 0, 0, 0, 0, 0); \
- }
+#define CALLGRIND_DUMP_STATS \
+ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DUMP_STATS, \
+ 0, 0, 0, 0, 0)
/* Dump current state of cost centers, and zero them afterwards.
The argument is appended to a string stating the reason which triggered
the dump. This string is written as a description field into the
profile data dump. */
-#define CALLGRIND_DUMP_STATS_AT(pos_str) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
- VG_USERREQ__DUMP_STATS_AT, \
- pos_str, 0, 0, 0, 0); \
- }
+#define CALLGRIND_DUMP_STATS_AT(pos_str) \
+ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DUMP_STATS_AT, \
+ pos_str, 0, 0, 0, 0)
/* Zero cost centers */
-#define CALLGRIND_ZERO_STATS \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
- VG_USERREQ__ZERO_STATS, \
- 0, 0, 0, 0, 0); \
- }
+#define CALLGRIND_ZERO_STATS \
+ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__ZERO_STATS, \
+ 0, 0, 0, 0, 0)
/* Toggles collection state.
The collection state specifies whether the happening of events
should be noted or if they are to be ignored. Events are noted
by increment of counters in a cost center */
-#define CALLGRIND_TOGGLE_COLLECT \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
- VG_USERREQ__TOGGLE_COLLECT, \
- 0, 0, 0, 0, 0); \
- }
+#define CALLGRIND_TOGGLE_COLLECT \
+ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__TOGGLE_COLLECT, \
+ 0, 0, 0, 0, 0)
/* Start full callgrind instrumentation if not already switched on.
When cache simulation is done, it will flush the simulated cache;
- this will lead to an artifical cache warmup phase afterwards with
+ this will lead to an artificial cache warmup phase afterwards with
cache misses which would not have happened in reality. */
-#define CALLGRIND_START_INSTRUMENTATION \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
- VG_USERREQ__START_INSTRUMENTATION, \
- 0, 0, 0, 0, 0); \
- }
+#define CALLGRIND_START_INSTRUMENTATION \
+ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__START_INSTRUMENTATION, \
+ 0, 0, 0, 0, 0)
/* Stop full callgrind instrumentation if not already switched off.
This flushes Valgrinds translation cache, and does no additional
@@ -137,11 +122,8 @@ typedef
Use this to bypass Callgrind aggregation for uninteresting code parts.
To start Callgrind in this mode to ignore the setup phase, use
the option "--instr-atstart=no". */
-#define CALLGRIND_STOP_INSTRUMENTATION \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
- VG_USERREQ__STOP_INSTRUMENTATION, \
- 0, 0, 0, 0, 0); \
- }
+#define CALLGRIND_STOP_INSTRUMENTATION \
+ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__STOP_INSTRUMENTATION, \
+ 0, 0, 0, 0, 0)
#endif /* __CALLGRIND_H */
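The rewritten request macros are plain statements with no local temporaries, so they drop straight into client code. A minimal sketch of intended use, assuming the program is run as "valgrind --tool=callgrind --instr-atstart=no ./app" (hot_path() and the surrounding program are hypothetical; the CALLGRIND_* macros and the option are real):

#include <valgrind/callgrind.h>   /* callgrind_p.h inside Qt's testlib */

static void hot_path(void)
{
    /* ... the work being profiled ... */
}

int main(void)
{
    CALLGRIND_START_INSTRUMENTATION;      /* setup was skipped by --instr-atstart=no */
    CALLGRIND_TOGGLE_COLLECT;             /* start counting events */
    hot_path();
    CALLGRIND_TOGGLE_COLLECT;             /* stop counting */
    CALLGRIND_DUMP_STATS_AT("hot_path");  /* tag this dump in the profile data */
    CALLGRIND_STOP_INSTRUMENTATION;
    return 0;
}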
diff --git a/src/testlib/3rdparty/qt_attribution.json b/src/testlib/3rdparty/qt_attribution.json
index 18522b6cd8..734a74dbe1 100644
--- a/src/testlib/3rdparty/qt_attribution.json
+++ b/src/testlib/3rdparty/qt_attribution.json
@@ -8,12 +8,12 @@
"Description": "An instrumentation framework for building dynamic analysis tools.",
"Homepage": "http://valgrind.org/",
- "Version": "3.3.0",
+ "Version": "3.13.0",
"License": "BSD 4-clause \"Original\" or \"Old\" License",
"LicenseId": "BSD-4-Clause",
"LicenseFile": "VALGRIND_LICENSE.txt",
- "Copyright": "Copyright (C) 2000-2007 Julian Seward
-Copyright (C) 2003-2007 Josef Weidendorfer."
+ "Copyright": "Copyright (C) 2000-2017 Julian Seward
+Copyright (C) 2003-2017 Josef Weidendorfer."
},
{
"Id": "cycle",
diff --git a/src/testlib/3rdparty/valgrind_p.h b/src/testlib/3rdparty/valgrind_p.h
index 4745371d81..5aed0dfca5 100644
--- a/src/testlib/3rdparty/valgrind_p.h
+++ b/src/testlib/3rdparty/valgrind_p.h
@@ -12,7 +12,7 @@
This file is part of Valgrind, a dynamic binary instrumentation
framework.
- Copyright (C) 2000-2007 Julian Seward. All rights reserved.
+ Copyright (C) 2000-2017 Julian Seward. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
@@ -73,6 +73,25 @@
#ifndef __VALGRIND_H
#define __VALGRIND_H
+
+/* ------------------------------------------------------------------ */
+/* VERSION NUMBER OF VALGRIND */
+/* ------------------------------------------------------------------ */
+
+/* Specify Valgrind's version number, so that user code can
+ conditionally compile based on our version number. Note that these
+ were introduced at version 3.6 and so do not exist in version 3.5
+ or earlier. The recommended way to use them to check for "version
+ X.Y or later" is (eg)
+
+#if defined(__VALGRIND_MAJOR__) && defined(__VALGRIND_MINOR__) \
+ && (__VALGRIND_MAJOR__ > 3 \
+ || (__VALGRIND_MAJOR__ == 3 && __VALGRIND_MINOR__ >= 6))
+*/
+#define __VALGRIND_MAJOR__ 3
+#define __VALGRIND_MINOR__ 13
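/* A sketch (not part of this commit) of the check recommended above,
   wrapped in a feature macro; MY_HAVE_VG_REQUESTS is a hypothetical
   name used for illustration. */
#if defined(__VALGRIND_MAJOR__) && defined(__VALGRIND_MINOR__) \
    && (__VALGRIND_MAJOR__ > 3 \
        || (__VALGRIND_MAJOR__ == 3 && __VALGRIND_MINOR__ >= 6))
# define MY_HAVE_VG_REQUESTS 1
#else
# define MY_HAVE_VG_REQUESTS 0   /* headers older than 3.6, or absent */
#endif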
+
+
#include <stdarg.h>
/* Nb: this file might be included in a file compiled with -ansi. So
@@ -84,34 +103,69 @@
identifying architectures, which are different to the ones we use
within the rest of Valgrind. Note, __powerpc__ is active for both
32 and 64-bit PPC, whereas __powerpc64__ is only active for the
- latter (on Linux, that is). */
+ latter (on Linux, that is).
+
+ Misc note: how to find out what's predefined in gcc by default:
+ gcc -Wp,-dM somefile.c
+*/
+#undef PLAT_x86_darwin
+#undef PLAT_amd64_darwin
+#undef PLAT_x86_win32
+#undef PLAT_amd64_win64
#undef PLAT_x86_linux
#undef PLAT_amd64_linux
#undef PLAT_ppc32_linux
-#undef PLAT_ppc64_linux
-#undef PLAT_ppc32_aix5
-#undef PLAT_ppc64_aix5
-
-#if !defined(_AIX) && defined(__i386__)
+#undef PLAT_ppc64be_linux
+#undef PLAT_ppc64le_linux
+#undef PLAT_arm_linux
+#undef PLAT_arm64_linux
+#undef PLAT_s390x_linux
+#undef PLAT_mips32_linux
+#undef PLAT_mips64_linux
+#undef PLAT_x86_solaris
+#undef PLAT_amd64_solaris
+
+
+#if defined(__APPLE__) && defined(__i386__)
+# define PLAT_x86_darwin 1
+#elif defined(__APPLE__) && defined(__x86_64__)
+# define PLAT_amd64_darwin 1
+#elif (defined(__MINGW32__) && !defined(__MINGW64__)) \
+ || defined(__CYGWIN32__) \
+ || (defined(_WIN32) && defined(_M_IX86))
+# define PLAT_x86_win32 1
+#elif defined(__MINGW64__) \
+ || (defined(_WIN64) && defined(_M_X64))
+# define PLAT_amd64_win64 1
+#elif defined(__linux__) && defined(__i386__)
# define PLAT_x86_linux 1
-#elif !defined(_AIX) && defined(__x86_64__)
+#elif defined(__linux__) && defined(__x86_64__) && !defined(__ILP32__)
# define PLAT_amd64_linux 1
-#elif !defined(_AIX) && defined(__powerpc__) && !defined(__powerpc64__)
+#elif defined(__linux__) && defined(__powerpc__) && !defined(__powerpc64__)
# define PLAT_ppc32_linux 1
-#elif !defined(_AIX) && defined(__powerpc__) && defined(__powerpc64__)
-# define PLAT_ppc64_linux 1
-#elif defined(_AIX) && defined(__64BIT__)
-# define PLAT_ppc64_aix5 1
-#elif defined(_AIX) && !defined(__64BIT__)
-# define PLAT_ppc32_aix5 1
-#endif
-
-
+#elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__) && _CALL_ELF != 2
+/* Big Endian uses ELF version 1 */
+# define PLAT_ppc64be_linux 1
+#elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__) && _CALL_ELF == 2
+/* Little Endian uses ELF version 2 */
+# define PLAT_ppc64le_linux 1
+#elif defined(__linux__) && defined(__arm__) && !defined(__aarch64__)
+# define PLAT_arm_linux 1
+#elif defined(__linux__) && defined(__aarch64__) && !defined(__arm__)
+# define PLAT_arm64_linux 1
+#elif defined(__linux__) && defined(__s390__) && defined(__s390x__)
+# define PLAT_s390x_linux 1
+#elif defined(__linux__) && defined(__mips__) && (__mips==64)
+# define PLAT_mips64_linux 1
+#elif defined(__linux__) && defined(__mips__) && (__mips!=64)
+# define PLAT_mips32_linux 1
+#elif defined(__sun) && defined(__i386__)
+# define PLAT_x86_solaris 1
+#elif defined(__sun) && defined(__x86_64__)
+# define PLAT_amd64_solaris 1
+#else
/* If we're not compiling for our target platform, don't generate
any inline asms. */
-#if !defined(PLAT_x86_linux) && !defined(PLAT_amd64_linux) \
- && !defined(PLAT_ppc32_linux) && !defined(PLAT_ppc64_linux) \
- && !defined(PLAT_ppc32_aix5) && !defined(PLAT_ppc64_aix5)
# if !defined(NVALGRIND)
# define NVALGRIND 1
# endif
@@ -123,18 +177,41 @@
/* in here of use to end-users -- skip to the next section. */
/* ------------------------------------------------------------------ */
+/*
+ * VALGRIND_DO_CLIENT_REQUEST(): a statement that invokes a Valgrind client
+ * request. Accepts both pointers and integers as arguments.
+ *
+ * VALGRIND_DO_CLIENT_REQUEST_STMT(): a statement that invokes a Valgrind
+ * client request that does not return a value.
+
+ * VALGRIND_DO_CLIENT_REQUEST_EXPR(): a C expression that invokes a Valgrind
+ * client request and whose value equals the client request result. Accepts
+ * both pointers and integers as arguments. Note that such calls are not
+ * necessarily pure functions -- they may have side effects.
+ */
+
+#define VALGRIND_DO_CLIENT_REQUEST(_zzq_rlval, _zzq_default, \
+ _zzq_request, _zzq_arg1, _zzq_arg2, \
+ _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ do { (_zzq_rlval) = VALGRIND_DO_CLIENT_REQUEST_EXPR((_zzq_default), \
+ (_zzq_request), (_zzq_arg1), (_zzq_arg2), \
+ (_zzq_arg3), (_zzq_arg4), (_zzq_arg5)); } while (0)
+
+#define VALGRIND_DO_CLIENT_REQUEST_STMT(_zzq_request, _zzq_arg1, \
+ _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ do { (void) VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
+ (_zzq_request), (_zzq_arg1), (_zzq_arg2), \
+ (_zzq_arg3), (_zzq_arg4), (_zzq_arg5)); } while (0)
+
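/* Illustrative sketch, not part of the patch: the expression form
   yields the request's result, or the stated default when the program
   is not running under Valgrind.  VG_USERREQ__RUNNING_ON_VALGRIND is
   defined further down in the full header, outside this excerpt. */
static int running_on_valgrind(void)
{
    return (int) VALGRIND_DO_CLIENT_REQUEST_EXPR(
                     0 /* default: not under Valgrind */,
                     VG_USERREQ__RUNNING_ON_VALGRIND, 0, 0, 0, 0, 0);
}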
#if defined(NVALGRIND)
/* Define NVALGRIND to completely remove the Valgrind magic sequence
from the compiled code (analogous to NDEBUG's effects on
assert()) */
-#define VALGRIND_DO_CLIENT_REQUEST( \
- _zzq_rlval, _zzq_default, _zzq_request, \
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- { \
- (_zzq_rlval) = (_zzq_default); \
- (void)_zzq_rlval; \
- }
+ (_zzq_default)
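/* Sketch: since the NVALGRIND variant expands to just the default, a
   guard built on the helper above folds to `if (0)` in a -DNVALGRIND
   build and costs nothing.  timeout_ms is a hypothetical tunable. */
static long timeout_ms = 250;

static void configure_timeouts(void)
{
    if (running_on_valgrind())
        timeout_ms *= 20;   /* everything runs slower under Valgrind */
}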
#else /* ! NVALGRIND */
@@ -173,9 +250,11 @@
inline asm stuff to be useful.
*/
-/* ------------------------- x86-linux ------------------------- */
+/* ----------------- x86-{linux,darwin,solaris} ---------------- */
-#if defined(PLAT_x86_linux)
+#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) \
+ || (defined(PLAT_x86_win32) && defined(__GNUC__)) \
+ || defined(PLAT_x86_solaris)
typedef
struct {
@@ -187,10 +266,11 @@ typedef
"roll $3, %%edi ; roll $13, %%edi\n\t" \
"roll $29, %%edi ; roll $19, %%edi\n\t"
-#define VALGRIND_DO_CLIENT_REQUEST( \
- _zzq_rlval, _zzq_default, _zzq_request, \
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- { volatile unsigned int _zzq_args[6]; \
+ __extension__ \
+ ({volatile unsigned int _zzq_args[6]; \
volatile unsigned int _zzq_result; \
_zzq_args[0] = (unsigned int)(_zzq_request); \
_zzq_args[1] = (unsigned int)(_zzq_arg1); \
@@ -205,9 +285,8 @@ typedef
: "a" (&_zzq_args[0]), "0" (_zzq_default) \
: "cc", "memory" \
); \
- _zzq_rlval = _zzq_result; \
- (void)_zzq_rlval; \
- }
+ _zzq_result; \
+ })
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
@@ -226,15 +305,100 @@ typedef
__SPECIAL_INSTRUCTION_PREAMBLE \
/* call-noredir *%EAX */ \
"xchgl %%edx,%%edx\n\t"
-#endif /* PLAT_x86_linux */
-/* ------------------------ amd64-linux ------------------------ */
+#define VALGRIND_VEX_INJECT_IR() \
+ do { \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ "xchgl %%edi,%%edi\n\t" \
+ : : : "cc", "memory" \
+ ); \
+ } while (0)
+
+#endif /* PLAT_x86_linux || PLAT_x86_darwin || (PLAT_x86_win32 && __GNUC__)
+ || PLAT_x86_solaris */
+
+/* ------------------------- x86-Win32 ------------------------- */
+
+#if defined(PLAT_x86_win32) && !defined(__GNUC__)
+
+typedef
+ struct {
+ unsigned int nraddr; /* where's the code? */
+ }
+ OrigFn;
+
+#if defined(_MSC_VER)
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ __asm rol edi, 3 __asm rol edi, 13 \
+ __asm rol edi, 29 __asm rol edi, 19
+
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ valgrind_do_client_request_expr((uintptr_t)(_zzq_default), \
+ (uintptr_t)(_zzq_request), (uintptr_t)(_zzq_arg1), \
+ (uintptr_t)(_zzq_arg2), (uintptr_t)(_zzq_arg3), \
+ (uintptr_t)(_zzq_arg4), (uintptr_t)(_zzq_arg5))
+
+static __inline uintptr_t
+valgrind_do_client_request_expr(uintptr_t _zzq_default, uintptr_t _zzq_request,
+ uintptr_t _zzq_arg1, uintptr_t _zzq_arg2,
+ uintptr_t _zzq_arg3, uintptr_t _zzq_arg4,
+ uintptr_t _zzq_arg5)
+{
+ volatile uintptr_t _zzq_args[6];
+ volatile unsigned int _zzq_result;
+ _zzq_args[0] = (uintptr_t)(_zzq_request);
+ _zzq_args[1] = (uintptr_t)(_zzq_arg1);
+ _zzq_args[2] = (uintptr_t)(_zzq_arg2);
+ _zzq_args[3] = (uintptr_t)(_zzq_arg3);
+ _zzq_args[4] = (uintptr_t)(_zzq_arg4);
+ _zzq_args[5] = (uintptr_t)(_zzq_arg5);
+ __asm { __asm lea eax, _zzq_args __asm mov edx, _zzq_default
+ __SPECIAL_INSTRUCTION_PREAMBLE
+ /* %EDX = client_request ( %EAX ) */
+ __asm xchg ebx,ebx
+ __asm mov _zzq_result, edx
+ }
+ return _zzq_result;
+}
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ volatile unsigned int __addr; \
+ __asm { __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %EAX = guest_NRADDR */ \
+ __asm xchg ecx,ecx \
+ __asm mov __addr, eax \
+ } \
+ _zzq_orig->nraddr = __addr; \
+ }
+
+#define VALGRIND_CALL_NOREDIR_EAX ERROR
+
+#define VALGRIND_VEX_INJECT_IR() \
+ do { \
+ __asm { __SPECIAL_INSTRUCTION_PREAMBLE \
+ __asm xchg edi,edi \
+ } \
+ } while (0)
+
+#else
+#error Unsupported compiler.
+#endif
+
+#endif /* PLAT_x86_win32 */
-#if defined(PLAT_amd64_linux)
+/* ----------------- amd64-{linux,darwin,solaris} --------------- */
+
+#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin) \
+ || defined(PLAT_amd64_solaris) \
+ || (defined(PLAT_amd64_win64) && defined(__GNUC__))
typedef
struct {
- unsigned long long int nraddr; /* where's the code? */
+ unsigned long int nraddr; /* where's the code? */
}
OrigFn;
@@ -242,17 +406,18 @@ typedef
"rolq $3, %%rdi ; rolq $13, %%rdi\n\t" \
"rolq $61, %%rdi ; rolq $51, %%rdi\n\t"
-#define VALGRIND_DO_CLIENT_REQUEST( \
- _zzq_rlval, _zzq_default, _zzq_request, \
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- { volatile unsigned long long int _zzq_args[6]; \
- volatile unsigned long long int _zzq_result; \
- _zzq_args[0] = (unsigned long long int)(_zzq_request); \
- _zzq_args[1] = (unsigned long long int)(_zzq_arg1); \
- _zzq_args[2] = (unsigned long long int)(_zzq_arg2); \
- _zzq_args[3] = (unsigned long long int)(_zzq_arg3); \
- _zzq_args[4] = (unsigned long long int)(_zzq_arg4); \
- _zzq_args[5] = (unsigned long long int)(_zzq_arg5); \
+ __extension__ \
+ ({ volatile unsigned long int _zzq_args[6]; \
+ volatile unsigned long int _zzq_result; \
+ _zzq_args[0] = (unsigned long int)(_zzq_request); \
+ _zzq_args[1] = (unsigned long int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned long int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned long int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned long int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned long int)(_zzq_arg5); \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %RDX = client_request ( %RAX ) */ \
"xchgq %%rbx,%%rbx" \
@@ -260,13 +425,12 @@ typedef
: "a" (&_zzq_args[0]), "0" (_zzq_default) \
: "cc", "memory" \
); \
- _zzq_rlval = _zzq_result; \
- (void)_zzq_rlval; \
- }
+ _zzq_result; \
+ })
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- volatile unsigned long long int __addr; \
+ volatile unsigned long int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %RAX = guest_NRADDR */ \
"xchgq %%rcx,%%rcx" \
@@ -281,7 +445,24 @@ typedef
__SPECIAL_INSTRUCTION_PREAMBLE \
/* call-noredir *%RAX */ \
"xchgq %%rdx,%%rdx\n\t"
-#endif /* PLAT_amd64_linux */
+
+#define VALGRIND_VEX_INJECT_IR() \
+ do { \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ "xchgq %%rdi,%%rdi\n\t" \
+ : : : "cc", "memory" \
+ ); \
+ } while (0)
+
+#endif /* PLAT_amd64_linux || PLAT_amd64_darwin || PLAT_amd64_solaris */
+
+/* ------------------------- amd64-Win64 ------------------------- */
+
+#if defined(PLAT_amd64_win64) && !defined(__GNUC__)
+
+#error Unsupported compiler.
+
+#endif /* PLAT_amd64_win64 */
/* ------------------------ ppc32-linux ------------------------ */
@@ -294,14 +475,15 @@ typedef
OrigFn;
#define __SPECIAL_INSTRUCTION_PREAMBLE \
- "rlwinm 0,0,3,0,0 ; rlwinm 0,0,13,0,0\n\t" \
- "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"
+ "rlwinm 0,0,3,0,31 ; rlwinm 0,0,13,0,31\n\t" \
+ "rlwinm 0,0,29,0,31 ; rlwinm 0,0,19,0,31\n\t"
-#define VALGRIND_DO_CLIENT_REQUEST( \
- _zzq_rlval, _zzq_default, _zzq_request, \
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
\
- { unsigned int _zzq_args[6]; \
+ __extension__ \
+ ({ unsigned int _zzq_args[6]; \
unsigned int _zzq_result; \
unsigned int* _zzq_ptr; \
_zzq_args[0] = (unsigned int)(_zzq_request); \
@@ -320,9 +502,8 @@ typedef
: "=b" (_zzq_result) \
: "b" (_zzq_default), "b" (_zzq_ptr) \
: "cc", "memory", "r3", "r4"); \
- _zzq_rlval = _zzq_result; \
- (void)_zzq_rlval; \
- }
+ _zzq_result; \
+ })
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
@@ -342,16 +523,24 @@ typedef
__SPECIAL_INSTRUCTION_PREAMBLE \
/* branch-and-link-to-noredir *%R11 */ \
"or 3,3,3\n\t"
+
+#define VALGRIND_VEX_INJECT_IR() \
+ do { \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ "or 5,5,5\n\t" \
+ ); \
+ } while (0)
+
#endif /* PLAT_ppc32_linux */
/* ------------------------ ppc64-linux ------------------------ */
-#if defined(PLAT_ppc64_linux)
+#if defined(PLAT_ppc64be_linux)
typedef
struct {
- unsigned long long int nraddr; /* where's the code? */
- unsigned long long int r2; /* what tocptr do we need? */
+ unsigned long int nraddr; /* where's the code? */
+ unsigned long int r2; /* what tocptr do we need? */
}
OrigFn;
@@ -359,47 +548,52 @@ typedef
"rotldi 0,0,3 ; rotldi 0,0,13\n\t" \
"rotldi 0,0,61 ; rotldi 0,0,51\n\t"
-#define VALGRIND_DO_CLIENT_REQUEST( \
- _zzq_rlval, _zzq_default, _zzq_request, \
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
\
- { unsigned long long int _zzq_args[6]; \
- register unsigned long long int _zzq_result __asm__("r3"); \
- register unsigned long long int* _zzq_ptr __asm__("r4"); \
- _zzq_args[0] = (unsigned long long int)(_zzq_request); \
- _zzq_args[1] = (unsigned long long int)(_zzq_arg1); \
- _zzq_args[2] = (unsigned long long int)(_zzq_arg2); \
- _zzq_args[3] = (unsigned long long int)(_zzq_arg3); \
- _zzq_args[4] = (unsigned long long int)(_zzq_arg4); \
- _zzq_args[5] = (unsigned long long int)(_zzq_arg5); \
+ __extension__ \
+ ({ unsigned long int _zzq_args[6]; \
+ unsigned long int _zzq_result; \
+ unsigned long int* _zzq_ptr; \
+ _zzq_args[0] = (unsigned long int)(_zzq_request); \
+ _zzq_args[1] = (unsigned long int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned long int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned long int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned long int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned long int)(_zzq_arg5); \
_zzq_ptr = _zzq_args; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ __asm__ volatile("mr 3,%1\n\t" /*default*/ \
+ "mr 4,%2\n\t" /*ptr*/ \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = client_request ( %R4 ) */ \
- "or 1,1,1" \
- : "=r" (_zzq_result) \
- : "0" (_zzq_default), "r" (_zzq_ptr) \
- : "cc", "memory"); \
- _zzq_rlval = _zzq_result; \
- (void)_zzq_rlval; \
- }
+ "or 1,1,1\n\t" \
+ "mr %0,3" /*result*/ \
+ : "=b" (_zzq_result) \
+ : "b" (_zzq_default), "b" (_zzq_ptr) \
+ : "cc", "memory", "r3", "r4"); \
+ _zzq_result; \
+ })
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- register unsigned long long int __addr __asm__("r3"); \
+ unsigned long int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = guest_NRADDR */ \
- "or 2,2,2" \
- : "=r" (__addr) \
+ "or 2,2,2\n\t" \
+ "mr %0,3" \
+ : "=b" (__addr) \
: \
- : "cc", "memory" \
+ : "cc", "memory", "r3" \
); \
_zzq_orig->nraddr = __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = guest_NRADDR_GPR2 */ \
- "or 4,4,4" \
- : "=r" (__addr) \
+ "or 4,4,4\n\t" \
+ "mr %0,3" \
+ : "=b" (__addr) \
: \
- : "cc", "memory" \
+ : "cc", "memory", "r3" \
); \
_zzq_orig->r2 = __addr; \
}
@@ -409,61 +603,65 @@ typedef
/* branch-and-link-to-noredir *%R11 */ \
"or 3,3,3\n\t"
-#endif /* PLAT_ppc64_linux */
+#define VALGRIND_VEX_INJECT_IR() \
+ do { \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ "or 5,5,5\n\t" \
+ ); \
+ } while (0)
-/* ------------------------ ppc32-aix5 ------------------------- */
+#endif /* PLAT_ppc64be_linux */
-#if defined(PLAT_ppc32_aix5)
+#if defined(PLAT_ppc64le_linux)
typedef
- struct {
- unsigned int nraddr; /* where's the code? */
- unsigned int r2; /* what tocptr do we need? */
+ struct {
+ unsigned long int nraddr; /* where's the code? */
+ unsigned long int r2; /* what tocptr do we need? */
}
OrigFn;
#define __SPECIAL_INSTRUCTION_PREAMBLE \
- "rlwinm 0,0,3,0,0 ; rlwinm 0,0,13,0,0\n\t" \
- "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"
+ "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \
+ "rotldi 0,0,61 ; rotldi 0,0,51\n\t"
-#define VALGRIND_DO_CLIENT_REQUEST( \
- _zzq_rlval, _zzq_default, _zzq_request, \
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
\
- { unsigned int _zzq_args[7]; \
- register unsigned int _zzq_result; \
- register unsigned int* _zzq_ptr; \
- _zzq_args[0] = (unsigned int)(_zzq_request); \
- _zzq_args[1] = (unsigned int)(_zzq_arg1); \
- _zzq_args[2] = (unsigned int)(_zzq_arg2); \
- _zzq_args[3] = (unsigned int)(_zzq_arg3); \
- _zzq_args[4] = (unsigned int)(_zzq_arg4); \
- _zzq_args[5] = (unsigned int)(_zzq_arg5); \
- _zzq_args[6] = (unsigned int)(_zzq_default); \
+ __extension__ \
+ ({ unsigned long int _zzq_args[6]; \
+ unsigned long int _zzq_result; \
+ unsigned long int* _zzq_ptr; \
+ _zzq_args[0] = (unsigned long int)(_zzq_request); \
+ _zzq_args[1] = (unsigned long int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned long int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned long int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned long int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned long int)(_zzq_arg5); \
_zzq_ptr = _zzq_args; \
- __asm__ volatile("mr 4,%1\n\t" \
- "lwz 3, 24(4)\n\t" \
+ __asm__ volatile("mr 3,%1\n\t" /*default*/ \
+ "mr 4,%2\n\t" /*ptr*/ \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = client_request ( %R4 ) */ \
"or 1,1,1\n\t" \
- "mr %0,3" \
+ "mr %0,3" /*result*/ \
: "=b" (_zzq_result) \
- : "b" (_zzq_ptr) \
- : "r3", "r4", "cc", "memory"); \
- _zzq_rlval = _zzq_result; \
- (void)_zzq_rlval; \
- }
+ : "b" (_zzq_default), "b" (_zzq_ptr) \
+ : "cc", "memory", "r3", "r4"); \
+ _zzq_result; \
+ })
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- register unsigned int __addr; \
+ unsigned long int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = guest_NRADDR */ \
"or 2,2,2\n\t" \
"mr %0,3" \
: "=b" (__addr) \
: \
- : "r3", "cc", "memory" \
+ : "cc", "memory", "r3" \
); \
_zzq_orig->nraddr = __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
@@ -472,90 +670,380 @@ typedef
"mr %0,3" \
: "=b" (__addr) \
: \
- : "r3", "cc", "memory" \
+ : "cc", "memory", "r3" \
); \
_zzq_orig->r2 = __addr; \
}
-#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
__SPECIAL_INSTRUCTION_PREAMBLE \
- /* branch-and-link-to-noredir *%R11 */ \
+ /* branch-and-link-to-noredir *%R12 */ \
"or 3,3,3\n\t"
-#endif /* PLAT_ppc32_aix5 */
+#define VALGRIND_VEX_INJECT_IR() \
+ do { \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ "or 5,5,5\n\t" \
+ ); \
+ } while (0)
+
+#endif /* PLAT_ppc64le_linux */
-/* ------------------------ ppc64-aix5 ------------------------- */
+/* ------------------------- arm-linux ------------------------- */
-#if defined(PLAT_ppc64_aix5)
+#if defined(PLAT_arm_linux)
typedef
struct {
- unsigned long long int nraddr; /* where's the code? */
- unsigned long long int r2; /* what tocptr do we need? */
+ unsigned int nraddr; /* where's the code? */
}
OrigFn;
#define __SPECIAL_INSTRUCTION_PREAMBLE \
- "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \
- "rotldi 0,0,61 ; rotldi 0,0,51\n\t"
+ "mov r12, r12, ror #3 ; mov r12, r12, ror #13 \n\t" \
+ "mov r12, r12, ror #29 ; mov r12, r12, ror #19 \n\t"
-#define VALGRIND_DO_CLIENT_REQUEST( \
- _zzq_rlval, _zzq_default, _zzq_request, \
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
\
- { unsigned long long int _zzq_args[7]; \
- register unsigned long long int _zzq_result; \
- register unsigned long long int* _zzq_ptr; \
- _zzq_args[0] = (unsigned int long long)(_zzq_request); \
- _zzq_args[1] = (unsigned int long long)(_zzq_arg1); \
- _zzq_args[2] = (unsigned int long long)(_zzq_arg2); \
- _zzq_args[3] = (unsigned int long long)(_zzq_arg3); \
- _zzq_args[4] = (unsigned int long long)(_zzq_arg4); \
- _zzq_args[5] = (unsigned int long long)(_zzq_arg5); \
- _zzq_args[6] = (unsigned int long long)(_zzq_default); \
- _zzq_ptr = _zzq_args; \
- __asm__ volatile("mr 4,%1\n\t" \
- "ld 3, 48(4)\n\t" \
+ __extension__ \
+ ({volatile unsigned int _zzq_args[6]; \
+ volatile unsigned int _zzq_result; \
+ _zzq_args[0] = (unsigned int)(_zzq_request); \
+ _zzq_args[1] = (unsigned int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned int)(_zzq_arg5); \
+ __asm__ volatile("mov r3, %1\n\t" /*default*/ \
+ "mov r4, %2\n\t" /*ptr*/ \
__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = client_request ( %R4 ) */ \
- "or 1,1,1\n\t" \
- "mr %0,3" \
- : "=b" (_zzq_result) \
- : "b" (_zzq_ptr) \
- : "r3", "r4", "cc", "memory"); \
- _zzq_rlval = _zzq_result; \
- (void)_zzq_rlval; \
- }
+ /* R3 = client_request ( R4 ) */ \
+ "orr r10, r10, r10\n\t" \
+ "mov %0, r3" /*result*/ \
+ : "=r" (_zzq_result) \
+ : "r" (_zzq_default), "r" (&_zzq_args[0]) \
+ : "cc","memory", "r3", "r4"); \
+ _zzq_result; \
+ })
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- register unsigned long long int __addr; \
+ unsigned int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = guest_NRADDR */ \
- "or 2,2,2\n\t" \
- "mr %0,3" \
- : "=b" (__addr) \
+ /* R3 = guest_NRADDR */ \
+ "orr r11, r11, r11\n\t" \
+ "mov %0, r3" \
+ : "=r" (__addr) \
: \
- : "r3", "cc", "memory" \
+ : "cc", "memory", "r3" \
); \
_zzq_orig->nraddr = __addr; \
+ }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* branch-and-link-to-noredir *%R4 */ \
+ "orr r12, r12, r12\n\t"
+
+#define VALGRIND_VEX_INJECT_IR() \
+ do { \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ "orr r9, r9, r9\n\t" \
+ : : : "cc", "memory" \
+ ); \
+ } while (0)
+
+#endif /* PLAT_arm_linux */
+
+/* ------------------------ arm64-linux ------------------------- */
+
+#if defined(PLAT_arm64_linux)
+
+typedef
+ struct {
+ unsigned long int nraddr; /* where's the code? */
+ }
+ OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "ror x12, x12, #3 ; ror x12, x12, #13 \n\t" \
+ "ror x12, x12, #51 ; ror x12, x12, #61 \n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ \
+ __extension__ \
+ ({volatile unsigned long int _zzq_args[6]; \
+ volatile unsigned long int _zzq_result; \
+ _zzq_args[0] = (unsigned long int)(_zzq_request); \
+ _zzq_args[1] = (unsigned long int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned long int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned long int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned long int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned long int)(_zzq_arg5); \
+ __asm__ volatile("mov x3, %1\n\t" /*default*/ \
+ "mov x4, %2\n\t" /*ptr*/ \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* X3 = client_request ( X4 ) */ \
+ "orr x10, x10, x10\n\t" \
+ "mov %0, x3" /*result*/ \
+ : "=r" (_zzq_result) \
+ : "r" ((unsigned long int)(_zzq_default)), \
+ "r" (&_zzq_args[0]) \
+ : "cc","memory", "x3", "x4"); \
+ _zzq_result; \
+ })
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ unsigned long int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = guest_NRADDR_GPR2 */ \
- "or 4,4,4\n\t" \
- "mr %0,3" \
- : "=b" (__addr) \
+ /* X3 = guest_NRADDR */ \
+ "orr x11, x11, x11\n\t" \
+ "mov %0, x3" \
+ : "=r" (__addr) \
: \
- : "r3", "cc", "memory" \
+ : "cc", "memory", "x3" \
); \
- _zzq_orig->r2 = __addr; \
+ _zzq_orig->nraddr = __addr; \
}
-#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
__SPECIAL_INSTRUCTION_PREAMBLE \
- /* branch-and-link-to-noredir *%R11 */ \
- "or 3,3,3\n\t"
+ /* branch-and-link-to-noredir X8 */ \
+ "orr x12, x12, x12\n\t"
+
+#define VALGRIND_VEX_INJECT_IR() \
+ do { \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ "orr x9, x9, x9\n\t" \
+ : : : "cc", "memory" \
+ ); \
+ } while (0)
+
+#endif /* PLAT_arm64_linux */
+
+/* ------------------------ s390x-linux ------------------------ */
+
+#if defined(PLAT_s390x_linux)
+
+typedef
+ struct {
+ unsigned long int nraddr; /* where's the code? */
+ }
+ OrigFn;
+
+/* __SPECIAL_INSTRUCTION_PREAMBLE will be used to identify Valgrind specific
+ * code. This detection is implemented in platform specific toIR.c
+ * (e.g. VEX/priv/guest_s390_decoder.c).
+ */
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "lr 15,15\n\t" \
+ "lr 1,1\n\t" \
+ "lr 2,2\n\t" \
+ "lr 3,3\n\t"
+
+#define __CLIENT_REQUEST_CODE "lr 2,2\n\t"
+#define __GET_NR_CONTEXT_CODE "lr 3,3\n\t"
+#define __CALL_NO_REDIR_CODE "lr 4,4\n\t"
+#define __VEX_INJECT_IR_CODE "lr 5,5\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ __extension__ \
+ ({volatile unsigned long int _zzq_args[6]; \
+ volatile unsigned long int _zzq_result; \
+ _zzq_args[0] = (unsigned long int)(_zzq_request); \
+ _zzq_args[1] = (unsigned long int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned long int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned long int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned long int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned long int)(_zzq_arg5); \
+ __asm__ volatile(/* r2 = args */ \
+ "lgr 2,%1\n\t" \
+ /* r3 = default */ \
+ "lgr 3,%2\n\t" \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ __CLIENT_REQUEST_CODE \
+ /* results = r3 */ \
+ "lgr %0, 3\n\t" \
+ : "=d" (_zzq_result) \
+ : "a" (&_zzq_args[0]), "0" (_zzq_default) \
+ : "cc", "2", "3", "memory" \
+ ); \
+ _zzq_result; \
+ })
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ volatile unsigned long int __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ __GET_NR_CONTEXT_CODE \
+ "lgr %0, 3\n\t" \
+ : "=a" (__addr) \
+ : \
+ : "cc", "3", "memory" \
+ ); \
+ _zzq_orig->nraddr = __addr; \
+ }
+
+#define VALGRIND_CALL_NOREDIR_R1 \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ __CALL_NO_REDIR_CODE
+
+#define VALGRIND_VEX_INJECT_IR() \
+ do { \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ __VEX_INJECT_IR_CODE); \
+ } while (0)
+
+#endif /* PLAT_s390x_linux */
+
+/* ------------------------- mips32-linux ---------------- */
+
+#if defined(PLAT_mips32_linux)
+
+typedef
+ struct {
+ unsigned int nraddr; /* where's the code? */
+ }
+ OrigFn;
-#endif /* PLAT_ppc64_aix5 */
+/* .word 0x342
+ * .word 0x742
+ * .word 0xC2
+ * .word 0x4C2*/
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "srl $0, $0, 13\n\t" \
+ "srl $0, $0, 29\n\t" \
+ "srl $0, $0, 3\n\t" \
+ "srl $0, $0, 19\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ __extension__ \
+ ({ volatile unsigned int _zzq_args[6]; \
+ volatile unsigned int _zzq_result; \
+ _zzq_args[0] = (unsigned int)(_zzq_request); \
+ _zzq_args[1] = (unsigned int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned int)(_zzq_arg5); \
+ __asm__ volatile("move $11, %1\n\t" /*default*/ \
+ "move $12, %2\n\t" /*ptr*/ \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* T3 = client_request ( T4 ) */ \
+ "or $13, $13, $13\n\t" \
+ "move %0, $11\n\t" /*result*/ \
+ : "=r" (_zzq_result) \
+ : "r" (_zzq_default), "r" (&_zzq_args[0]) \
+ : "$11", "$12", "memory"); \
+ _zzq_result; \
+ })
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ volatile unsigned int __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %t9 = guest_NRADDR */ \
+ "or $14, $14, $14\n\t" \
+ "move %0, $11" /*result*/ \
+ : "=r" (__addr) \
+ : \
+ : "$11" \
+ ); \
+ _zzq_orig->nraddr = __addr; \
+ }
+
+#define VALGRIND_CALL_NOREDIR_T9 \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* call-noredir *%t9 */ \
+ "or $15, $15, $15\n\t"
+
+#define VALGRIND_VEX_INJECT_IR() \
+ do { \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ "or $11, $11, $11\n\t" \
+ ); \
+ } while (0)
+
+
+#endif /* PLAT_mips32_linux */
+
+/* ------------------------- mips64-linux ---------------- */
+
+#if defined(PLAT_mips64_linux)
+
+typedef
+ struct {
+ unsigned long nraddr; /* where's the code? */
+ }
+ OrigFn;
+
+/* dsll $0,$0, 3
+ * dsll $0,$0, 13
+ * dsll $0,$0, 29
+ * dsll $0,$0, 19*/
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "dsll $0,$0, 3 ; dsll $0,$0,13\n\t" \
+ "dsll $0,$0,29 ; dsll $0,$0,19\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ __extension__ \
+ ({ volatile unsigned long int _zzq_args[6]; \
+ volatile unsigned long int _zzq_result; \
+ _zzq_args[0] = (unsigned long int)(_zzq_request); \
+ _zzq_args[1] = (unsigned long int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned long int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned long int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned long int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned long int)(_zzq_arg5); \
+ __asm__ volatile("move $11, %1\n\t" /*default*/ \
+ "move $12, %2\n\t" /*ptr*/ \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* $11 = client_request ( $12 ) */ \
+ "or $13, $13, $13\n\t" \
+ "move %0, $11\n\t" /*result*/ \
+ : "=r" (_zzq_result) \
+ : "r" (_zzq_default), "r" (&_zzq_args[0]) \
+ : "$11", "$12", "memory"); \
+ _zzq_result; \
+ })
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ volatile unsigned long int __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* $11 = guest_NRADDR */ \
+ "or $14, $14, $14\n\t" \
+ "move %0, $11" /*result*/ \
+ : "=r" (__addr) \
+ : \
+ : "$11"); \
+ _zzq_orig->nraddr = __addr; \
+ }
+
+#define VALGRIND_CALL_NOREDIR_T9 \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* call-noredir $25 */ \
+ "or $15, $15, $15\n\t"
+
+#define VALGRIND_VEX_INJECT_IR() \
+ do { \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ "or $11, $11, $11\n\t" \
+ ); \
+ } while (0)
+
+#endif /* PLAT_mips64_linux */
/* Insert assembly code for other platforms here... */
@@ -586,13 +1074,20 @@ typedef
*/
/* Use these to write the name of your wrapper. NOTE: duplicates
- VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h. */
+ VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h. NOTE also: inserts
+ the default behaviour equivalance class tag "0000" into the name.
+ See pub_tool_redir.h for details -- normally you don't need to
+ think about this, though. */
+
+/* Use an extra level of macroisation so as to ensure the soname/fnname
+ args are fully macro-expanded before pasting them together. */
+#define VG_CONCAT4(_aa,_bb,_cc,_dd) _aa##_bb##_cc##_dd
#define I_WRAP_SONAME_FNNAME_ZU(soname,fnname) \
- _vgwZU_##soname##_##fnname
+ VG_CONCAT4(_vgw00000ZU_,soname,_,fnname)
#define I_WRAP_SONAME_FNNAME_ZZ(soname,fnname) \
- _vgwZZ_##soname##_##fnname
+ VG_CONCAT4(_vgw00000ZZ_,soname,_,fnname)
/* Use this macro from within a wrapper function to collect the
context (address and possibly other info) of the original function.
@@ -600,6 +1095,18 @@ typedef
macros. The type of the argument _lval is OrigFn. */
#define VALGRIND_GET_ORIG_FN(_lval) VALGRIND_GET_NR_CONTEXT(_lval)
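/* A sketch of a wrapper, following the pattern Valgrind's manual
   documents for function wrapping ("NONE" matches objects with an
   empty soname, e.g. the main executable); foo() is hypothetical. */
extern int printf(const char *, ...);

int I_WRAP_SONAME_FNNAME_ZU(NONE, foo)(int x, int y)
{
    int    result;
    OrigFn fn;
    VALGRIND_GET_ORIG_FN(fn);        /* capture the original foo */
    printf("foo wrapper: args %d %d\n", x, y);
    CALL_FN_W_WW(result, fn, x, y);  /* call onwards: word result, two word args */
    return result;
}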
+/* Also provide end-user facilities for function replacement, rather
+ than wrapping. A replacement function differs from a wrapper in
+ that it has no way to get hold of the original function being
+ called, and hence no way to call onwards to it. In a replacement
+ function, VALGRIND_GET_ORIG_FN always returns zero. */
+
+#define I_REPLACE_SONAME_FNNAME_ZU(soname,fnname) \
+ VG_CONCAT4(_vgr00000ZU_,soname,_,fnname)
+
+#define I_REPLACE_SONAME_FNNAME_ZZ(soname,fnname) \
+ VG_CONCAT4(_vgr00000ZZ_,soname,_,fnname)
+
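/* Sketch: a replacement, by contrast, never calls onwards -- inside it
   VALGRIND_GET_ORIG_FN yields zero.  bogus_rand() is hypothetical. */
int I_REPLACE_SONAME_FNNAME_ZU(NONE, bogus_rand)(void)
{
    return 4;   /* deterministic stand-in while running under Valgrind */
}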
/* Derivatives of the main macros below, for calling functions
returning void. */
@@ -619,14 +1126,42 @@ typedef
do { volatile unsigned long _junk; \
CALL_FN_W_WWW(_junk,fnptr,arg1,arg2,arg3); } while (0)
-/* ------------------------- x86-linux ------------------------- */
+#define CALL_FN_v_WWWW(fnptr, arg1,arg2,arg3,arg4) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_WWWW(_junk,fnptr,arg1,arg2,arg3,arg4); } while (0)
+
+#define CALL_FN_v_5W(fnptr, arg1,arg2,arg3,arg4,arg5) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_5W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5); } while (0)
+
+#define CALL_FN_v_6W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_6W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6); } while (0)
+
+#define CALL_FN_v_7W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6,arg7) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_7W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6,arg7); } while (0)
+
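/* Sketch: wrapping a void function uses the CALL_FN_v_* derivatives,
   which junk the word-size result; notify() is hypothetical. */
void I_WRAP_SONAME_FNNAME_ZU(NONE, notify)(long code)
{
    OrigFn fn;
    VALGRIND_GET_ORIG_FN(fn);
    CALL_FN_v_W(fn, code);   /* CALL_FN_W_W with the result discarded */
}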
+/* ----------------- x86-{linux,darwin,solaris} ---------------- */
-#if defined(PLAT_x86_linux)
+#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) \
+ || defined(PLAT_x86_solaris)
/* These regs are trashed by the hidden call. No need to mention eax
as gcc can already see that, plus causes gcc to bomb. */
#define __CALLER_SAVED_REGS /*"eax"*/ "ecx", "edx"
+/* Macros to save and align the stack before making a function
+ call and restore it afterwards as gcc may not keep the stack
+ pointer aligned if it doesn't realise calls are being made
+ to other functions. */
+
+#define VALGRIND_ALIGN_STACK \
+ "movl %%esp,%%edi\n\t" \
+ "andl $0xfffffff0,%%esp\n\t"
+#define VALGRIND_RESTORE_STACK \
+ "movl %%edi,%%esp\n\t"
+
/* These CALL_FN_ macros assume that on x86-linux, sizeof(unsigned
long) == 4. */
@@ -637,11 +1172,13 @@ typedef
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -654,13 +1191,15 @@ typedef
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "subl $12, %%esp\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $4, %%esp\n" \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -674,14 +1213,16 @@ typedef
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "subl $8, %%esp\n\t" \
"pushl 8(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $8, %%esp\n" \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -696,15 +1237,17 @@ typedef
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "subl $4, %%esp\n\t" \
"pushl 12(%%eax)\n\t" \
"pushl 8(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $12, %%esp\n" \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -720,16 +1263,17 @@ typedef
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"pushl 16(%%eax)\n\t" \
"pushl 12(%%eax)\n\t" \
"pushl 8(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $16, %%esp\n" \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -746,6 +1290,8 @@ typedef
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "subl $12, %%esp\n\t" \
"pushl 20(%%eax)\n\t" \
"pushl 16(%%eax)\n\t" \
"pushl 12(%%eax)\n\t" \
@@ -753,10 +1299,10 @@ typedef
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $20, %%esp\n" \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -774,6 +1320,8 @@ typedef
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "subl $8, %%esp\n\t" \
"pushl 24(%%eax)\n\t" \
"pushl 20(%%eax)\n\t" \
"pushl 16(%%eax)\n\t" \
@@ -782,10 +1330,10 @@ typedef
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $24, %%esp\n" \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -805,6 +1353,8 @@ typedef
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "subl $4, %%esp\n\t" \
"pushl 28(%%eax)\n\t" \
"pushl 24(%%eax)\n\t" \
"pushl 20(%%eax)\n\t" \
@@ -814,10 +1364,10 @@ typedef
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $28, %%esp\n" \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -838,6 +1388,7 @@ typedef
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"pushl 32(%%eax)\n\t" \
"pushl 28(%%eax)\n\t" \
"pushl 24(%%eax)\n\t" \
@@ -848,10 +1399,10 @@ typedef
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $32, %%esp\n" \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -873,6 +1424,8 @@ typedef
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "subl $12, %%esp\n\t" \
"pushl 36(%%eax)\n\t" \
"pushl 32(%%eax)\n\t" \
"pushl 28(%%eax)\n\t" \
@@ -884,10 +1437,10 @@ typedef
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $36, %%esp\n" \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -910,6 +1463,8 @@ typedef
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "subl $8, %%esp\n\t" \
"pushl 40(%%eax)\n\t" \
"pushl 36(%%eax)\n\t" \
"pushl 32(%%eax)\n\t" \
@@ -922,10 +1477,10 @@ typedef
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $40, %%esp\n" \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -950,6 +1505,8 @@ typedef
_argvec[10] = (unsigned long)(arg10); \
_argvec[11] = (unsigned long)(arg11); \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "subl $4, %%esp\n\t" \
"pushl 44(%%eax)\n\t" \
"pushl 40(%%eax)\n\t" \
"pushl 36(%%eax)\n\t" \
@@ -963,10 +1520,10 @@ typedef
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $44, %%esp\n" \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -992,6 +1549,7 @@ typedef
_argvec[11] = (unsigned long)(arg11); \
_argvec[12] = (unsigned long)(arg12); \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"pushl 48(%%eax)\n\t" \
"pushl 44(%%eax)\n\t" \
"pushl 40(%%eax)\n\t" \
@@ -1006,19 +1564,20 @@ typedef
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $48, %%esp\n" \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
-#endif /* PLAT_x86_linux */
+#endif /* PLAT_x86_linux || PLAT_x86_darwin || PLAT_x86_solaris */
-/* ------------------------ amd64-linux ------------------------ */
+/* ---------------- amd64-{linux,darwin,solaris} --------------- */
-#if defined(PLAT_amd64_linux)
+#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin) \
+ || defined(PLAT_amd64_solaris)
/* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */
@@ -1026,6 +1585,88 @@ typedef
#define __CALLER_SAVED_REGS /*"rax",*/ "rcx", "rdx", "rsi", \
"rdi", "r8", "r9", "r10", "r11"
+/* This is all pretty complex. It's so as to make stack unwinding
+ work reliably. See bug 243270. The basic problem is the sub and
+ add of 128 of %rsp in all of the following macros. If gcc believes
+ the CFA is in %rsp, then unwinding may fail, because what's at the
+ CFA is not what gcc "expected" when it constructs the CFIs for the
+ places where the macros are instantiated.
+
+ But we can't just add a CFI annotation to increase the CFA offset
+ by 128, to match the sub of 128 from %rsp, because we don't know
+ whether gcc has chosen %rsp as the CFA at that point, or whether it
+ has chosen some other register (eg, %rbp). In the latter case,
+ adding a CFI annotation to change the CFA offset is simply wrong.
+
+ So the solution is to get hold of the CFA using
+ __builtin_dwarf_cfa(), put it in a known register, and add a
+ CFI annotation to say what the register is. We choose %rbp for
+ this (perhaps perversely), because:
+
+ (1) %rbp is already subject to unwinding. If a new register was
+ chosen then the unwinder would have to unwind it in all stack
+ traces, which is expensive, and
+
+ (2) %rbp is already subject to precise exception updates in the
+ JIT. If a new register was chosen, we'd have to have precise
+ exceptions for it too, which reduces performance of the
+ generated code.
+
+ However .. one extra complication. We can't just whack the result
+ of __builtin_dwarf_cfa() into %rbp and then add %rbp to the
+ list of trashed registers at the end of the inline assembly
+ fragments; gcc won't allow %rbp to appear in that list. Hence
+ instead we need to stash %rbp in %r15 for the duration of the asm,
+ and say that %r15 is trashed instead. gcc seems happy to go with
+ that.
+
+ Oh .. and this all needs to be conditionalised so that it is
+ unchanged from before this commit, when compiled with older gccs
+ that don't support __builtin_dwarf_cfa. Furthermore, since
+ this header file is freestanding, it has to be independent of
+ config.h, and so the following conditionalisation cannot depend on
+ configure time checks.
+
+ Although it's not clear from
+ 'defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)',
+ this expression excludes Darwin.
+ .cfi directives in Darwin assembly appear to be completely
+ different and I haven't investigated how they work.
+
+ For even more entertainment value, note we have to use the
+ completely undocumented __builtin_dwarf_cfa(), which appears to
+ really compute the CFA, whereas __builtin_frame_address(0) claims
+ to but actually doesn't. See
+ https://bugs.kde.org/show_bug.cgi?id=243270#c47
+*/
+#if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)
+# define __FRAME_POINTER \
+ ,"r"(__builtin_dwarf_cfa())
+# define VALGRIND_CFI_PROLOGUE \
+ "movq %%rbp, %%r15\n\t" \
+ "movq %2, %%rbp\n\t" \
+ ".cfi_remember_state\n\t" \
+ ".cfi_def_cfa rbp, 0\n\t"
+# define VALGRIND_CFI_EPILOGUE \
+ "movq %%r15, %%rbp\n\t" \
+ ".cfi_restore_state\n\t"
+#else
+# define __FRAME_POINTER
+# define VALGRIND_CFI_PROLOGUE
+# define VALGRIND_CFI_EPILOGUE
+#endif
+
+/* Macros to save and align the stack before making a function
+ call and restore it afterwards as gcc may not keep the stack
+ pointer aligned if it doesn't realise calls are being made
+ to other functions. */
+
+#define VALGRIND_ALIGN_STACK \
+ "movq %%rsp,%%r14\n\t" \
+ "andq $0xfffffffffffffff0,%%rsp\n\t"
+#define VALGRIND_RESTORE_STACK \
+ "movq %%r14,%%rsp\n\t"
+
/* These CALL_FN_ macros assume that on amd64-linux, sizeof(unsigned
long) == 8. */
@@ -1045,414 +1686,447 @@ typedef
redzone, for the duration of the hidden call, to make it safe.
Probably the same problem afflicts the other redzone-style ABIs too
- (ppc64-linux, ppc32-aix5, ppc64-aix5); but for those, the stack is
+ (ppc64-linux); but for those, the stack is
self describing (none of this CFI nonsense) so at least messing
with the stack pointer doesn't give a danger of non-unwindable
stack. */
-#define CALL_FN_W_v(lval, orig) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[1]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- __asm__ volatile( \
- "subq $128,%%rsp\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $128,%%rsp\n\t" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[1]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ VALGRIND_ALIGN_STACK \
+ "subq $128,%%rsp\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ VALGRIND_RESTORE_STACK \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
} while (0)
-#define CALL_FN_W_W(lval, orig, arg1) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[2]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- __asm__ volatile( \
- "subq $128,%%rsp\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $128,%%rsp\n\t" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[2]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ VALGRIND_ALIGN_STACK \
+ "subq $128,%%rsp\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ VALGRIND_RESTORE_STACK \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
} while (0)
-#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- __asm__ volatile( \
- "subq $128,%%rsp\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $128,%%rsp\n\t" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ VALGRIND_ALIGN_STACK \
+ "subq $128,%%rsp\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ VALGRIND_RESTORE_STACK \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
} while (0)
-#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[4]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- __asm__ volatile( \
- "subq $128,%%rsp\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $128,%%rsp\n\t" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[4]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ VALGRIND_ALIGN_STACK \
+ "subq $128,%%rsp\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ VALGRIND_RESTORE_STACK \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
} while (0)
-#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[5]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- __asm__ volatile( \
- "subq $128,%%rsp\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $128,%%rsp\n\t" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[5]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ VALGRIND_ALIGN_STACK \
+ "subq $128,%%rsp\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ VALGRIND_RESTORE_STACK \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
} while (0)
-#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[6]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- __asm__ volatile( \
- "subq $128,%%rsp\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $128,%%rsp\n\t" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[6]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ VALGRIND_ALIGN_STACK \
+ "subq $128,%%rsp\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ VALGRIND_RESTORE_STACK \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
} while (0)
-#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[7]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- __asm__ volatile( \
- "subq $128,%%rsp\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- "addq $128,%%rsp\n\t" \
- VALGRIND_CALL_NOREDIR_RAX \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[7]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ VALGRIND_ALIGN_STACK \
+ "subq $128,%%rsp\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ VALGRIND_RESTORE_STACK \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
} while (0)
-#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[8]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- __asm__ volatile( \
- "subq $128,%%rsp\n\t" \
- "pushq 56(%%rax)\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $8, %%rsp\n" \
- "addq $128,%%rsp\n\t" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[8]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ VALGRIND_ALIGN_STACK \
+ "subq $136,%%rsp\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ VALGRIND_RESTORE_STACK \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
} while (0)
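(Why "$136" here when the even-stacked-argument variants keep "$128",
as read from the code: after VALGRIND_ALIGN_STACK the stack pointer is
16-byte aligned and each stacked argument is an 8-byte push.  With one
push, 136 + 8 = 144 = 9 * 16; with two, 128 + 16 = 144 -- compare
CALL_FN_W_8W just below.  Odd push counts therefore pair with $136 and
even ones with $128, keeping %rsp 16-byte aligned at the call in every
variant.)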
-#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[9]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- __asm__ volatile( \
- "subq $128,%%rsp\n\t" \
- "pushq 64(%%rax)\n\t" \
- "pushq 56(%%rax)\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $16, %%rsp\n" \
- "addq $128,%%rsp\n\t" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[9]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ VALGRIND_ALIGN_STACK \
+ "subq $128,%%rsp\n\t" \
+ "pushq 64(%%rax)\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ VALGRIND_RESTORE_STACK \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
} while (0)
-#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[10]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- __asm__ volatile( \
- "subq $128,%%rsp\n\t" \
- "pushq 72(%%rax)\n\t" \
- "pushq 64(%%rax)\n\t" \
- "pushq 56(%%rax)\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $24, %%rsp\n" \
- "addq $128,%%rsp\n\t" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[10]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ VALGRIND_ALIGN_STACK \
+ "subq $136,%%rsp\n\t" \
+ "pushq 72(%%rax)\n\t" \
+ "pushq 64(%%rax)\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ VALGRIND_RESTORE_STACK \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
} while (0)
-#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[11]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- __asm__ volatile( \
- "subq $128,%%rsp\n\t" \
- "pushq 80(%%rax)\n\t" \
- "pushq 72(%%rax)\n\t" \
- "pushq 64(%%rax)\n\t" \
- "pushq 56(%%rax)\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $32, %%rsp\n" \
- "addq $128,%%rsp\n\t" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[11]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ VALGRIND_ALIGN_STACK \
+ "subq $128,%%rsp\n\t" \
+ "pushq 80(%%rax)\n\t" \
+ "pushq 72(%%rax)\n\t" \
+ "pushq 64(%%rax)\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ VALGRIND_RESTORE_STACK \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
} while (0)
-#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[12]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- _argvec[11] = (unsigned long)(arg11); \
- __asm__ volatile( \
- "subq $128,%%rsp\n\t" \
- "pushq 88(%%rax)\n\t" \
- "pushq 80(%%rax)\n\t" \
- "pushq 72(%%rax)\n\t" \
- "pushq 64(%%rax)\n\t" \
- "pushq 56(%%rax)\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $40, %%rsp\n" \
- "addq $128,%%rsp\n\t" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[12]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ VALGRIND_ALIGN_STACK \
+ "subq $136,%%rsp\n\t" \
+ "pushq 88(%%rax)\n\t" \
+ "pushq 80(%%rax)\n\t" \
+ "pushq 72(%%rax)\n\t" \
+ "pushq 64(%%rax)\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ VALGRIND_RESTORE_STACK \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
} while (0)
-#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11,arg12) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[13]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- _argvec[11] = (unsigned long)(arg11); \
- _argvec[12] = (unsigned long)(arg12); \
- __asm__ volatile( \
- "subq $128,%%rsp\n\t" \
- "pushq 96(%%rax)\n\t" \
- "pushq 88(%%rax)\n\t" \
- "pushq 80(%%rax)\n\t" \
- "pushq 72(%%rax)\n\t" \
- "pushq 64(%%rax)\n\t" \
- "pushq 56(%%rax)\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $48, %%rsp\n" \
- "addq $128,%%rsp\n\t" \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[13]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ _argvec[12] = (unsigned long)(arg12); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ VALGRIND_ALIGN_STACK \
+ "subq $128,%%rsp\n\t" \
+ "pushq 96(%%rax)\n\t" \
+ "pushq 88(%%rax)\n\t" \
+ "pushq 80(%%rax)\n\t" \
+ "pushq 72(%%rax)\n\t" \
+ "pushq 64(%%rax)\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ VALGRIND_RESTORE_STACK \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
} while (0)
-#endif /* PLAT_amd64_linux */
+#endif /* PLAT_amd64_linux || PLAT_amd64_darwin || PLAT_amd64_solaris */
/* ------------------------ ppc32-linux ------------------------ */
@@ -1488,6 +2162,17 @@ typedef
"r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
"r11", "r12", "r13"
+/* Macros to save and align the stack before making a function
+ call and restore it afterwards as gcc may not keep the stack
+ pointer aligned if it doesn't realise calls are being made
+ to other functions. */
+
+#define VALGRIND_ALIGN_STACK \
+ "mr 28,1\n\t" \
+ "rlwinm 1,1,0,0,27\n\t"
+#define VALGRIND_RESTORE_STACK \
+ "mr 1,28\n\t"
+
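(A reading of the ppc32 pair above: "rlwinm 1,1,0,0,27" rotates r1 by
zero bits and keeps bits 0..27 in IBM numbering, clearing the four
low-order bits -- the same round-down-to-16 as the amd64 "andq", with
r28 as the save slot; hence the "r28" added to every clobber list
below.  E.g. r1 = 0xbfff0f1c -> 0xbfff0f10.)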
/* These CALL_FN_ macros assume that on ppc32-linux,
sizeof(unsigned long) == 4. */
@@ -1498,13 +2183,15 @@ typedef
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -1517,14 +2204,16 @@ typedef
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"lwz 3,4(11)\n\t" /* arg1->r3 */ \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -1538,15 +2227,17 @@ typedef
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"lwz 3,4(11)\n\t" /* arg1->r3 */ \
"lwz 4,8(11)\n\t" \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -1561,16 +2252,18 @@ typedef
_argvec[2] = (unsigned long)arg2; \
_argvec[3] = (unsigned long)arg3; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"lwz 3,4(11)\n\t" /* arg1->r3 */ \
"lwz 4,8(11)\n\t" \
"lwz 5,12(11)\n\t" \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -1586,6 +2279,7 @@ typedef
_argvec[3] = (unsigned long)arg3; \
_argvec[4] = (unsigned long)arg4; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"lwz 3,4(11)\n\t" /* arg1->r3 */ \
"lwz 4,8(11)\n\t" \
@@ -1593,10 +2287,11 @@ typedef
"lwz 6,16(11)\n\t" /* arg4->r6 */ \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -1613,6 +2308,7 @@ typedef
_argvec[4] = (unsigned long)arg4; \
_argvec[5] = (unsigned long)arg5; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"lwz 3,4(11)\n\t" /* arg1->r3 */ \
"lwz 4,8(11)\n\t" \
@@ -1621,10 +2317,11 @@ typedef
"lwz 7,20(11)\n\t" \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -1642,6 +2339,7 @@ typedef
_argvec[5] = (unsigned long)arg5; \
_argvec[6] = (unsigned long)arg6; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"lwz 3,4(11)\n\t" /* arg1->r3 */ \
"lwz 4,8(11)\n\t" \
@@ -1651,10 +2349,11 @@ typedef
"lwz 8,24(11)\n\t" \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -1674,6 +2373,7 @@ typedef
_argvec[6] = (unsigned long)arg6; \
_argvec[7] = (unsigned long)arg7; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"lwz 3,4(11)\n\t" /* arg1->r3 */ \
"lwz 4,8(11)\n\t" \
@@ -1684,10 +2384,11 @@ typedef
"lwz 9,28(11)\n\t" \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -1708,6 +2409,7 @@ typedef
_argvec[7] = (unsigned long)arg7; \
_argvec[8] = (unsigned long)arg8; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"lwz 3,4(11)\n\t" /* arg1->r3 */ \
"lwz 4,8(11)\n\t" \
@@ -1719,10 +2421,11 @@ typedef
"lwz 10,32(11)\n\t" /* arg8->r10 */ \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -1744,6 +2447,7 @@ typedef
_argvec[8] = (unsigned long)arg8; \
_argvec[9] = (unsigned long)arg9; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"addi 1,1,-16\n\t" \
/* arg9 */ \
@@ -1760,11 +2464,11 @@ typedef
"lwz 10,32(11)\n\t" /* arg8->r10 */ \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "addi 1,1,16\n\t" \
+ VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
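(Note on the "addi 1,1,16" removed here, and likewise the matching
frame-contraction addi's dropped from the later ppc32 and ppc64
variants: no one-for-one replacement is needed, because r28 is
captured by VALGRIND_ALIGN_STACK before both the alignment and the
"addi 1,1,-16" frame expansion, so the single "mr 1,28" in
VALGRIND_RESTORE_STACK undoes both adjustments at once.)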
@@ -1787,6 +2491,7 @@ typedef
_argvec[9] = (unsigned long)arg9; \
_argvec[10] = (unsigned long)arg10; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"addi 1,1,-16\n\t" \
/* arg10 */ \
@@ -1806,11 +2511,11 @@ typedef
"lwz 10,32(11)\n\t" /* arg8->r10 */ \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "addi 1,1,16\n\t" \
+ VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -1834,6 +2539,7 @@ typedef
_argvec[10] = (unsigned long)arg10; \
_argvec[11] = (unsigned long)arg11; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"addi 1,1,-32\n\t" \
/* arg11 */ \
@@ -1856,11 +2562,11 @@ typedef
"lwz 10,32(11)\n\t" /* arg8->r10 */ \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "addi 1,1,32\n\t" \
+ VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -1885,6 +2591,7 @@ typedef
_argvec[11] = (unsigned long)arg11; \
_argvec[12] = (unsigned long)arg12; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"addi 1,1,-32\n\t" \
/* arg12 */ \
@@ -1910,11 +2617,11 @@ typedef
"lwz 10,32(11)\n\t" /* arg8->r10 */ \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "addi 1,1,32\n\t" \
+ VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -1923,7 +2630,7 @@ typedef
/* ------------------------ ppc64-linux ------------------------ */
-#if defined(PLAT_ppc64_linux)
+#if defined(PLAT_ppc64be_linux)
/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
@@ -1931,9 +2638,20 @@ typedef
#define __CALLER_SAVED_REGS \
"lr", "ctr", "xer", \
"cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
- "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
+ "r0", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
"r11", "r12", "r13"
+/* Macros to save and align the stack before making a function
+ call and restore it afterwards as gcc may not keep the stack
+ pointer aligned if it doesn't realise calls are being made
+ to other functions. */
+
+#define VALGRIND_ALIGN_STACK \
+ "mr 28,1\n\t" \
+ "rldicr 1,1,0,59\n\t"
+#define VALGRIND_RESTORE_STACK \
+ "mr 1,28\n\t"
+
/* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned
long) == 8. */
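(Two readings of the new ppc64 code: "rldicr 1,1,0,59" keeps bits
0..59 of r1 -- the 64-bit analogue of the ppc32 "rlwinm", again
clearing the low four bits.  And since the macros below pass
&_argvec[2] as %1, the offsets relative to r11 decode as:

     -16(11) = _argvec[0]   scratch; caller's TOC saved by "std 2,-16(11)"
      -8(11) = _argvec[1]   _orig.r2, the callee's TOC pointer
       0(11) = _argvec[2]   _orig.nraddr, the call target
      8(11)+ = the arguments)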
@@ -1946,6 +2664,7 @@ typedef
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
@@ -1953,10 +2672,11 @@ typedef
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
- "ld 2,-16(11)" /* restore tocptr */ \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -1971,6 +2691,7 @@ typedef
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
@@ -1979,10 +2700,11 @@ typedef
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
- "ld 2,-16(11)" /* restore tocptr */ \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -1998,6 +2720,7 @@ typedef
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
@@ -2007,10 +2730,11 @@ typedef
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
- "ld 2,-16(11)" /* restore tocptr */ \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -2027,6 +2751,7 @@ typedef
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
@@ -2037,10 +2762,11 @@ typedef
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
- "ld 2,-16(11)" /* restore tocptr */ \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -2058,6 +2784,7 @@ typedef
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
@@ -2069,10 +2796,11 @@ typedef
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
- "ld 2,-16(11)" /* restore tocptr */ \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -2091,6 +2819,7 @@ typedef
_argvec[2+4] = (unsigned long)arg4; \
_argvec[2+5] = (unsigned long)arg5; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
@@ -2103,10 +2832,11 @@ typedef
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
- "ld 2,-16(11)" /* restore tocptr */ \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -2126,6 +2856,7 @@ typedef
_argvec[2+5] = (unsigned long)arg5; \
_argvec[2+6] = (unsigned long)arg6; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
@@ -2139,10 +2870,11 @@ typedef
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
- "ld 2,-16(11)" /* restore tocptr */ \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -2164,6 +2896,7 @@ typedef
_argvec[2+6] = (unsigned long)arg6; \
_argvec[2+7] = (unsigned long)arg7; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
@@ -2178,10 +2911,11 @@ typedef
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
- "ld 2,-16(11)" /* restore tocptr */ \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -2204,6 +2938,7 @@ typedef
_argvec[2+7] = (unsigned long)arg7; \
_argvec[2+8] = (unsigned long)arg8; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
@@ -2219,10 +2954,11 @@ typedef
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
- "ld 2,-16(11)" /* restore tocptr */ \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -2246,6 +2982,7 @@ typedef
_argvec[2+8] = (unsigned long)arg8; \
_argvec[2+9] = (unsigned long)arg9; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
@@ -2267,10 +3004,10 @@ typedef
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(11)\n\t" /* restore tocptr */ \
- "addi 1,1,128" /* restore frame */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -2295,6 +3032,7 @@ typedef
_argvec[2+9] = (unsigned long)arg9; \
_argvec[2+10] = (unsigned long)arg10; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
@@ -2319,10 +3057,10 @@ typedef
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(11)\n\t" /* restore tocptr */ \
- "addi 1,1,128" /* restore frame */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -2348,6 +3086,7 @@ typedef
_argvec[2+10] = (unsigned long)arg10; \
_argvec[2+11] = (unsigned long)arg11; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
@@ -2375,10 +3114,10 @@ typedef
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(11)\n\t" /* restore tocptr */ \
- "addi 1,1,144" /* restore frame */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -2405,6 +3144,7 @@ typedef
_argvec[2+11] = (unsigned long)arg11; \
_argvec[2+12] = (unsigned long)arg12; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
@@ -2435,19 +3175,18 @@ typedef
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(11)\n\t" /* restore tocptr */ \
- "addi 1,1,144" /* restore frame */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
-#endif /* PLAT_ppc64_linux */
+#endif /* PLAT_ppc64be_linux */
-/* ------------------------ ppc32-aix5 ------------------------- */
-
-#if defined(PLAT_ppc32_aix5)
+/* ------------------------- ppc64le-linux ----------------------- */
+#if defined(PLAT_ppc64le_linux)
/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
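(The hunks below use r12 where the big-endian code uses r11, and
branch via VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12.  This matches the
little-endian ELFv2 ABI, which expects a function's entry address in
r12 at its global entry point so the callee can derive its TOC from
it.)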
@@ -2455,22 +3194,22 @@ typedef
#define __CALLER_SAVED_REGS \
"lr", "ctr", "xer", \
"cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
- "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
+ "r0", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
"r11", "r12", "r13"
-/* Expand the stack frame, copying enough info that unwinding
- still works. Trashes r3. */
+/* Macros to save and align the stack before making a function
+ call and restore it afterwards as gcc may not keep the stack
+ pointer aligned if it doesn't realise calls are being made
+ to other functions. */
-#define VG_EXPAND_FRAME_BY_trashes_r3(_n_fr) \
- "addi 1,1,-" #_n_fr "\n\t" \
- "lwz 3," #_n_fr "(1)\n\t" \
- "stw 3,0(1)\n\t"
+#define VALGRIND_ALIGN_STACK \
+ "mr 28,1\n\t" \
+ "rldicr 1,1,0,59\n\t"
+#define VALGRIND_RESTORE_STACK \
+ "mr 1,28\n\t"
-#define VG_CONTRACT_FRAME_BY(_n_fr) \
- "addi 1,1," #_n_fr "\n\t"
-
-/* These CALL_FN_ macros assume that on ppc32-aix5, sizeof(unsigned
- long) == 4. */
+/* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned
+ long) == 8. */
#define CALL_FN_W_v(lval, orig) \
do { \
@@ -2481,19 +3220,19 @@ typedef
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
+ VALGRIND_ALIGN_STACK \
+ "mr 12,%1\n\t" \
+ "std 2,-16(12)\n\t" /* save tocptr */ \
+ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
+ "ld 12, 0(12)\n\t" /* target->r12 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
+ "mr 12,%1\n\t" \
"mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ "ld 2,-16(12)\n\t" /* restore tocptr */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -2508,20 +3247,20 @@ typedef
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
+ VALGRIND_ALIGN_STACK \
+ "mr 12,%1\n\t" \
+ "std 2,-16(12)\n\t" /* save tocptr */ \
+ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(12)\n\t" /* arg1->r3 */ \
+ "ld 12, 0(12)\n\t" /* target->r12 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
+ "mr 12,%1\n\t" \
"mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ "ld 2,-16(12)\n\t" /* restore tocptr */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -2537,21 +3276,21 @@ typedef
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
+ VALGRIND_ALIGN_STACK \
+ "mr 12,%1\n\t" \
+ "std 2,-16(12)\n\t" /* save tocptr */ \
+ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(12)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(12)\n\t" /* arg2->r4 */ \
+ "ld 12, 0(12)\n\t" /* target->r12 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
+ "mr 12,%1\n\t" \
"mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ "ld 2,-16(12)\n\t" /* restore tocptr */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -2568,22 +3307,22 @@ typedef
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
+ VALGRIND_ALIGN_STACK \
+ "mr 12,%1\n\t" \
+ "std 2,-16(12)\n\t" /* save tocptr */ \
+ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(12)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(12)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(12)\n\t" /* arg3->r5 */ \
+ "ld 12, 0(12)\n\t" /* target->r12 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
+ "mr 12,%1\n\t" \
"mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ "ld 2,-16(12)\n\t" /* restore tocptr */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -2601,23 +3340,23 @@ typedef
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
+ VALGRIND_ALIGN_STACK \
+ "mr 12,%1\n\t" \
+ "std 2,-16(12)\n\t" /* save tocptr */ \
+ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(12)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(12)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(12)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(12)\n\t" /* arg4->r6 */ \
+ "ld 12, 0(12)\n\t" /* target->r12 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
+ "mr 12,%1\n\t" \
"mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ "ld 2,-16(12)\n\t" /* restore tocptr */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -2636,24 +3375,24 @@ typedef
_argvec[2+4] = (unsigned long)arg4; \
_argvec[2+5] = (unsigned long)arg5; \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
- "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
+ VALGRIND_ALIGN_STACK \
+ "mr 12,%1\n\t" \
+ "std 2,-16(12)\n\t" /* save tocptr */ \
+ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(12)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(12)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(12)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(12)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(12)\n\t" /* arg5->r7 */ \
+ "ld 12, 0(12)\n\t" /* target->r12 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
+ "mr 12,%1\n\t" \
"mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ "ld 2,-16(12)\n\t" /* restore tocptr */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -2673,25 +3412,25 @@ typedef
_argvec[2+5] = (unsigned long)arg5; \
_argvec[2+6] = (unsigned long)arg6; \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
- "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
- "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
+ VALGRIND_ALIGN_STACK \
+ "mr 12,%1\n\t" \
+ "std 2,-16(12)\n\t" /* save tocptr */ \
+ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(12)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(12)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(12)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(12)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(12)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(12)\n\t" /* arg6->r8 */ \
+ "ld 12, 0(12)\n\t" /* target->r12 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
+ "mr 12,%1\n\t" \
"mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ "ld 2,-16(12)\n\t" /* restore tocptr */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -2713,26 +3452,26 @@ typedef
_argvec[2+6] = (unsigned long)arg6; \
_argvec[2+7] = (unsigned long)arg7; \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
- "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
- "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
- "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
+ VALGRIND_ALIGN_STACK \
+ "mr 12,%1\n\t" \
+ "std 2,-16(12)\n\t" /* save tocptr */ \
+ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(12)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(12)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(12)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(12)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(12)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(12)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(12)\n\t" /* arg7->r9 */ \
+ "ld 12, 0(12)\n\t" /* target->r12 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
+ "mr 12,%1\n\t" \
"mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ "ld 2,-16(12)\n\t" /* restore tocptr */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -2755,27 +3494,27 @@ typedef
_argvec[2+7] = (unsigned long)arg7; \
_argvec[2+8] = (unsigned long)arg8; \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
- "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
- "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
- "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
- "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
+ VALGRIND_ALIGN_STACK \
+ "mr 12,%1\n\t" \
+ "std 2,-16(12)\n\t" /* save tocptr */ \
+ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(12)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(12)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(12)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(12)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(12)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(12)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(12)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(12)\n\t" /* arg8->r10 */ \
+ "ld 12, 0(12)\n\t" /* target->r12 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
+ "mr 12,%1\n\t" \
"mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ "ld 2,-16(12)\n\t" /* restore tocptr */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -2799,33 +3538,32 @@ typedef
_argvec[2+8] = (unsigned long)arg8; \
_argvec[2+9] = (unsigned long)arg9; \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- VG_EXPAND_FRAME_BY_trashes_r3(64) \
+ VALGRIND_ALIGN_STACK \
+ "mr 12,%1\n\t" \
+ "std 2,-16(12)\n\t" /* save tocptr */ \
+ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
+ "addi 1,1,-128\n\t" /* expand stack frame */ \
/* arg9 */ \
- "lwz 3,36(11)\n\t" \
- "stw 3,56(1)\n\t" \
+ "ld 3,72(12)\n\t" \
+ "std 3,96(1)\n\t" \
/* args1-8 */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
- "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
- "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
- "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
- "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
+ "ld 3, 8(12)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(12)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(12)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(12)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(12)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(12)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(12)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(12)\n\t" /* arg8->r10 */ \
+ "ld 12, 0(12)\n\t" /* target->r12 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
+ "mr 12,%1\n\t" \
"mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(64) \
- VG_CONTRACT_FRAME_BY(512) \
+ "ld 2,-16(12)\n\t" /* restore tocptr */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -2850,36 +3588,35 @@ typedef
_argvec[2+9] = (unsigned long)arg9; \
_argvec[2+10] = (unsigned long)arg10; \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- VG_EXPAND_FRAME_BY_trashes_r3(64) \
+ VALGRIND_ALIGN_STACK \
+ "mr 12,%1\n\t" \
+ "std 2,-16(12)\n\t" /* save tocptr */ \
+ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
+ "addi 1,1,-128\n\t" /* expand stack frame */ \
/* arg10 */ \
- "lwz 3,40(11)\n\t" \
- "stw 3,60(1)\n\t" \
+ "ld 3,80(12)\n\t" \
+ "std 3,104(1)\n\t" \
/* arg9 */ \
- "lwz 3,36(11)\n\t" \
- "stw 3,56(1)\n\t" \
+ "ld 3,72(12)\n\t" \
+ "std 3,96(1)\n\t" \
/* args1-8 */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
- "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
- "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
- "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
- "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
+ "ld 3, 8(12)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(12)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(12)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(12)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(12)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(12)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(12)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(12)\n\t" /* arg8->r10 */ \
+ "ld 12, 0(12)\n\t" /* target->r12 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
+ "mr 12,%1\n\t" \
"mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(64) \
- VG_CONTRACT_FRAME_BY(512) \
+ "ld 2,-16(12)\n\t" /* restore tocptr */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -2905,39 +3642,38 @@ typedef
_argvec[2+10] = (unsigned long)arg10; \
_argvec[2+11] = (unsigned long)arg11; \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- VG_EXPAND_FRAME_BY_trashes_r3(72) \
+ VALGRIND_ALIGN_STACK \
+ "mr 12,%1\n\t" \
+ "std 2,-16(12)\n\t" /* save tocptr */ \
+ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
+ "addi 1,1,-144\n\t" /* expand stack frame */ \
/* arg11 */ \
- "lwz 3,44(11)\n\t" \
- "stw 3,64(1)\n\t" \
+ "ld 3,88(12)\n\t" \
+ "std 3,112(1)\n\t" \
/* arg10 */ \
- "lwz 3,40(11)\n\t" \
- "stw 3,60(1)\n\t" \
+ "ld 3,80(12)\n\t" \
+ "std 3,104(1)\n\t" \
/* arg9 */ \
- "lwz 3,36(11)\n\t" \
- "stw 3,56(1)\n\t" \
+ "ld 3,72(12)\n\t" \
+ "std 3,96(1)\n\t" \
/* args1-8 */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
- "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
- "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
- "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
- "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
+ "ld 3, 8(12)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(12)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(12)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(12)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(12)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(12)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(12)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(12)\n\t" /* arg8->r10 */ \
+ "ld 12, 0(12)\n\t" /* target->r12 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
+ "mr 12,%1\n\t" \
"mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(72) \
- VG_CONTRACT_FRAME_BY(512) \
+ "ld 2,-16(12)\n\t" /* restore tocptr */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -2964,97 +3700,546 @@ typedef
_argvec[2+11] = (unsigned long)arg11; \
_argvec[2+12] = (unsigned long)arg12; \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- VG_EXPAND_FRAME_BY_trashes_r3(72) \
+ VALGRIND_ALIGN_STACK \
+ "mr 12,%1\n\t" \
+ "std 2,-16(12)\n\t" /* save tocptr */ \
+ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
+ "addi 1,1,-144\n\t" /* expand stack frame */ \
/* arg12 */ \
- "lwz 3,48(11)\n\t" \
- "stw 3,68(1)\n\t" \
+ "ld 3,96(12)\n\t" \
+ "std 3,120(1)\n\t" \
/* arg11 */ \
- "lwz 3,44(11)\n\t" \
- "stw 3,64(1)\n\t" \
+ "ld 3,88(12)\n\t" \
+ "std 3,112(1)\n\t" \
/* arg10 */ \
- "lwz 3,40(11)\n\t" \
- "stw 3,60(1)\n\t" \
+ "ld 3,80(12)\n\t" \
+ "std 3,104(1)\n\t" \
/* arg9 */ \
- "lwz 3,36(11)\n\t" \
- "stw 3,56(1)\n\t" \
+ "ld 3,72(12)\n\t" \
+ "std 3,96(1)\n\t" \
/* args1-8 */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
- "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
- "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
- "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
- "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
+ "ld 3, 8(12)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(12)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(12)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(12)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(12)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(12)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(12)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(12)\n\t" /* arg8->r10 */ \
+ "ld 12, 0(12)\n\t" /* target->r12 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
+ "mr 12,%1\n\t" \
"mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(72) \
- VG_CONTRACT_FRAME_BY(512) \
+ "ld 2,-16(12)\n\t" /* restore tocptr */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
-#endif /* PLAT_ppc32_aix5 */
-
-/* ------------------------ ppc64-aix5 ------------------------- */
+#endif /* PLAT_ppc64le_linux */

-#if defined(PLAT_ppc64_aix5)
+/* ------------------------- arm-linux ------------------------- */

-/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+#if defined(PLAT_arm_linux)

/* These regs are trashed by the hidden call. */
-#define __CALLER_SAVED_REGS \
- "lr", "ctr", "xer", \
- "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
- "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
- "r11", "r12", "r13"
+#define __CALLER_SAVED_REGS "r0", "r1", "r2", "r3","r4", "r12", "r14"
+
+/* Macros to save and align the stack before making a function
+ call and restore it afterwards as gcc may not keep the stack
+ pointer aligned if it doesn't realise calls are being made
+ to other functions. */
+
+/* This is a bit tricky. We store the original stack pointer in r10
+ as it is callee-saves. gcc doesn't allow the use of r11 for some
+ reason. Also, we can't directly "bic" the stack pointer in thumb
+ mode since r13 isn't an allowed register number in that context.
+ So use r4 as a temporary, since that is about to get trashed
+ anyway, just after each use of this macro. Side effect is we need
+ to be very careful about any future changes, since
+ VALGRIND_ALIGN_STACK simply assumes r4 is usable. */
+#define VALGRIND_ALIGN_STACK \
+ "mov r10, sp\n\t" \
+ "mov r4, sp\n\t" \
+ "bic r4, r4, #7\n\t" \
+ "mov sp, r4\n\t"
+#define VALGRIND_RESTORE_STACK \
+ "mov sp, r10\n\t"
+
+/* These CALL_FN_ macros assume that on arm-linux, sizeof(unsigned
+ long) == 4. */
+
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[1]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ VALGRIND_RESTORE_STACK \
+ "mov %0, r0\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[2]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ __asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ VALGRIND_RESTORE_STACK \
+ "mov %0, r0\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
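These CALL_FN_ macros are not normally invoked directly; they back valgrind.h's function-wrapping interface. A minimal sketch of a wrapper for a one-argument function, assuming a hypothetical int foo(int) in a libfoo.so (I_WRAP_SONAME_FNNAME_ZU, OrigFn, VALGRIND_GET_ORIG_FN and CALL_FN_W_W are the documented valgrind.h names; the library and function are invented for illustration):

    #include "valgrind.h"

    /* Replacement for int foo(int) from a hypothetical libfoo.so
       ("libfooZdsoZa" Z-decodes to "libfoo.so*"). */
    int I_WRAP_SONAME_FNNAME_ZU(libfooZdsoZa, foo)(int x)
    {
        int    result;
        OrigFn fn;
        VALGRIND_GET_ORIG_FN(fn);    /* nraddr of the real foo          */
        CALL_FN_W_W(result, fn, x);  /* invoke it via the macro above   */
        return result;
    }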
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ __asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ VALGRIND_RESTORE_STACK \
+ "mov %0, r0\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[4]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ __asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ VALGRIND_RESTORE_STACK \
+ "mov %0, r0\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[5]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ __asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ VALGRIND_RESTORE_STACK \
+ "mov %0, r0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)

-/* Expand the stack frame, copying enough info that unwinding
- still works. Trashes r3. */
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[6]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ __asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "sub sp, sp, #4 \n\t" \
+ "ldr r0, [%1, #20] \n\t" \
+ "push {r0} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ VALGRIND_RESTORE_STACK \
+ "mov %0, r0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
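A note on the odd-looking "sub sp, sp, #4" before the single push above: the AAPCS wants sp 8-byte aligned at a public call, and one 4-byte push alone would break that, so 4 bytes of padding are reserved first. Sketched with made-up numbers:

    #include <assert.h>

    int main(void)
    {
        unsigned sp = 0x7ffc1238u;  /* 8-byte aligned after ALIGN_STACK  */
        sp -= 4;                    /* "sub sp, sp, #4"  (padding)       */
        sp -= 4;                    /* "push {r0}"       (arg5)          */
        assert(sp % 8 == 0);        /* sp still aligned at the call      */
        return 0;
    }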

-#define VG_EXPAND_FRAME_BY_trashes_r3(_n_fr)                      \
- "addi 1,1,-" #_n_fr "\n\t" \
- "ld 3," #_n_fr "(1)\n\t" \
- "std 3,0(1)\n\t"
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[7]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ __asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "push {r0, r1} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ VALGRIND_RESTORE_STACK \
+ "mov %0, r0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)

-#define VG_CONTRACT_FRAME_BY(_n_fr)                               \
- "addi 1,1," #_n_fr "\n\t"
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[8]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ __asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "sub sp, sp, #4 \n\t" \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "ldr r2, [%1, #28] \n\t" \
+ "push {r0, r1, r2} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ VALGRIND_RESTORE_STACK \
+ "mov %0, r0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)

-/* These CALL_FN_ macros assume that on ppc64-aix5, sizeof(unsigned
- long) == 8. */
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[9]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ __asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "ldr r2, [%1, #28] \n\t" \
+ "ldr r3, [%1, #32] \n\t" \
+ "push {r0, r1, r2, r3} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ VALGRIND_RESTORE_STACK \
+ "mov %0, r0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[10]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ __asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "sub sp, sp, #4 \n\t" \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "ldr r2, [%1, #28] \n\t" \
+ "ldr r3, [%1, #32] \n\t" \
+ "ldr r4, [%1, #36] \n\t" \
+ "push {r0, r1, r2, r3, r4} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ VALGRIND_RESTORE_STACK \
+ "mov %0, r0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[11]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ __asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "ldr r0, [%1, #40] \n\t" \
+ "push {r0} \n\t" \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "ldr r2, [%1, #28] \n\t" \
+ "ldr r3, [%1, #32] \n\t" \
+ "ldr r4, [%1, #36] \n\t" \
+ "push {r0, r1, r2, r3, r4} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ VALGRIND_RESTORE_STACK \
+ "mov %0, r0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
+ arg6,arg7,arg8,arg9,arg10, \
+ arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[12]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ __asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "sub sp, sp, #4 \n\t" \
+ "ldr r0, [%1, #40] \n\t" \
+ "ldr r1, [%1, #44] \n\t" \
+ "push {r0, r1} \n\t" \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "ldr r2, [%1, #28] \n\t" \
+ "ldr r3, [%1, #32] \n\t" \
+ "ldr r4, [%1, #36] \n\t" \
+ "push {r0, r1, r2, r3, r4} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ VALGRIND_RESTORE_STACK \
+ "mov %0, r0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
+ arg6,arg7,arg8,arg9,arg10, \
+ arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[13]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ _argvec[12] = (unsigned long)(arg12); \
+ __asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "ldr r0, [%1, #40] \n\t" \
+ "ldr r1, [%1, #44] \n\t" \
+ "ldr r2, [%1, #48] \n\t" \
+ "push {r0, r1, r2} \n\t" \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "ldr r2, [%1, #28] \n\t" \
+ "ldr r3, [%1, #32] \n\t" \
+ "ldr r4, [%1, #36] \n\t" \
+ "push {r0, r1, r2, r3, r4} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ VALGRIND_RESTORE_STACK \
+ "mov %0, r0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_arm_linux */
+
+/* ------------------------ arm64-linux ------------------------ */
+
+#if defined(PLAT_arm64_linux)
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS \
+ "x0", "x1", "x2", "x3","x4", "x5", "x6", "x7", "x8", "x9", \
+ "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", \
+ "x18", "x19", "x20", "x30", \
+ "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", \
+ "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", \
+ "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", \
+ "v26", "v27", "v28", "v29", "v30", "v31"
+
+/* x21 is callee-saved, so we can use it to save and restore SP around
+ the hidden call. */
+#define VALGRIND_ALIGN_STACK \
+ "mov x21, sp\n\t" \
+ "bic sp, x21, #15\n\t"
+#define VALGRIND_RESTORE_STACK \
+ "mov sp, x21\n\t"
+
+/* These CALL_FN_ macros assume that on arm64-linux,
+ sizeof(unsigned long) == 8. */
#define CALL_FN_W_v(lval, orig) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+0]; \
+ volatile unsigned long _argvec[1]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ VALGRIND_ALIGN_STACK \
+ "ldr x8, [%1] \n\t" /* target->x8 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
+ VALGRIND_RESTORE_STACK \
+ "mov %0, x0\n" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -3062,27 +4247,20 @@ typedef
#define CALL_FN_W_W(lval, orig, arg1) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+1]; \
+ volatile unsigned long _argvec[2]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ VALGRIND_ALIGN_STACK \
+ "ldr x0, [%1, #8] \n\t" \
+ "ldr x8, [%1] \n\t" /* target->x8 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
+ VALGRIND_RESTORE_STACK \
+ "mov %0, x0\n" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -3090,29 +4268,22 @@ typedef
#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+2]; \
+ volatile unsigned long _argvec[3]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ VALGRIND_ALIGN_STACK \
+ "ldr x0, [%1, #8] \n\t" \
+ "ldr x1, [%1, #16] \n\t" \
+ "ldr x8, [%1] \n\t" /* target->x8 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
+ VALGRIND_RESTORE_STACK \
+ "mov %0, x0\n" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -3120,31 +4291,24 @@ typedef
#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+3]; \
+ volatile unsigned long _argvec[4]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ VALGRIND_ALIGN_STACK \
+ "ldr x0, [%1, #8] \n\t" \
+ "ldr x1, [%1, #16] \n\t" \
+ "ldr x2, [%1, #24] \n\t" \
+ "ldr x8, [%1] \n\t" /* target->x8 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
+ VALGRIND_RESTORE_STACK \
+ "mov %0, x0\n" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -3152,33 +4316,26 @@ typedef
#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+4]; \
+ volatile unsigned long _argvec[5]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ VALGRIND_ALIGN_STACK \
+ "ldr x0, [%1, #8] \n\t" \
+ "ldr x1, [%1, #16] \n\t" \
+ "ldr x2, [%1, #24] \n\t" \
+ "ldr x3, [%1, #32] \n\t" \
+ "ldr x8, [%1] \n\t" /* target->x8 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
+ VALGRIND_RESTORE_STACK \
+ "mov %0, x0" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -3186,35 +4343,28 @@ typedef
#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+5]; \
+ volatile unsigned long _argvec[6]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ VALGRIND_ALIGN_STACK \
+ "ldr x0, [%1, #8] \n\t" \
+ "ldr x1, [%1, #16] \n\t" \
+ "ldr x2, [%1, #24] \n\t" \
+ "ldr x3, [%1, #32] \n\t" \
+ "ldr x4, [%1, #40] \n\t" \
+ "ldr x8, [%1] \n\t" /* target->x8 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
+ VALGRIND_RESTORE_STACK \
+ "mov %0, x0" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -3222,37 +4372,30 @@ typedef
#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+6]; \
+ volatile unsigned long _argvec[7]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ VALGRIND_ALIGN_STACK \
+ "ldr x0, [%1, #8] \n\t" \
+ "ldr x1, [%1, #16] \n\t" \
+ "ldr x2, [%1, #24] \n\t" \
+ "ldr x3, [%1, #32] \n\t" \
+ "ldr x4, [%1, #40] \n\t" \
+ "ldr x5, [%1, #48] \n\t" \
+ "ldr x8, [%1] \n\t" /* target->x8 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
+ VALGRIND_RESTORE_STACK \
+ "mov %0, x0" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -3261,39 +4404,32 @@ typedef
arg7) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+7]; \
+ volatile unsigned long _argvec[8]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ VALGRIND_ALIGN_STACK \
+ "ldr x0, [%1, #8] \n\t" \
+ "ldr x1, [%1, #16] \n\t" \
+ "ldr x2, [%1, #24] \n\t" \
+ "ldr x3, [%1, #32] \n\t" \
+ "ldr x4, [%1, #40] \n\t" \
+ "ldr x5, [%1, #48] \n\t" \
+ "ldr x6, [%1, #56] \n\t" \
+ "ldr x8, [%1] \n\t" /* target->x8 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
+ VALGRIND_RESTORE_STACK \
+ "mov %0, x0" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -3302,41 +4438,34 @@ typedef
arg7,arg8) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+8]; \
+ volatile unsigned long _argvec[9]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 10, 64(11)\n\t" /* arg8->r10 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ VALGRIND_ALIGN_STACK \
+ "ldr x0, [%1, #8] \n\t" \
+ "ldr x1, [%1, #16] \n\t" \
+ "ldr x2, [%1, #24] \n\t" \
+ "ldr x3, [%1, #32] \n\t" \
+ "ldr x4, [%1, #40] \n\t" \
+ "ldr x5, [%1, #48] \n\t" \
+ "ldr x6, [%1, #56] \n\t" \
+ "ldr x7, [%1, #64] \n\t" \
+ "ldr x8, [%1] \n\t" /* target->x8 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
+ VALGRIND_RESTORE_STACK \
+ "mov %0, x0" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -3345,48 +4474,38 @@ typedef
arg7,arg8,arg9) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+9]; \
+ volatile unsigned long _argvec[10]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- VG_EXPAND_FRAME_BY_trashes_r3(128) \
- /* arg9 */ \
- "ld 3,72(11)\n\t" \
- "std 3,112(1)\n\t" \
- /* args1-8 */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 10, 64(11)\n\t" /* arg8->r10 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(128) \
- VG_CONTRACT_FRAME_BY(512) \
+ VALGRIND_ALIGN_STACK \
+ "sub sp, sp, #0x20 \n\t" \
+ "ldr x0, [%1, #8] \n\t" \
+ "ldr x1, [%1, #16] \n\t" \
+ "ldr x2, [%1, #24] \n\t" \
+ "ldr x3, [%1, #32] \n\t" \
+ "ldr x4, [%1, #40] \n\t" \
+ "ldr x5, [%1, #48] \n\t" \
+ "ldr x6, [%1, #56] \n\t" \
+ "ldr x7, [%1, #64] \n\t" \
+ "ldr x8, [%1, #72] \n\t" \
+ "str x8, [sp, #0] \n\t" \
+ "ldr x8, [%1] \n\t" /* target->x8 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
+ VALGRIND_RESTORE_STACK \
+ "mov %0, x0" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
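For nine or more arguments the surplus goes on the stack, and the reservation ("sub sp, sp, #0x20" here, "#0x30" in the 11- and 12-argument variants) is kept a multiple of 16 so sp stays aligned as the AArch64 ABI requires. A sketch of the bookkeeping (made-up address, illustration only):

    #include <assert.h>

    int main(void)
    {
        unsigned long sp = 0x7ffdc0f0UL;  /* 16-byte aligned             */
        sp -= 0x20;                       /* "sub sp, sp, #0x20"         */
        assert(sp % 16 == 0);             /* still aligned for the call  */
        /* arg9 -> [sp,#0]; in the 10-arg variant arg10 -> [sp,#8]. */
        return 0;
    }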
@@ -3395,52 +4514,41 @@ typedef
arg7,arg8,arg9,arg10) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+10]; \
+ volatile unsigned long _argvec[11]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- _argvec[2+10] = (unsigned long)arg10; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- VG_EXPAND_FRAME_BY_trashes_r3(128) \
- /* arg10 */ \
- "ld 3,80(11)\n\t" \
- "std 3,120(1)\n\t" \
- /* arg9 */ \
- "ld 3,72(11)\n\t" \
- "std 3,112(1)\n\t" \
- /* args1-8 */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 10, 64(11)\n\t" /* arg8->r10 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(128) \
- VG_CONTRACT_FRAME_BY(512) \
+ VALGRIND_ALIGN_STACK \
+ "sub sp, sp, #0x20 \n\t" \
+ "ldr x0, [%1, #8] \n\t" \
+ "ldr x1, [%1, #16] \n\t" \
+ "ldr x2, [%1, #24] \n\t" \
+ "ldr x3, [%1, #32] \n\t" \
+ "ldr x4, [%1, #40] \n\t" \
+ "ldr x5, [%1, #48] \n\t" \
+ "ldr x6, [%1, #56] \n\t" \
+ "ldr x7, [%1, #64] \n\t" \
+ "ldr x8, [%1, #72] \n\t" \
+ "str x8, [sp, #0] \n\t" \
+ "ldr x8, [%1, #80] \n\t" \
+ "str x8, [sp, #8] \n\t" \
+ "ldr x8, [%1] \n\t" /* target->x8 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
+ VALGRIND_RESTORE_STACK \
+ "mov %0, x0" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -3449,124 +4557,1543 @@ typedef
arg7,arg8,arg9,arg10,arg11) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+11]; \
+ volatile unsigned long _argvec[12]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- _argvec[2+10] = (unsigned long)arg10; \
- _argvec[2+11] = (unsigned long)arg11; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- VG_EXPAND_FRAME_BY_trashes_r3(144) \
- /* arg11 */ \
- "ld 3,88(11)\n\t" \
- "std 3,128(1)\n\t" \
- /* arg10 */ \
- "ld 3,80(11)\n\t" \
- "std 3,120(1)\n\t" \
- /* arg9 */ \
- "ld 3,72(11)\n\t" \
- "std 3,112(1)\n\t" \
- /* args1-8 */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 10, 64(11)\n\t" /* arg8->r10 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(144) \
- VG_CONTRACT_FRAME_BY(512) \
+ VALGRIND_ALIGN_STACK \
+ "sub sp, sp, #0x30 \n\t" \
+ "ldr x0, [%1, #8] \n\t" \
+ "ldr x1, [%1, #16] \n\t" \
+ "ldr x2, [%1, #24] \n\t" \
+ "ldr x3, [%1, #32] \n\t" \
+ "ldr x4, [%1, #40] \n\t" \
+ "ldr x5, [%1, #48] \n\t" \
+ "ldr x6, [%1, #56] \n\t" \
+ "ldr x7, [%1, #64] \n\t" \
+ "ldr x8, [%1, #72] \n\t" \
+ "str x8, [sp, #0] \n\t" \
+ "ldr x8, [%1, #80] \n\t" \
+ "str x8, [sp, #8] \n\t" \
+ "ldr x8, [%1, #88] \n\t" \
+ "str x8, [sp, #16] \n\t" \
+ "ldr x8, [%1] \n\t" /* target->x8 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
+ VALGRIND_RESTORE_STACK \
+ "mov %0, x0" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11,arg12) \
+ arg7,arg8,arg9,arg10,arg11, \
+ arg12) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+12]; \
+ volatile unsigned long _argvec[13]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- _argvec[2+10] = (unsigned long)arg10; \
- _argvec[2+11] = (unsigned long)arg11; \
- _argvec[2+12] = (unsigned long)arg12; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ _argvec[12] = (unsigned long)(arg12); \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- VG_EXPAND_FRAME_BY_trashes_r3(144) \
- /* arg12 */ \
- "ld 3,96(11)\n\t" \
- "std 3,136(1)\n\t" \
- /* arg11 */ \
- "ld 3,88(11)\n\t" \
- "std 3,128(1)\n\t" \
- /* arg10 */ \
- "ld 3,80(11)\n\t" \
- "std 3,120(1)\n\t" \
- /* arg9 */ \
- "ld 3,72(11)\n\t" \
- "std 3,112(1)\n\t" \
- /* args1-8 */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 10, 64(11)\n\t" /* arg8->r10 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(144) \
- VG_CONTRACT_FRAME_BY(512) \
+ VALGRIND_ALIGN_STACK \
+ "sub sp, sp, #0x30 \n\t" \
+ "ldr x0, [%1, #8] \n\t" \
+ "ldr x1, [%1, #16] \n\t" \
+ "ldr x2, [%1, #24] \n\t" \
+ "ldr x3, [%1, #32] \n\t" \
+ "ldr x4, [%1, #40] \n\t" \
+ "ldr x5, [%1, #48] \n\t" \
+ "ldr x6, [%1, #56] \n\t" \
+ "ldr x7, [%1, #64] \n\t" \
+ "ldr x8, [%1, #72] \n\t" \
+ "str x8, [sp, #0] \n\t" \
+ "ldr x8, [%1, #80] \n\t" \
+ "str x8, [sp, #8] \n\t" \
+ "ldr x8, [%1, #88] \n\t" \
+ "str x8, [sp, #16] \n\t" \
+ "ldr x8, [%1, #96] \n\t" \
+ "str x8, [sp, #24] \n\t" \
+ "ldr x8, [%1] \n\t" /* target->x8 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
+ VALGRIND_RESTORE_STACK \
+ "mov %0, x0" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_arm64_linux */
+
+/* ------------------------- s390x-linux ------------------------- */
+
+#if defined(PLAT_s390x_linux)
+
+/* Similar workaround as amd64 (see above), but we use r11 as frame
+ pointer and save the old r11 in r7. r11 might be used for
+ argvec, therefore we copy argvec in r1 since r1 is clobbered
+ after the call anyway. */
+#if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)
+# define __FRAME_POINTER \
+ ,"d"(__builtin_dwarf_cfa())
+# define VALGRIND_CFI_PROLOGUE \
+ ".cfi_remember_state\n\t" \
+ "lgr 1,%1\n\t" /* copy the argvec pointer in r1 */ \
+ "lgr 7,11\n\t" \
+ "lgr 11,%2\n\t" \
+ ".cfi_def_cfa r11, 0\n\t"
+# define VALGRIND_CFI_EPILOGUE \
+ "lgr 11, 7\n\t" \
+ ".cfi_restore_state\n\t"
+#else
+# define __FRAME_POINTER
+# define VALGRIND_CFI_PROLOGUE \
+ "lgr 1,%1\n\t"
+# define VALGRIND_CFI_EPILOGUE
+#endif
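Reading the CFI prologue/epilogue as straight register moves may help; a C sketch with the registers as variables (addresses made up, illustration only):

    #include <assert.h>

    int main(void)
    {
        unsigned long argvec = 0x1000, cfa = 0x2000;  /* hypothetical    */
        unsigned long r1, r7, r11 = 0xdead;           /* caller's r11    */
        r1  = argvec;  /* "lgr 1,%1":  argvec pointer into r1            */
        r7  = r11;     /* "lgr 7,11":  stash the caller's frame pointer  */
        r11 = cfa;     /* "lgr 11,%2": CFA into r11 so unwinding works   */
        r11 = r7;      /* epilogue "lgr 11, 7": put it back              */
        assert(r11 == 0xdead && r1 == argvec);
        return 0;
    }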
+
+/* Nb: On s390 the stack pointer is properly aligned *at all times*
+ according to the s390 GCC maintainer. (The ABI specification is not
+ precise in this regard.) Therefore, VALGRIND_ALIGN_STACK and
+ VALGRIND_RESTORE_STACK are not defined here. */
+
+/* These regs are trashed by the hidden call. Note that we overwrite
+ r14 in s390_irgen_noredir (VEX/priv/guest_s390_irgen.c) to give the
+ function a proper return address. All others are ABI defined call
+ clobbers. */
+#define __CALLER_SAVED_REGS "0","1","2","3","4","5","14", \
+ "f0","f1","f2","f3","f4","f5","f6","f7"
+
+/* Nb: Although r11 is modified in the asm snippets below (inside
+ VALGRIND_CFI_PROLOGUE) it is not listed in the clobber section, for
+ two reasons:
+ (1) r11 is restored in VALGRIND_CFI_EPILOGUE, so effectively it is not
+ modified
+ (2) GCC will complain that r11 cannot appear inside a clobber section,
+ when compiled with -O -fno-omit-frame-pointer
+ */
+
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[1]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-160\n\t" \
+ "lg 1, 0(1)\n\t" /* target->r1 */ \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,160\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "d" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
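The recurring "aghi 15,-160" reserves the fixed 160-byte register save area that the s390x ELF ABI obliges a caller to provide below r15, the stack pointer; the six-or-more-argument variants grow it by 8 bytes per stack-passed argument (168, 176, 184 below). A sketch of those sizes (editor's illustration, not part of the patch):

    #include <assert.h>

    int main(void)
    {
        int fixed = 160;               /* "aghi 15,-160"               */
        assert(fixed + 1 * 8 == 168);  /* 6 args: one on the stack     */
        assert(fixed + 2 * 8 == 176);  /* 7 args                       */
        assert(fixed + 3 * 8 == 184);  /* 8 args                       */
        return 0;
    }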
+
+/* The call abi has the arguments in r2-r6 and stack */
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[2]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-160\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,160\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1, arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-160\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 3,16(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,160\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1, arg2, arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[4]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-160\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 3,16(1)\n\t" \
+ "lg 4,24(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,160\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1, arg2, arg3, arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[5]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-160\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 3,16(1)\n\t" \
+ "lg 4,24(1)\n\t" \
+ "lg 5,32(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,160\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1, arg2, arg3, arg4, arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[6]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-160\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 3,16(1)\n\t" \
+ "lg 4,24(1)\n\t" \
+ "lg 5,32(1)\n\t" \
+ "lg 6,40(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,160\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
+ arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[7]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-168\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 3,16(1)\n\t" \
+ "lg 4,24(1)\n\t" \
+ "lg 5,32(1)\n\t" \
+ "lg 6,40(1)\n\t" \
+ "mvc 160(8,15), 48(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,168\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
+ arg6, arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[8]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-176\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 3,16(1)\n\t" \
+ "lg 4,24(1)\n\t" \
+ "lg 5,32(1)\n\t" \
+ "lg 6,40(1)\n\t" \
+ "mvc 160(8,15), 48(1)\n\t" \
+ "mvc 168(8,15), 56(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,176\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
+ arg6, arg7 ,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[9]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-184\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 3,16(1)\n\t" \
+ "lg 4,24(1)\n\t" \
+ "lg 5,32(1)\n\t" \
+ "lg 6,40(1)\n\t" \
+ "mvc 160(8,15), 48(1)\n\t" \
+ "mvc 168(8,15), 56(1)\n\t" \
+ "mvc 176(8,15), 64(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,184\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
+ arg6, arg7 ,arg8, arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[10]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-192\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 3,16(1)\n\t" \
+ "lg 4,24(1)\n\t" \
+ "lg 5,32(1)\n\t" \
+ "lg 6,40(1)\n\t" \
+ "mvc 160(8,15), 48(1)\n\t" \
+ "mvc 168(8,15), 56(1)\n\t" \
+ "mvc 176(8,15), 64(1)\n\t" \
+ "mvc 184(8,15), 72(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,192\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
+ arg6, arg7 ,arg8, arg9, arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[11]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
+ _argvec[10] = (unsigned long)arg10; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-200\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 3,16(1)\n\t" \
+ "lg 4,24(1)\n\t" \
+ "lg 5,32(1)\n\t" \
+ "lg 6,40(1)\n\t" \
+ "mvc 160(8,15), 48(1)\n\t" \
+ "mvc 168(8,15), 56(1)\n\t" \
+ "mvc 176(8,15), 64(1)\n\t" \
+ "mvc 184(8,15), 72(1)\n\t" \
+ "mvc 192(8,15), 80(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,200\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
+ arg6, arg7 ,arg8, arg9, arg10, arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[12]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
+ _argvec[10] = (unsigned long)arg10; \
+ _argvec[11] = (unsigned long)arg11; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-208\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 3,16(1)\n\t" \
+ "lg 4,24(1)\n\t" \
+ "lg 5,32(1)\n\t" \
+ "lg 6,40(1)\n\t" \
+ "mvc 160(8,15), 48(1)\n\t" \
+ "mvc 168(8,15), 56(1)\n\t" \
+ "mvc 176(8,15), 64(1)\n\t" \
+ "mvc 184(8,15), 72(1)\n\t" \
+ "mvc 192(8,15), 80(1)\n\t" \
+ "mvc 200(8,15), 88(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,208\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
+ arg6, arg7 ,arg8, arg9, arg10, arg11, arg12)\
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[13]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
+ _argvec[10] = (unsigned long)arg10; \
+ _argvec[11] = (unsigned long)arg11; \
+ _argvec[12] = (unsigned long)arg12; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-216\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 3,16(1)\n\t" \
+ "lg 4,24(1)\n\t" \
+ "lg 5,32(1)\n\t" \
+ "lg 6,40(1)\n\t" \
+ "mvc 160(8,15), 48(1)\n\t" \
+ "mvc 168(8,15), 56(1)\n\t" \
+ "mvc 176(8,15), 64(1)\n\t" \
+ "mvc 184(8,15), 72(1)\n\t" \
+ "mvc 192(8,15), 80(1)\n\t" \
+ "mvc 200(8,15), 88(1)\n\t" \
+ "mvc 208(8,15), 96(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,216\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+
+#endif /* PLAT_s390x_linux */
+
+/* ------------------------- mips32-linux ----------------------- */
+
+#if defined(PLAT_mips32_linux)
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS "$2", "$3", "$4", "$5", "$6", \
+"$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
+"$25", "$31"
+
+/* These CALL_FN_ macros assume that on mips-linux, sizeof(unsigned
+ long) == 4. */
+
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[1]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ "subu $29, $29, 8 \n\t" \
+ "sw $28, 0($29) \n\t" \
+ "sw $31, 4($29) \n\t" \
+ "subu $29, $29, 16 \n\t" \
+ "lw $25, 0(%1) \n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "addu $29, $29, 16\n\t" \
+ "lw $28, 0($29) \n\t" \
+ "lw $31, 4($29) \n\t" \
+ "addu $29, $29, 8 \n\t" \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[2]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ __asm__ volatile( \
+ "subu $29, $29, 8 \n\t" \
+ "sw $28, 0($29) \n\t" \
+ "sw $31, 4($29) \n\t" \
+ "subu $29, $29, 16 \n\t" \
+ "lw $4, 4(%1) \n\t" /* arg1*/ \
+ "lw $25, 0(%1) \n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "addu $29, $29, 16 \n\t" \
+ "lw $28, 0($29) \n\t" \
+ "lw $31, 4($29) \n\t" \
+ "addu $29, $29, 8 \n\t" \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ __asm__ volatile( \
+ "subu $29, $29, 8 \n\t" \
+ "sw $28, 0($29) \n\t" \
+ "sw $31, 4($29) \n\t" \
+ "subu $29, $29, 16 \n\t" \
+ "lw $4, 4(%1) \n\t" \
+ "lw $5, 8(%1) \n\t" \
+ "lw $25, 0(%1) \n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "addu $29, $29, 16 \n\t" \
+ "lw $28, 0($29) \n\t" \
+ "lw $31, 4($29) \n\t" \
+ "addu $29, $29, 8 \n\t" \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[4]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ __asm__ volatile( \
+ "subu $29, $29, 8 \n\t" \
+ "sw $28, 0($29) \n\t" \
+ "sw $31, 4($29) \n\t" \
+ "subu $29, $29, 16 \n\t" \
+ "lw $4, 4(%1) \n\t" \
+ "lw $5, 8(%1) \n\t" \
+ "lw $6, 12(%1) \n\t" \
+ "lw $25, 0(%1) \n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "addu $29, $29, 16 \n\t" \
+ "lw $28, 0($29) \n\t" \
+ "lw $31, 4($29) \n\t" \
+ "addu $29, $29, 8 \n\t" \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[5]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ __asm__ volatile( \
+ "subu $29, $29, 8 \n\t" \
+ "sw $28, 0($29) \n\t" \
+ "sw $31, 4($29) \n\t" \
+ "subu $29, $29, 16 \n\t" \
+ "lw $4, 4(%1) \n\t" \
+ "lw $5, 8(%1) \n\t" \
+ "lw $6, 12(%1) \n\t" \
+ "lw $7, 16(%1) \n\t" \
+ "lw $25, 0(%1) \n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "addu $29, $29, 16 \n\t" \
+ "lw $28, 0($29) \n\t" \
+ "lw $31, 4($29) \n\t" \
+ "addu $29, $29, 8 \n\t" \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[6]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ __asm__ volatile( \
+ "subu $29, $29, 8 \n\t" \
+ "sw $28, 0($29) \n\t" \
+ "sw $31, 4($29) \n\t" \
+ "lw $4, 20(%1) \n\t" \
+ "subu $29, $29, 24\n\t" \
+ "sw $4, 16($29) \n\t" \
+ "lw $4, 4(%1) \n\t" \
+ "lw $5, 8(%1) \n\t" \
+ "lw $6, 12(%1) \n\t" \
+ "lw $7, 16(%1) \n\t" \
+ "lw $25, 0(%1) \n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "addu $29, $29, 24 \n\t" \
+ "lw $28, 0($29) \n\t" \
+ "lw $31, 4($29) \n\t" \
+ "addu $29, $29, 8 \n\t" \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[7]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ __asm__ volatile( \
+ "subu $29, $29, 8 \n\t" \
+ "sw $28, 0($29) \n\t" \
+ "sw $31, 4($29) \n\t" \
+ "lw $4, 20(%1) \n\t" \
+ "subu $29, $29, 32\n\t" \
+ "sw $4, 16($29) \n\t" \
+ "lw $4, 24(%1) \n\t" \
+ "nop\n\t" \
+ "sw $4, 20($29) \n\t" \
+ "lw $4, 4(%1) \n\t" \
+ "lw $5, 8(%1) \n\t" \
+ "lw $6, 12(%1) \n\t" \
+ "lw $7, 16(%1) \n\t" \
+ "lw $25, 0(%1) \n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "addu $29, $29, 32 \n\t" \
+ "lw $28, 0($29) \n\t" \
+ "lw $31, 4($29) \n\t" \
+ "addu $29, $29, 8 \n\t" \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
-#endif /* PLAT_ppc64_aix5 */
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[8]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ __asm__ volatile( \
+ "subu $29, $29, 8 \n\t" \
+ "sw $28, 0($29) \n\t" \
+ "sw $31, 4($29) \n\t" \
+ "lw $4, 20(%1) \n\t" \
+ "subu $29, $29, 32\n\t" \
+ "sw $4, 16($29) \n\t" \
+ "lw $4, 24(%1) \n\t" \
+ "sw $4, 20($29) \n\t" \
+ "lw $4, 28(%1) \n\t" \
+ "sw $4, 24($29) \n\t" \
+ "lw $4, 4(%1) \n\t" \
+ "lw $5, 8(%1) \n\t" \
+ "lw $6, 12(%1) \n\t" \
+ "lw $7, 16(%1) \n\t" \
+ "lw $25, 0(%1) \n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "addu $29, $29, 32 \n\t" \
+ "lw $28, 0($29) \n\t" \
+ "lw $31, 4($29) \n\t" \
+ "addu $29, $29, 8 \n\t" \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[9]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ __asm__ volatile( \
+ "subu $29, $29, 8 \n\t" \
+ "sw $28, 0($29) \n\t" \
+ "sw $31, 4($29) \n\t" \
+ "lw $4, 20(%1) \n\t" \
+ "subu $29, $29, 40\n\t" \
+ "sw $4, 16($29) \n\t" \
+ "lw $4, 24(%1) \n\t" \
+ "sw $4, 20($29) \n\t" \
+ "lw $4, 28(%1) \n\t" \
+ "sw $4, 24($29) \n\t" \
+ "lw $4, 32(%1) \n\t" \
+ "sw $4, 28($29) \n\t" \
+ "lw $4, 4(%1) \n\t" \
+ "lw $5, 8(%1) \n\t" \
+ "lw $6, 12(%1) \n\t" \
+ "lw $7, 16(%1) \n\t" \
+ "lw $25, 0(%1) \n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "addu $29, $29, 40 \n\t" \
+ "lw $28, 0($29) \n\t" \
+ "lw $31, 4($29) \n\t" \
+ "addu $29, $29, 8 \n\t" \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[10]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ __asm__ volatile( \
+ "subu $29, $29, 8 \n\t" \
+ "sw $28, 0($29) \n\t" \
+ "sw $31, 4($29) \n\t" \
+ "lw $4, 20(%1) \n\t" \
+ "subu $29, $29, 40\n\t" \
+ "sw $4, 16($29) \n\t" \
+ "lw $4, 24(%1) \n\t" \
+ "sw $4, 20($29) \n\t" \
+ "lw $4, 28(%1) \n\t" \
+ "sw $4, 24($29) \n\t" \
+ "lw $4, 32(%1) \n\t" \
+ "sw $4, 28($29) \n\t" \
+ "lw $4, 36(%1) \n\t" \
+ "sw $4, 32($29) \n\t" \
+ "lw $4, 4(%1) \n\t" \
+ "lw $5, 8(%1) \n\t" \
+ "lw $6, 12(%1) \n\t" \
+ "lw $7, 16(%1) \n\t" \
+ "lw $25, 0(%1) \n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "addu $29, $29, 40 \n\t" \
+ "lw $28, 0($29) \n\t" \
+ "lw $31, 4($29) \n\t" \
+ "addu $29, $29, 8 \n\t" \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[11]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ __asm__ volatile( \
+ "subu $29, $29, 8 \n\t" \
+ "sw $28, 0($29) \n\t" \
+ "sw $31, 4($29) \n\t" \
+ "lw $4, 20(%1) \n\t" \
+ "subu $29, $29, 48\n\t" \
+ "sw $4, 16($29) \n\t" \
+ "lw $4, 24(%1) \n\t" \
+ "sw $4, 20($29) \n\t" \
+ "lw $4, 28(%1) \n\t" \
+ "sw $4, 24($29) \n\t" \
+ "lw $4, 32(%1) \n\t" \
+ "sw $4, 28($29) \n\t" \
+ "lw $4, 36(%1) \n\t" \
+ "sw $4, 32($29) \n\t" \
+ "lw $4, 40(%1) \n\t" \
+ "sw $4, 36($29) \n\t" \
+ "lw $4, 4(%1) \n\t" \
+ "lw $5, 8(%1) \n\t" \
+ "lw $6, 12(%1) \n\t" \
+ "lw $7, 16(%1) \n\t" \
+ "lw $25, 0(%1) \n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "addu $29, $29, 48 \n\t" \
+ "lw $28, 0($29) \n\t" \
+ "lw $31, 4($29) \n\t" \
+ "addu $29, $29, 8 \n\t" \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
+ arg6,arg7,arg8,arg9,arg10, \
+ arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[12]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ __asm__ volatile( \
+ "subu $29, $29, 8 \n\t" \
+ "sw $28, 0($29) \n\t" \
+ "sw $31, 4($29) \n\t" \
+ "lw $4, 20(%1) \n\t" \
+ "subu $29, $29, 48\n\t" \
+ "sw $4, 16($29) \n\t" \
+ "lw $4, 24(%1) \n\t" \
+ "sw $4, 20($29) \n\t" \
+ "lw $4, 28(%1) \n\t" \
+ "sw $4, 24($29) \n\t" \
+ "lw $4, 32(%1) \n\t" \
+ "sw $4, 28($29) \n\t" \
+ "lw $4, 36(%1) \n\t" \
+ "sw $4, 32($29) \n\t" \
+ "lw $4, 40(%1) \n\t" \
+ "sw $4, 36($29) \n\t" \
+ "lw $4, 44(%1) \n\t" \
+ "sw $4, 40($29) \n\t" \
+ "lw $4, 4(%1) \n\t" \
+ "lw $5, 8(%1) \n\t" \
+ "lw $6, 12(%1) \n\t" \
+ "lw $7, 16(%1) \n\t" \
+ "lw $25, 0(%1) \n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "addu $29, $29, 48 \n\t" \
+ "lw $28, 0($29) \n\t" \
+ "lw $31, 4($29) \n\t" \
+ "addu $29, $29, 8 \n\t" \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
+ arg6,arg7,arg8,arg9,arg10, \
+ arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[13]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ _argvec[12] = (unsigned long)(arg12); \
+ __asm__ volatile( \
+ "subu $29, $29, 8 \n\t" \
+ "sw $28, 0($29) \n\t" \
+ "sw $31, 4($29) \n\t" \
+ "lw $4, 20(%1) \n\t" \
+ "subu $29, $29, 56\n\t" \
+ "sw $4, 16($29) \n\t" \
+ "lw $4, 24(%1) \n\t" \
+ "sw $4, 20($29) \n\t" \
+ "lw $4, 28(%1) \n\t" \
+ "sw $4, 24($29) \n\t" \
+ "lw $4, 32(%1) \n\t" \
+ "sw $4, 28($29) \n\t" \
+ "lw $4, 36(%1) \n\t" \
+ "sw $4, 32($29) \n\t" \
+ "lw $4, 40(%1) \n\t" \
+ "sw $4, 36($29) \n\t" \
+ "lw $4, 44(%1) \n\t" \
+ "sw $4, 40($29) \n\t" \
+ "lw $4, 48(%1) \n\t" \
+ "sw $4, 44($29) \n\t" \
+ "lw $4, 4(%1) \n\t" \
+ "lw $5, 8(%1) \n\t" \
+ "lw $6, 12(%1) \n\t" \
+ "lw $7, 16(%1) \n\t" \
+ "lw $25, 0(%1) \n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "addu $29, $29, 56 \n\t" \
+ "lw $28, 0($29) \n\t" \
+ "lw $31, 4($29) \n\t" \
+ "addu $29, $29, 8 \n\t" \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_mips32_linux */
+
+/* ------------------------- mips64-linux ------------------------- */
+
+#if defined(PLAT_mips64_linux)
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS "$2", "$3", "$4", "$5", "$6", \
+"$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
+"$25", "$31"
+
+/* These CALL_FN_ macros assume that on mips64-linux, sizeof(unsigned
+ long) == 8. */
+
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[1]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ "ld $25, 0(%1)\n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[2]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ __asm__ volatile( \
+ "ld $4, 8(%1)\n\t" /* arg1*/ \
+ "ld $25, 0(%1)\n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ __asm__ volatile( \
+ "ld $4, 8(%1)\n\t" \
+ "ld $5, 16(%1)\n\t" \
+ "ld $25, 0(%1)\n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[4]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ __asm__ volatile( \
+ "ld $4, 8(%1)\n\t" \
+ "ld $5, 16(%1)\n\t" \
+ "ld $6, 24(%1)\n\t" \
+ "ld $25, 0(%1)\n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[5]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ __asm__ volatile( \
+ "ld $4, 8(%1)\n\t" \
+ "ld $5, 16(%1)\n\t" \
+ "ld $6, 24(%1)\n\t" \
+ "ld $7, 32(%1)\n\t" \
+ "ld $25, 0(%1)\n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[6]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ __asm__ volatile( \
+ "ld $4, 8(%1)\n\t" \
+ "ld $5, 16(%1)\n\t" \
+ "ld $6, 24(%1)\n\t" \
+ "ld $7, 32(%1)\n\t" \
+ "ld $8, 40(%1)\n\t" \
+ "ld $25, 0(%1)\n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[7]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ __asm__ volatile( \
+ "ld $4, 8(%1)\n\t" \
+ "ld $5, 16(%1)\n\t" \
+ "ld $6, 24(%1)\n\t" \
+ "ld $7, 32(%1)\n\t" \
+ "ld $8, 40(%1)\n\t" \
+ "ld $9, 48(%1)\n\t" \
+ "ld $25, 0(%1)\n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[8]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ __asm__ volatile( \
+ "ld $4, 8(%1)\n\t" \
+ "ld $5, 16(%1)\n\t" \
+ "ld $6, 24(%1)\n\t" \
+ "ld $7, 32(%1)\n\t" \
+ "ld $8, 40(%1)\n\t" \
+ "ld $9, 48(%1)\n\t" \
+ "ld $10, 56(%1)\n\t" \
+ "ld $25, 0(%1) \n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[9]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ __asm__ volatile( \
+ "ld $4, 8(%1)\n\t" \
+ "ld $5, 16(%1)\n\t" \
+ "ld $6, 24(%1)\n\t" \
+ "ld $7, 32(%1)\n\t" \
+ "ld $8, 40(%1)\n\t" \
+ "ld $9, 48(%1)\n\t" \
+ "ld $10, 56(%1)\n\t" \
+ "ld $11, 64(%1)\n\t" \
+ "ld $25, 0(%1) \n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[10]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ __asm__ volatile( \
+ "dsubu $29, $29, 8\n\t" \
+ "ld $4, 72(%1)\n\t" \
+ "sd $4, 0($29)\n\t" \
+ "ld $4, 8(%1)\n\t" \
+ "ld $5, 16(%1)\n\t" \
+ "ld $6, 24(%1)\n\t" \
+ "ld $7, 32(%1)\n\t" \
+ "ld $8, 40(%1)\n\t" \
+ "ld $9, 48(%1)\n\t" \
+ "ld $10, 56(%1)\n\t" \
+ "ld $11, 64(%1)\n\t" \
+ "ld $25, 0(%1)\n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "daddu $29, $29, 8\n\t" \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[11]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ __asm__ volatile( \
+ "dsubu $29, $29, 16\n\t" \
+ "ld $4, 72(%1)\n\t" \
+ "sd $4, 0($29)\n\t" \
+ "ld $4, 80(%1)\n\t" \
+ "sd $4, 8($29)\n\t" \
+ "ld $4, 8(%1)\n\t" \
+ "ld $5, 16(%1)\n\t" \
+ "ld $6, 24(%1)\n\t" \
+ "ld $7, 32(%1)\n\t" \
+ "ld $8, 40(%1)\n\t" \
+ "ld $9, 48(%1)\n\t" \
+ "ld $10, 56(%1)\n\t" \
+ "ld $11, 64(%1)\n\t" \
+ "ld $25, 0(%1)\n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "daddu $29, $29, 16\n\t" \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
+ arg6,arg7,arg8,arg9,arg10, \
+ arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[12]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ __asm__ volatile( \
+ "dsubu $29, $29, 24\n\t" \
+ "ld $4, 72(%1)\n\t" \
+ "sd $4, 0($29)\n\t" \
+ "ld $4, 80(%1)\n\t" \
+ "sd $4, 8($29)\n\t" \
+ "ld $4, 88(%1)\n\t" \
+ "sd $4, 16($29)\n\t" \
+ "ld $4, 8(%1)\n\t" \
+ "ld $5, 16(%1)\n\t" \
+ "ld $6, 24(%1)\n\t" \
+ "ld $7, 32(%1)\n\t" \
+ "ld $8, 40(%1)\n\t" \
+ "ld $9, 48(%1)\n\t" \
+ "ld $10, 56(%1)\n\t" \
+ "ld $11, 64(%1)\n\t" \
+ "ld $25, 0(%1)\n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "daddu $29, $29, 24\n\t" \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
+ arg6,arg7,arg8,arg9,arg10, \
+ arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[13]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ _argvec[12] = (unsigned long)(arg12); \
+ __asm__ volatile( \
+ "dsubu $29, $29, 32\n\t" \
+ "ld $4, 72(%1)\n\t" \
+ "sd $4, 0($29)\n\t" \
+ "ld $4, 80(%1)\n\t" \
+ "sd $4, 8($29)\n\t" \
+ "ld $4, 88(%1)\n\t" \
+ "sd $4, 16($29)\n\t" \
+ "ld $4, 96(%1)\n\t" \
+ "sd $4, 24($29)\n\t" \
+ "ld $4, 8(%1)\n\t" \
+ "ld $5, 16(%1)\n\t" \
+ "ld $6, 24(%1)\n\t" \
+ "ld $7, 32(%1)\n\t" \
+ "ld $8, 40(%1)\n\t" \
+ "ld $9, 48(%1)\n\t" \
+ "ld $10, 56(%1)\n\t" \
+ "ld $11, 64(%1)\n\t" \
+ "ld $25, 0(%1)\n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "daddu $29, $29, 32\n\t" \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+#endif /* PLAT_mips64_linux */
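
For reference, all of the platform-specific CALL_FN_W_* macros above are
meant to be used from inside function wrappers, together with OrigFn and
VALGRIND_GET_ORIG_FN. A minimal sketch along the lines of Valgrind's
function-wrapping documentation, wrapping a hypothetical int foo(int, int)
in the main executable (soname NONE), and assuming this header is included
as "valgrind.h":

   #include <stdio.h>
   #include "valgrind.h"

   int I_WRAP_SONAME_FN_ZU(NONE, foo)(int x, int y)
   {
      int    result;
      OrigFn fn;
      VALGRIND_GET_ORIG_FN(fn);         /* fetch address of the real foo */
      printf("foo's wrapper: args %d %d\n", x, y);
      CALL_FN_W_WW(result, fn, x, y);   /* call real foo, no redirection */
      printf("foo's wrapper: result %d\n", result);
      return result;
   }
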
/* ------------------------------------------------------------------ */
/* ARCHITECTURE INDEPENDENT MACROS for CLIENT REQUESTS. */
@@ -3590,8 +6117,9 @@ typedef
/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
This enum comprises an ABI exported by Valgrind to programs
- which use client requests. DO NOT CHANGE THE ORDER OF THESE
- ENTRIES, NOR DELETE ANY -- add new ones at the end. */
+ which use client requests. DO NOT CHANGE THE NUMERIC VALUES OF THESE
+ ENTRIES, NOR DELETE ANY -- add new ones at the end of the most
+ relevant group. */
typedef
enum { VG_USERREQ__RUNNING_ON_VALGRIND = 0x1001,
VG_USERREQ__DISCARD_TRANSLATIONS = 0x1002,
@@ -3611,9 +6139,14 @@ typedef
errors. */
VG_USERREQ__COUNT_ERRORS = 0x1201,
+ /* Allows the client program and/or gdbserver to execute a monitor
+ command. */
+ VG_USERREQ__GDB_MONITOR_COMMAND = 0x1202,
+
/* These are useful and can be interpreted by any tool that
tracks malloc() et al, by using vg_replace_malloc.c. */
VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301,
+ VG_USERREQ__RESIZEINPLACE_BLOCK = 0x130b,
VG_USERREQ__FREELIKE_BLOCK = 0x1302,
/* Memory pool support. */
VG_USERREQ__CREATE_MEMPOOL = 0x1303,
@@ -3626,91 +6159,161 @@ typedef
VG_USERREQ__MEMPOOL_EXISTS = 0x130a,
/* Allow printfs to valgrind log. */
+ /* The first two pass the va_list argument by value, which
+ assumes it is the same size as or smaller than a UWord,
+ which generally isn't the case. Hence they are deprecated.
+ The second two pass the vargs by reference and so are
+ immune to this problem. */
+ /* both :: char* fmt, va_list vargs (DEPRECATED) */
VG_USERREQ__PRINTF = 0x1401,
VG_USERREQ__PRINTF_BACKTRACE = 0x1402,
+ /* both :: char* fmt, va_list* vargs */
+ VG_USERREQ__PRINTF_VALIST_BY_REF = 0x1403,
+ VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF = 0x1404,
/* Stack support. */
VG_USERREQ__STACK_REGISTER = 0x1501,
VG_USERREQ__STACK_DEREGISTER = 0x1502,
- VG_USERREQ__STACK_CHANGE = 0x1503
+ VG_USERREQ__STACK_CHANGE = 0x1503,
+
+ /* Wine support */
+ VG_USERREQ__LOAD_PDB_DEBUGINFO = 0x1601,
+
+ /* Querying of debug info. */
+ VG_USERREQ__MAP_IP_TO_SRCLOC = 0x1701,
+
+ /* Disable/enable error reporting level. Takes a single
+ Word arg which is the delta to this thread's error
+ disablement indicator. Hence 1 disables or further
+ disables errors, and -1 moves back towards enablement.
+ Other values are not allowed. */
+ VG_USERREQ__CHANGE_ERR_DISABLEMENT = 0x1801,
+
+ /* Some requests used internally by Valgrind, such as
+ for self-testing or self-hosting. */
+ /* Initialise IR injection */
+ VG_USERREQ__VEX_INIT_FOR_IRI = 0x1901,
+ /* Used by Inner Valgrind to inform Outer Valgrind where to
+ find the list of inner guest threads */
+ VG_USERREQ__INNER_THREADS = 0x1902
} Vg_ClientRequest;
#if !defined(__GNUC__)
# define __extension__ /* */
#endif
+
/* Returns the number of Valgrinds this code is running under. That
is, 0 if running natively, 1 if running under Valgrind, 2 if
running under Valgrind which is running under another Valgrind,
etc. */
-#define RUNNING_ON_VALGRIND __extension__ \
- ({unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* if not */, \
- VG_USERREQ__RUNNING_ON_VALGRIND, \
- 0, 0, 0, 0, 0); \
- _qzz_res; \
- })
+#define RUNNING_ON_VALGRIND \
+ (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* if not */, \
+ VG_USERREQ__RUNNING_ON_VALGRIND, \
+ 0, 0, 0, 0, 0)
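
A minimal usage sketch (assuming this header is included as "valgrind.h"):

   #include <stdio.h>
   #include "valgrind.h"

   int main(void)
   {
      if (RUNNING_ON_VALGRIND)
         printf("under Valgrind, nesting depth %u\n",
                (unsigned)RUNNING_ON_VALGRIND);
      else
         printf("running natively\n");
      return 0;
   }
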
/* Discard translation of code in the range [_qzz_addr .. _qzz_addr +
_qzz_len - 1]. Useful if you are debugging a JITter or some such,
since it provides a way to make sure valgrind will retranslate the
invalidated area. Returns no value. */
-#define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
- VG_USERREQ__DISCARD_TRANSLATIONS, \
- _qzz_addr, _qzz_len, 0, 0, 0); \
- }
+#define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len) \
+ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DISCARD_TRANSLATIONS, \
+ _qzz_addr, _qzz_len, 0, 0, 0)
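
A sketch of the typical JIT pattern -- the buffer name and patching
function are hypothetical:

   #include <string.h>
   #include "valgrind.h"

   /* Overwrite JIT-compiled code in place, then tell Valgrind to drop
      any translations it made of the old contents. */
   void patch_jit_code(unsigned char *buf, const unsigned char *newcode,
                       size_t len)
   {
      memcpy(buf, newcode, len);
      VALGRIND_DISCARD_TRANSLATIONS(buf, len);
   }
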
+#define VALGRIND_INNER_THREADS(_qzz_addr) \
+ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__INNER_THREADS, \
+ _qzz_addr, 0, 0, 0, 0)
-/* These requests are for getting Valgrind itself to print something.
- Possibly with a backtrace. This is a really ugly hack. */
-#if defined(NVALGRIND)
-
-# define VALGRIND_PRINTF(...)
-# define VALGRIND_PRINTF_BACKTRACE(...)
-
-#else /* NVALGRIND */
+/* These requests are for getting Valgrind itself to print something.
+ Possibly with a backtrace. This is a really ugly hack. The return value
+ is the number of characters printed, excluding the "**<pid>** " part at the
+ start and the backtrace (if present). */
+#if (defined(__GNUC__) || defined(__INTEL_COMPILER)) && !defined(_MSC_VER)
/* Modern GCC will optimize the static routine out if unused,
and unused attribute will shut down warnings about it. */
static int VALGRIND_PRINTF(const char *format, ...)
__attribute__((format(__printf__, 1, 2), __unused__));
+#endif
static int
+#if defined(_MSC_VER)
+__inline
+#endif
VALGRIND_PRINTF(const char *format, ...)
{
+#if defined(NVALGRIND)
+ (void)format;
+ return 0;
+#else /* NVALGRIND */
+#if defined(_MSC_VER) || defined(__MINGW64__)
+ uintptr_t _qzz_res;
+#else
unsigned long _qzz_res;
+#endif
va_list vargs;
va_start(vargs, format);
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, VG_USERREQ__PRINTF,
- (unsigned long)format, (unsigned long)vargs,
+#if defined(_MSC_VER) || defined(__MINGW64__)
+ _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
+ VG_USERREQ__PRINTF_VALIST_BY_REF,
+ (uintptr_t)format,
+ (uintptr_t)&vargs,
+ 0, 0, 0);
+#else
+ _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
+ VG_USERREQ__PRINTF_VALIST_BY_REF,
+ (unsigned long)format,
+ (unsigned long)&vargs,
0, 0, 0);
+#endif
va_end(vargs);
return (int)_qzz_res;
+#endif /* NVALGRIND */
}
+#if (defined(__GNUC__) || defined(__INTEL_COMPILER)) && !defined(_MSC_VER)
static int VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
__attribute__((format(__printf__, 1, 2), __unused__));
+#endif
static int
+#if defined(_MSC_VER)
+__inline
+#endif
VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
{
+#if defined(NVALGRIND)
+ (void)format;
+ return 0;
+#else /* NVALGRIND */
+#if defined(_MSC_VER) || defined(__MINGW64__)
+ uintptr_t _qzz_res;
+#else
unsigned long _qzz_res;
+#endif
va_list vargs;
va_start(vargs, format);
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, VG_USERREQ__PRINTF_BACKTRACE,
- (unsigned long)format, (unsigned long)vargs,
+#if defined(_MSC_VER) || defined(__MINGW64__)
+ _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
+ VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF,
+ (uintptr_t)format,
+ (uintptr_t)&vargs,
0, 0, 0);
+#else
+ _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
+ VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF,
+ (unsigned long)format,
+ (unsigned long)&vargs,
+ 0, 0, 0);
+#endif
va_end(vargs);
return (int)_qzz_res;
-}
-
#endif /* NVALGRIND */
+}
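
A usage sketch; the messages end up in Valgrind's log, prefixed with the
pid, and are (near) no-ops when running natively. n_live is hypothetical:

   #include "valgrind.h"

   static void report_reset(int n_live)
   {
      VALGRIND_PRINTF("resetting cache, %d entries live\n", n_live);
      /* Same, but followed by a stack trace of this call site: */
      VALGRIND_PRINTF_BACKTRACE("reset requested with %d live entries:\n",
                                n_live);
   }
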
/* These requests allow control to move from the simulated CPU to the
- real CPU, calling an arbitary function.
+ real CPU, calling an arbitrary function.
Note that the current ThreadId is inserted as the first argument.
So this call:
@@ -3733,201 +6336,309 @@ VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
with a lot in the past.
*/
#define VALGRIND_NON_SIMD_CALL0(_qyy_fn) \
- __extension__ \
- ({unsigned long _qyy_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
- VG_USERREQ__CLIENT_CALL0, \
- _qyy_fn, \
- 0, 0, 0, 0); \
- _qyy_res; \
- })
-
-#define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1) \
- __extension__ \
- ({unsigned long _qyy_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
- VG_USERREQ__CLIENT_CALL1, \
- _qyy_fn, \
- _qyy_arg1, 0, 0, 0); \
- _qyy_res; \
- })
-
-#define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2) \
- __extension__ \
- ({unsigned long _qyy_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
- VG_USERREQ__CLIENT_CALL2, \
- _qyy_fn, \
- _qyy_arg1, _qyy_arg2, 0, 0); \
- _qyy_res; \
- })
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
+ VG_USERREQ__CLIENT_CALL0, \
+ _qyy_fn, \
+ 0, 0, 0, 0)
+
+#define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1) \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
+ VG_USERREQ__CLIENT_CALL1, \
+ _qyy_fn, \
+ _qyy_arg1, 0, 0, 0)
+
+#define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2) \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
+ VG_USERREQ__CLIENT_CALL2, \
+ _qyy_fn, \
+ _qyy_arg1, _qyy_arg2, 0, 0)
#define VALGRIND_NON_SIMD_CALL3(_qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3) \
- __extension__ \
- ({unsigned long _qyy_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
- VG_USERREQ__CLIENT_CALL3, \
- _qyy_fn, \
- _qyy_arg1, _qyy_arg2, \
- _qyy_arg3, 0); \
- _qyy_res; \
- })
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
+ VG_USERREQ__CLIENT_CALL3, \
+ _qyy_fn, \
+ _qyy_arg1, _qyy_arg2, \
+ _qyy_arg3, 0)
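
A sketch showing the required signature shape -- the called function
receives Valgrind's ThreadId as an implicit first argument:

   #include "valgrind.h"

   static long add_on_real_cpu(long tid, long a, long b)
   {
      (void)tid;        /* inserted by Valgrind, often unused */
      return a + b;
   }

   static long demo(void)
   {
      /* Runs add_on_real_cpu on the real CPU; yields 42. */
      return (long)VALGRIND_NON_SIMD_CALL2(add_on_real_cpu, 20, 22);
   }
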
/* Counts the number of errors that have been recorded by a tool. Nb:
the tool must record the errors with VG_(maybe_record_error)() or
VG_(unique_error)() for them to be counted. */
#define VALGRIND_COUNT_ERRORS \
- __extension__ \
- ({unsigned int _qyy_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
+ (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ 0 /* default return */, \
VG_USERREQ__COUNT_ERRORS, \
- 0, 0, 0, 0, 0); \
- _qyy_res; \
- })
-
-/* Mark a block of memory as having been allocated by a malloc()-like
- function. `addr' is the start of the usable block (ie. after any
- redzone) `rzB' is redzone size if the allocator can apply redzones;
- use '0' if not. Adding redzones makes it more likely Valgrind will spot
- block overruns. `is_zeroed' indicates if the memory is zeroed, as it is
- for calloc(). Put it immediately after the point where a block is
- allocated.
+ 0, 0, 0, 0, 0)
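
A sketch of a self-checking test that fails when the tool (e.g. Memcheck)
records new errors while some code runs; exercise() is hypothetical:

   #include <stdio.h>
   #include "valgrind.h"

   extern void exercise(void);

   int check_no_new_errors(void)
   {
      unsigned before = VALGRIND_COUNT_ERRORS;
      exercise();
      unsigned after = VALGRIND_COUNT_ERRORS;
      if (after != before)
         printf("exercise() produced %u new error(s)\n", after - before);
      return after == before;
   }
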
+
+/* Several Valgrind tools (Memcheck, Massif, Helgrind, DRD) rely on knowing
+ when heap blocks are allocated in order to give accurate results. This
+ happens automatically for the standard allocator functions such as
+ malloc(), calloc(), realloc(), memalign(), new, new[], free(), delete,
+ delete[], etc.
+
+ But if your program uses a custom allocator, this doesn't automatically
+ happen, and Valgrind will not do as well. For example, if you allocate
+ superblocks with mmap() and then allocate chunks of the superblocks, all
+ Valgrind's observations will be at the mmap() level and it won't know that
+ the chunks should be considered separate entities. In Memcheck's case,
+ that means you probably won't get heap block overrun detection (because
+ there won't be redzones marked as unaddressable) and you definitely won't
+ get any leak detection.
+
+ The following client requests allow a custom allocator to be annotated so
+ that it can be handled accurately by Valgrind.
+
+ VALGRIND_MALLOCLIKE_BLOCK marks a region of memory as having been allocated
+ by a malloc()-like function. For Memcheck (an illustrative case), this
+ does two things:
+
+ - It records that the block has been allocated. This means any addresses
+ within the block mentioned in error messages will be
+ identified as belonging to the block. It also means that if the block
+ isn't freed it will be detected by the leak checker.
+
+ - It marks the block as being addressable and undefined (if 'is_zeroed' is
+ not set), or addressable and defined (if 'is_zeroed' is set). This
+ controls how accesses to the block by the program are handled.
- If you're using Memcheck: If you're allocating memory via superblocks,
- and then handing out small chunks of each superblock, if you don't have
- redzones on your small blocks, it's worth marking the superblock with
- VALGRIND_MAKE_MEM_NOACCESS when it's created, so that block overruns are
- detected. But if you can put redzones on, it's probably better to not do
- this, so that messages for small overruns are described in terms of the
- small block rather than the superblock (but if you have a big overrun
- that skips over a redzone, you could miss an error this way). See
- memcheck/tests/custom_alloc.c for an example.
-
- WARNING: if your allocator uses malloc() or 'new' to allocate
- superblocks, rather than mmap() or brk(), this will not work properly --
- you'll likely get assertion failures during leak detection. This is
- because Valgrind doesn't like seeing overlapping heap blocks. Sorry.
-
- Nb: block must be freed via a free()-like function specified
- with VALGRIND_FREELIKE_BLOCK or mismatch errors will occur. */
-#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
- VG_USERREQ__MALLOCLIKE_BLOCK, \
- addr, sizeB, rzB, is_zeroed, 0); \
- }
+ 'addr' is the start of the usable block (ie. after any
+ redzone), 'sizeB' is its size. 'rzB' is the redzone size if the allocator
+ can apply redzones -- these are blocks of padding at the start and end of
+ each block. Adding redzones is recommended as it makes it much more likely
+ Valgrind will spot block overruns. 'is_zeroed' indicates if the memory is
+ zeroed (or filled with another predictable value), as is the case for
+ calloc().
+
+ VALGRIND_MALLOCLIKE_BLOCK should be put immediately after the point where a
+ heap block -- that will be used by the client program -- is allocated.
+ It's best to put it at the outermost level of the allocator if possible;
+ for example, if you have a function my_alloc() which calls
+ internal_alloc(), and the client request is put inside internal_alloc(),
+ stack traces relating to the heap block will contain entries for both
+ my_alloc() and internal_alloc(), which is probably not what you want.
+
+ For Memcheck users: if you use VALGRIND_MALLOCLIKE_BLOCK to carve out
+ custom blocks from within a heap block, B, that has been allocated with
+ malloc/calloc/new/etc, then block B will be *ignored* during leak-checking
+ -- the custom blocks will take precedence.
+
+ VALGRIND_FREELIKE_BLOCK is the partner to VALGRIND_MALLOCLIKE_BLOCK. For
+ Memcheck, it does two things:
+
+ - It records that the block has been deallocated. This assumes that the
+ block was annotated as having been allocated via
+ VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued.
+
+ - It marks the block as being unaddressable.
+
+ VALGRIND_FREELIKE_BLOCK should be put immediately after the point where a
+ heap block is deallocated.
+
+ VALGRIND_RESIZEINPLACE_BLOCK informs a tool about reallocation. For
+ Memcheck, it does four things:
+
+ - It records that the size of a block has been changed. This assumes that
+ the block was annotated as having been allocated via
+ VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued.
+
+ - If the block shrank, it marks the freed memory as being unaddressable.
+
+ - If the block grew, it marks the new area as undefined and defines a
+ redzone past the end of the new block.
+
+ - The V-bits of the overlap between the old and the new block are preserved.
+
+ VALGRIND_RESIZEINPLACE_BLOCK should be put after allocation of the new block
+ and before deallocation of the old block.
+
+ In many cases, these three client requests will not be enough to get your
+ allocator working well with Memcheck. More specifically, if your allocator
+ writes to freed blocks in any way, then a VALGRIND_MAKE_MEM_UNDEFINED call
+ will be necessary to mark the memory as addressable just before the write
+ occurs; otherwise you'll get a lot of invalid write errors. For example,
+ you'll need to do this if your allocator recycles freed blocks, but it
+ zeroes them before handing them back out (via VALGRIND_MALLOCLIKE_BLOCK).
+ Alternatively, if your allocator reuses freed blocks for allocator-internal
+ data structures, VALGRIND_MAKE_MEM_UNDEFINED calls will also be necessary.
+
+ Really, what's happening is a blurring of the lines between the client
+ program and the allocator... after VALGRIND_FREELIKE_BLOCK is called, the
+ memory should be considered unaddressable to the client program, but the
+ allocator knows more than the rest of the client program and so may be able
+ to safely access it. Extra client requests are necessary for Valgrind to
+ understand the distinction between the allocator and the rest of the
+ program.
+
+ Ignored if addr == 0.
+*/
+#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
+ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MALLOCLIKE_BLOCK, \
+ addr, sizeB, rzB, is_zeroed, 0)
-/* Mark a block of memory as having been freed by a free()-like function.
- `rzB' is redzone size; it must match that given to
- VALGRIND_MALLOCLIKE_BLOCK. Memory not freed will be detected by the leak
- checker. Put it immediately after the point where the block is freed. */
-#define VALGRIND_FREELIKE_BLOCK(addr, rzB) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
- VG_USERREQ__FREELIKE_BLOCK, \
- addr, rzB, 0, 0, 0); \
- }
+/* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details.
+ Ignored if addr == 0.
+*/
+#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \
+ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__RESIZEINPLACE_BLOCK, \
+ addr, oldSizeB, newSizeB, rzB, 0)
+
+/* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details.
+ Ignored if addr == 0.
+*/
+#define VALGRIND_FREELIKE_BLOCK(addr, rzB) \
+ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__FREELIKE_BLOCK, \
+ addr, rzB, 0, 0, 0)
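
A sketch of annotating a custom bump allocator carving chunks out of an
mmap()'d superblock; my_alloc/my_free are hypothetical, and alignment and
pool exhaustion are ignored:

   #include <stddef.h>
   #include <sys/mman.h>
   #include "valgrind.h"

   #define POOL_SIZE (1024 * 1024)
   #define RZ 16                   /* redzone bytes around each chunk */

   static char *pool_next;

   void *my_alloc(size_t n)
   {
      if (!pool_next)
         pool_next = mmap(NULL, POOL_SIZE, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      char *p = pool_next + RZ;            /* leading redzone */
      pool_next = p + n + RZ;              /* trailing redzone */
      VALGRIND_MALLOCLIKE_BLOCK(p, n, RZ, /*is_zeroed*/0);
      return p;
   }

   void my_free(void *p)
   {
      /* The pool recycles memory itself; only the annotation is needed. */
      VALGRIND_FREELIKE_BLOCK(p, RZ);
   }
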
/* Create a memory pool. */
#define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
- VG_USERREQ__CREATE_MEMPOOL, \
- pool, rzB, is_zeroed, 0, 0); \
- }
+ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CREATE_MEMPOOL, \
+ pool, rzB, is_zeroed, 0, 0)
+
+/* Create a memory pool with some flags specifying extended behaviour.
+ When flags is zero, the behaviour is identical to VALGRIND_CREATE_MEMPOOL.
+
+ The flag VALGRIND_MEMPOOL_METAPOOL specifies that the pieces of memory
+ associated with the pool using VALGRIND_MEMPOOL_ALLOC will be used
+ by the application as superblocks to dole out MALLOCLIKE blocks using
+ VALGRIND_MALLOCLIKE_BLOCK. In other words, a metapool is a two-level
+ pool: the first level is the blocks described by VALGRIND_MEMPOOL_ALLOC;
+ the second level blocks are described using VALGRIND_MALLOCLIKE_BLOCK.
+ Note that the association between the pool and the second level blocks
+ is implicit: second level blocks will be located inside first level
+ blocks. It is necessary to use the VALGRIND_MEMPOOL_METAPOOL flag
+ for such two-level pools, as otherwise valgrind will detect overlapping
+ memory blocks and abort execution (e.g. during leak search).
+
+ Such a meta pool can also be marked as an 'auto free' pool using the flag
+ VALGRIND_MEMPOOL_AUTO_FREE, which must be OR-ed together with
+ VALGRIND_MEMPOOL_METAPOOL. For an 'auto free' pool, VALGRIND_MEMPOOL_FREE
+ will automatically free the second level blocks that are contained
+ inside the first level block freed with VALGRIND_MEMPOOL_FREE.
+ In other words, calling VALGRIND_MEMPOOL_FREE will cause implicit calls
+ to VALGRIND_FREELIKE_BLOCK for all the second level blocks included
+ in the first level block.
+ Note: it is an error to use the VALGRIND_MEMPOOL_AUTO_FREE flag
+ without the VALGRIND_MEMPOOL_METAPOOL flag.
+*/
+#define VALGRIND_MEMPOOL_AUTO_FREE 1
+#define VALGRIND_MEMPOOL_METAPOOL 2
+#define VALGRIND_CREATE_MEMPOOL_EXT(pool, rzB, is_zeroed, flags) \
+ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CREATE_MEMPOOL, \
+ pool, rzB, is_zeroed, flags, 0)
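
A sketch of creating such a two-level pool; pool_anchor is a hypothetical
stable address used as the pool handle:

   #include "valgrind.h"

   static char pool_anchor;

   void metapool_create(void)
   {
      VALGRIND_CREATE_MEMPOOL_EXT(&pool_anchor, /*rzB*/0, /*is_zeroed*/0,
                                  VALGRIND_MEMPOOL_METAPOOL
                                  | VALGRIND_MEMPOOL_AUTO_FREE);
   }
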
/* Destroy a memory pool. */
#define VALGRIND_DESTROY_MEMPOOL(pool) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
- VG_USERREQ__DESTROY_MEMPOOL, \
- pool, 0, 0, 0, 0); \
- }
+ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DESTROY_MEMPOOL, \
+ pool, 0, 0, 0, 0)
/* Associate a piece of memory with a memory pool. */
#define VALGRIND_MEMPOOL_ALLOC(pool, addr, size) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
- VG_USERREQ__MEMPOOL_ALLOC, \
- pool, addr, size, 0, 0); \
- }
+ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_ALLOC, \
+ pool, addr, size, 0, 0)
/* Disassociate a piece of memory from a memory pool. */
#define VALGRIND_MEMPOOL_FREE(pool, addr) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
- VG_USERREQ__MEMPOOL_FREE, \
- pool, addr, 0, 0, 0); \
- }
+ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_FREE, \
+ pool, addr, 0, 0, 0)
/* Disassociate any pieces outside a particular range. */
#define VALGRIND_MEMPOOL_TRIM(pool, addr, size) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
- VG_USERREQ__MEMPOOL_TRIM, \
- pool, addr, size, 0, 0); \
- }
+ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_TRIM, \
+ pool, addr, size, 0, 0)
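For the plain (single-level) pool requests, a sketch of a toy bump allocator over a static arena; arena, arena_used and the sizes are illustrative, not part of the API:

    #include <stddef.h>
    #include <valgrind/valgrind.h>

    #define ARENA_SIZE 8192
    static char   arena[ARENA_SIZE];   /* &arena also identifies the pool */
    static size_t arena_used;

    void pool_init(void)
    {
        VALGRIND_CREATE_MEMPOOL(arena, 0, 0);  /* no redzones, not zeroed */
    }

    void *pool_alloc(size_t size)
    {
        if (arena_used + size > ARENA_SIZE)
            return NULL;
        void *p = arena + arena_used;
        arena_used += size;
        VALGRIND_MEMPOOL_ALLOC(arena, p, size);  /* piece is addressable */
        return p;
    }

    void pool_reset(void)
    {
        arena_used = 0;
        /* An empty range contains no pieces, so this drops every piece. */
        VALGRIND_MEMPOOL_TRIM(arena, arena, 0);
    }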
 /* Move all the pieces associated with poolA to poolB. */
#define VALGRIND_MOVE_MEMPOOL(poolA, poolB) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
- VG_USERREQ__MOVE_MEMPOOL, \
- poolA, poolB, 0, 0, 0); \
- }
+ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MOVE_MEMPOOL, \
+ poolA, poolB, 0, 0, 0)
/* Resize and/or move a piece associated with a memory pool. */
#define VALGRIND_MEMPOOL_CHANGE(pool, addrA, addrB, size) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
- VG_USERREQ__MEMPOOL_CHANGE, \
- pool, addrA, addrB, size, 0); \
- }
+ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_CHANGE, \
+ pool, addrA, addrB, size, 0)
/* Return 1 if a mempool exists, else 0. */
#define VALGRIND_MEMPOOL_EXISTS(pool) \
- ({unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__MEMPOOL_EXISTS, \
- pool, 0, 0, 0, 0); \
- _qzz_res; \
- })
+ pool, 0, 0, 0, 0)
-/* Mark a piece of memory as being a stack. Returns a stack id. */
+/* Mark a piece of memory as being a stack. Returns a stack id.
+ start is the lowest addressable stack byte, end is the highest
+ addressable stack byte. */
#define VALGRIND_STACK_REGISTER(start, end) \
- ({unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__STACK_REGISTER, \
- start, end, 0, 0, 0); \
- _qzz_res; \
- })
+ start, end, 0, 0, 0)
/* Unmark the piece of memory associated with a stack id as being a
stack. */
#define VALGRIND_STACK_DEREGISTER(id) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
- VG_USERREQ__STACK_DEREGISTER, \
- id, 0, 0, 0, 0); \
- }
+ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__STACK_DEREGISTER, \
+ id, 0, 0, 0, 0)
-/* Change the start and end address of the stack id. */
+/* Change the start and end address of the stack id.
+ start is the new lowest addressable stack byte, end is the new highest
+ addressable stack byte. */
#define VALGRIND_STACK_CHANGE(id, start, end) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
- VG_USERREQ__STACK_CHANGE, \
- id, start, end, 0, 0); \
- }
-
-
+ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__STACK_CHANGE, \
+ id, start, end, 0, 0)
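The stack requests matter for code that runs on stacks valgrind did not see being created (coroutines, fibers, green threads). A minimal sketch, with STACK_SIZE and the coroutine machinery assumed:

    #include <stdlib.h>
    #include <valgrind/valgrind.h>

    #define STACK_SIZE (64 * 1024)   /* illustrative size */

    void coroutine_stack_demo(void)
    {
        char *stack = malloc(STACK_SIZE);
        if (!stack)
            return;

        /* start = lowest addressable byte, end = highest addressable byte */
        unsigned id = VALGRIND_STACK_REGISTER(stack, stack + STACK_SIZE - 1);

        /* ... switch to and run a coroutine on this stack ... */

        VALGRIND_STACK_DEREGISTER(id);
        free(stack);
    }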
+
+/* Load PDB debug info for Wine PE image_map. */
+#define VALGRIND_LOAD_PDB_DEBUGINFO(fd, ptr, total_size, delta) \
+ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__LOAD_PDB_DEBUGINFO, \
+ fd, ptr, total_size, delta, 0)
+
+/* Map a code address to a source file name and line number. buf64
+ must point to a 64-byte buffer in the caller's address space. The
+ result will be dumped in there and is guaranteed to be zero
+ terminated. If no info is found, the first byte is set to zero. */
+#define VALGRIND_MAP_IP_TO_SRCLOC(addr, buf64) \
+ (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
+ VG_USERREQ__MAP_IP_TO_SRCLOC, \
+ addr, buf64, 0, 0, 0)
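A sketch of a function looking up its own location; this only yields a result when the binary carries debug info, and where_am_i is an illustrative name:

    #include <stdio.h>
    #include <valgrind/valgrind.h>

    void where_am_i(void)
    {
        char buf64[64];   /* the request requires exactly a 64-byte buffer */
        VALGRIND_MAP_IP_TO_SRCLOC((void *)&where_am_i, buf64);
        if (buf64[0])     /* first byte stays zero when no info was found */
            printf("where_am_i lives at %s\n", buf64);
    }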
+
+/* Disable error reporting for this thread. Behaves in a stack like
+ way, so you can safely call this multiple times provided that
+ VALGRIND_ENABLE_ERROR_REPORTING is called the same number of times
+ to re-enable reporting. The first call of this macro disables
+ reporting. Subsequent calls have no effect except to increase the
+ number of VALGRIND_ENABLE_ERROR_REPORTING calls needed to re-enable
+ reporting. Child threads do not inherit this setting from their
+ parents -- they are always created with reporting enabled. */
+#define VALGRIND_DISABLE_ERROR_REPORTING \
+ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CHANGE_ERR_DISABLEMENT, \
+ 1, 0, 0, 0, 0)
+
+/* Re-enable error reporting, as per comments on
+ VALGRIND_DISABLE_ERROR_REPORTING. */
+#define VALGRIND_ENABLE_ERROR_REPORTING \
+ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CHANGE_ERR_DISABLEMENT, \
+ -1, 0, 0, 0, 0)
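A sketch of the intended bracketing pattern; noisy_third_party_init is a hypothetical call known to trigger reports the caller wants to ignore:

    #include <valgrind/valgrind.h>

    extern void noisy_third_party_init(void);   /* hypothetical */

    void quiet_init(void)
    {
        VALGRIND_DISABLE_ERROR_REPORTING;     /* pushes one "disabled" level */
        noisy_third_party_init();             /* reports here are swallowed */
        VALGRIND_ENABLE_ERROR_REPORTING;      /* must balance every disable */
    }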
+
+/* Execute a monitor command from the client program.
+   If a connection is open with GDB, the output is sent
+   according to the output mode set for vgdb.
+   If no connection is open, output goes to valgrind's log.
+   Returns 1 if the command is not recognised, 0 otherwise. */
+#define VALGRIND_MONITOR_COMMAND(command) \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, VG_USERREQ__GDB_MONITOR_COMMAND, \
+ command, 0, 0, 0, 0)
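A sketch using memcheck's "leak_check summary" monitor command (assuming the program runs under memcheck; other tools understand other commands):

    #include <valgrind/valgrind.h>

    void checkpoint_leaks(void)
    {
        /* Output goes to vgdb if a GDB connection is open, else to the log. */
        unsigned unknown = VALGRIND_MONITOR_COMMAND("leak_check summary");
        (void)unknown;   /* 1 if the tool did not recognise the command */
    }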
+
+
+#undef PLAT_x86_darwin
+#undef PLAT_amd64_darwin
+#undef PLAT_x86_win32
+#undef PLAT_amd64_win64
#undef PLAT_x86_linux
#undef PLAT_amd64_linux
#undef PLAT_ppc32_linux
-#undef PLAT_ppc64_linux
-#undef PLAT_ppc32_aix5
-#undef PLAT_ppc64_aix5
+#undef PLAT_ppc64be_linux
+#undef PLAT_ppc64le_linux
+#undef PLAT_arm_linux
+#undef PLAT_s390x_linux
+#undef PLAT_mips32_linux
+#undef PLAT_mips64_linux
+#undef PLAT_x86_solaris
+#undef PLAT_amd64_solaris
#endif /* __VALGRIND_H */
-
-