Diffstat
-rw-r--r--  configure.pri  4
-rw-r--r--  doc/global/externalsites/qtcreator.qdoc  50
-rw-r--r--  examples/widgets/widgets/tetrix/tetrixwindow.cpp  31
-rw-r--r--  mkspecs/common/msvc-version.conf  6
-rw-r--r--  mkspecs/features/default_pre.prf  2
-rw-r--r--  mkspecs/features/mac/default_post.prf  5
-rw-r--r--  mkspecs/features/qt_configure.prf  3
-rw-r--r--  qmake/Makefile.win32  2
-rw-r--r--  qmake/doc/src/qmake-manual.qdoc  22
-rw-r--r--  qmake/library/qmakebuiltins.cpp  330
-rw-r--r--  qmake/library/qmakeevaluator.h  5
-rw-r--r--  src/3rdparty/pcre2/AUTHORS  6
-rw-r--r--  src/3rdparty/pcre2/LICENCE  6
-rwxr-xr-x  src/3rdparty/pcre2/import_from_pcre2_tarball.sh  1
-rw-r--r--  src/3rdparty/pcre2/pcre2.pro  1
-rw-r--r--  src/3rdparty/pcre2/qt_attribution.json  7
-rw-r--r--  src/3rdparty/pcre2/src/pcre2.h  122
-rw-r--r--  src/3rdparty/pcre2/src/pcre2_auto_possess.c  56
-rw-r--r--  src/3rdparty/pcre2/src/pcre2_compile.c  31
-rw-r--r--  src/3rdparty/pcre2/src/pcre2_config.c  34
-rw-r--r--  src/3rdparty/pcre2/src/pcre2_dfa_match.c  430
-rw-r--r--  src/3rdparty/pcre2/src/pcre2_extuni.c  148
-rw-r--r--  src/3rdparty/pcre2/src/pcre2_internal.h  8
-rw-r--r--  src/3rdparty/pcre2/src/pcre2_intmodedep.h  31
-rw-r--r--  src/3rdparty/pcre2/src/pcre2_jit_compile.c  592
-rw-r--r--  src/3rdparty/pcre2/src/pcre2_jit_match.c  8
-rw-r--r--  src/3rdparty/pcre2/src/pcre2_match.c  336
-rw-r--r--  src/3rdparty/pcre2/src/pcre2_pattern_info.c  5
-rw-r--r--  src/3rdparty/pcre2/src/pcre2_substring.c  9
-rw-r--r--  src/3rdparty/pcre2/src/sljit/sljitConfig.h  6
-rw-r--r--  src/3rdparty/pcre2/src/sljit/sljitConfigInternal.h  43
-rw-r--r--  src/3rdparty/pcre2/src/sljit/sljitLir.c  693
-rw-r--r--  src/3rdparty/pcre2/src/sljit/sljitLir.h  425
-rw-r--r--  src/3rdparty/pcre2/src/sljit/sljitNativeARM_32.c  750
-rw-r--r--  src/3rdparty/pcre2/src/sljit/sljitNativeARM_64.c  748
-rw-r--r--  src/3rdparty/pcre2/src/sljit/sljitNativeARM_T2_32.c  535
-rw-r--r--  src/3rdparty/pcre2/src/sljit/sljitNativeMIPS_32.c  229
-rw-r--r--  src/3rdparty/pcre2/src/sljit/sljitNativeMIPS_64.c  129
-rw-r--r--  src/3rdparty/pcre2/src/sljit/sljitNativeMIPS_common.c  262
-rw-r--r--  src/3rdparty/pcre2/src/sljit/sljitNativePPC_64.c  55
-rw-r--r--  src/3rdparty/pcre2/src/sljit/sljitNativePPC_common.c  978
-rw-r--r--  src/3rdparty/pcre2/src/sljit/sljitNativeSPARC_32.c  119
-rw-r--r--  src/3rdparty/pcre2/src/sljit/sljitNativeSPARC_common.c  221
-rw-r--r--  src/3rdparty/pcre2/src/sljit/sljitNativeX86_32.c  413
-rw-r--r--  src/3rdparty/pcre2/src/sljit/sljitNativeX86_64.c  246
-rw-r--r--  src/3rdparty/pcre2/src/sljit/sljitNativeX86_common.c  203
-rw-r--r--  src/3rdparty/pcre2/src/sljit/sljitUtils.c  103
-rw-r--r--  src/corelib/doc/snippets/code/src_corelib_tools_qlistdata.cpp  2
-rw-r--r--  src/corelib/io/qdir.cpp  96
-rw-r--r--  src/corelib/io/qfsfileengine_unix.cpp  7
-rw-r--r--  src/corelib/io/qstandardpaths_win.cpp  17
-rw-r--r--  src/corelib/io/qurl.cpp  2
-rw-r--r--  src/corelib/kernel/qcore_unix.cpp  20
-rw-r--r--  src/corelib/kernel/qcore_unix_p.h  8
-rw-r--r--  src/corelib/kernel/qcoreevent.cpp  6
-rw-r--r--  src/corelib/plugin/qfactoryloader.cpp  24
-rw-r--r--  src/corelib/plugin/qfactoryloader_p.h  9
-rw-r--r--  src/corelib/plugin/qlibrary.cpp  5
-rw-r--r--  src/corelib/plugin/qpluginloader.cpp  6
-rw-r--r--  src/corelib/serialization/qdatastream.cpp  2
-rw-r--r--  src/corelib/serialization/qjsondocument.cpp  2
-rw-r--r--  src/corelib/serialization/qjsonvalue.cpp  1
-rw-r--r--  src/corelib/tools/qchar.h  2
-rw-r--r--  src/corelib/tools/qcollator.cpp  14
-rw-r--r--  src/gui/image/qiconengine.cpp  2
-rw-r--r--  src/gui/kernel/qevent.cpp  12
-rw-r--r--  src/gui/kernel/qinputdevicemanager_p.h  2
-rw-r--r--  src/gui/kernel/qplatforminputcontextplugin_p.h  2
-rw-r--r--  src/gui/kernel/qplatformintegrationplugin.h  2
-rw-r--r--  src/gui/kernel/qplatformsharedgraphicscache.h  2
-rw-r--r--  src/gui/kernel/qplatformthemeplugin.h  2
-rw-r--r--  src/gui/opengl/qopenglbuffer.cpp  2
-rw-r--r--  src/network/access/qnetworkrequest.cpp  9
-rw-r--r--  src/plugins/platforms/windows/qwindowstabletsupport.cpp  9
-rw-r--r--  src/plugins/platforms/windows/qwindowstabletsupport.h  1
-rw-r--r--  src/plugins/platforms/windows/uiautomation/qwindowsuiamainprovider.cpp  67
-rw-r--r--  src/plugins/platforms/winrt/qwinrtscreen.cpp  9
-rw-r--r--  src/plugins/platforms/xcb/qxcbconnection.cpp  6
-rw-r--r--  src/plugins/platforms/xcb/qxcbconnection.h  7
-rw-r--r--  src/plugins/platforms/xcb/qxcbconnection_xi2.cpp  94
-rw-r--r--  src/plugins/platforms/xcb/qxcbwindow.cpp  36
-rw-r--r--  src/plugins/platforms/xcb/qxcbwindow.h  2
-rw-r--r--  src/plugins/styles/mac/qmacstyle_mac.mm  2
-rw-r--r--  src/widgets/doc/src/widgets-and-layouts/styles.qdoc  7
-rw-r--r--  src/widgets/styles/qstylesheetstyle.cpp  5
-rw-r--r--  tests/auto/corelib/io/largefile/tst_largefile.cpp  6
-rw-r--r--  tests/auto/corelib/io/qdir/tst_qdir.cpp  44
-rw-r--r--  tests/auto/corelib/itemmodels/qsortfilterproxymodel/tst_qsortfilterproxymodel.cpp  48
-rw-r--r--  tests/auto/corelib/plugin/plugin.pro  1
-rw-r--r--  tests/auto/corelib/plugin/qplugin/invalidplugin/invalidplugin.pro  5
-rw-r--r--  tests/auto/corelib/plugin/qplugin/invalidplugin/main.cpp  49
-rw-r--r--  tests/auto/corelib/plugin/qplugin/qplugin.pro  4
-rw-r--r--  tests/auto/corelib/plugin/qplugin/tst_qplugin.cpp  108
-rw-r--r--  tests/auto/corelib/plugin/qpluginloader/tst_qpluginloader.cpp  2
-rw-r--r--  tests/auto/corelib/tools/qcollator/tst_qcollator.cpp  3
-rw-r--r--  tests/auto/widgets/styles/qstylesheetstyle/tst_qstylesheetstyle.cpp  12
96 files changed, 5754 insertions, 3480 deletions
diff --git a/configure.pri b/configure.pri
index 754e9d7317..41c3f13ea1 100644
--- a/configure.pri
+++ b/configure.pri
@@ -5,7 +5,7 @@ QT_BUILD_TREE = $$shadowed($$PWD)
# custom command line handling
defineTest(qtConfCommandline_qmakeArgs) {
- contains(1, QMAKE_[A-Z_]+ *[-+]?=.*) {
+ contains(1, QMAKE_[A-Z0-9_]+ *[-+]?=.*) {
config.input.qmakeArgs += $$1
export(config.input.qmakeArgs)
return(true)
@@ -449,8 +449,8 @@ defineTest(reloadSpec) {
eval($$l)
include($$QMAKESPEC/qmake.conf)
load(spec_post)
- load(default_pre)
CONFIG += $$_SAVED_CONFIG
+ load(default_pre)
# ensure pristine environment for configuration. again.
discard_from($$[QT_HOST_DATA/get]/mkspecs/qconfig.pri)
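The widened character class above lets configure accept qmake variable assignments whose names contain digits (for example QMAKE_CXXFLAGS_CXX1Z) as qmake arguments. A minimal stand-alone sketch, purely illustrative and not part of the change, comparing what the old and new patterns match:

    #include <QRegularExpression>
    #include <QDebug>

    int main()
    {
        // Anchored equivalents of the contains() tests in
        // qtConfCommandline_qmakeArgs() above (illustration only).
        const QRegularExpression oldPattern(
            QStringLiteral("^QMAKE_[A-Z_]+ *[-+]?=.*$"));
        const QRegularExpression newPattern(
            QStringLiteral("^QMAKE_[A-Z0-9_]+ *[-+]?=.*$"));

        const QString arg = QStringLiteral("QMAKE_CXXFLAGS_CXX1Z += -std=c++17");
        qDebug() << oldPattern.match(arg).hasMatch(); // false: digit rejected
        qDebug() << newPattern.match(arg).hasMatch(); // true: digits accepted
        return 0;
    }
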
diff --git a/doc/global/externalsites/qtcreator.qdoc b/doc/global/externalsites/qtcreator.qdoc
index 7f2322d041..d70f35b04c 100644
--- a/doc/global/externalsites/qtcreator.qdoc
+++ b/doc/global/externalsites/qtcreator.qdoc
@@ -114,10 +114,6 @@
\title Qt Creator: Keyboard Shortcuts
*/
/*!
- \externalpage http://doc.qt.io/qtcreator/quick-screens.html
- \title Qt Creator: Creating Screens
-*/
-/*!
\externalpage http://doc.qt.io/qtcreator/qmldesigner-pathview-editor.html
\title Qt Creator: Editing PathView Properties
*/
@@ -126,6 +122,22 @@
\title Qt Creator: Adding Connections
*/
/*!
+ \externalpage http://doc.qt.io/qtcreator/quick-signals.html
+ \title Qt Creator: Connecting Objects to Signals
+*/
+* /*!
+ \externalpage http://doc.qt.io/qtcreator/quick-dynamic-properties.html
+ \title Qt Creator: Specifying Dynamic Properties
+*/
+/*!
+ \externalpage http://doc.qt.io/qtcreator/quick-property-bindings.html
+ \title Qt Creator: Adding Bindings Between Properties
+*/
+/*!
+ \externalpage http://doc.qt.io/qtcreator/quick-connections-backend.html
+ \title Qt Creator: Managing C++ Backend Objects
+*/
+/*!
\externalpage http://doc.qt.io/qtcreator/qtcreator-transitions-example.html
\title Qt Creator: Creating a Qt Quick Application
*/
@@ -147,7 +159,7 @@
*/
/*!
\externalpage http://doc.qt.io/qtcreator/creator-using-qt-quick-designer.html
- \title Qt Creator: Using Qt Quick Designer
+ \title Qt Creator: Editing QML Files in Design Mode
*/
/*!
\externalpage http://doc.qt.io/qtcreator/quick-projects.html
@@ -343,7 +355,7 @@
*/
/*!
\externalpage http://doc.qt.io/qtcreator/creator-analyzer.html
- \title Qt Creator: Detecting Memory Leaks
+ \title Qt Creator: Detecting Memory Leaks with Memcheck
*/
/*!
\externalpage http://doc.qt.io/qtcreator/creator-cache-profiler.html
@@ -351,7 +363,7 @@
*/
/*!
\externalpage http://doc.qt.io/qtcreator/creator-running-valgrind-remotely.html
- \title Qt Creator: Running Valgrind Tools Remotely
+ \title Qt Creator: Running Valgrind Tools on External Applications
*/
/*!
\externalpage http://doc.qt.io/qtcreator/creator-valgrind-overview.html
@@ -530,8 +542,12 @@
*/
/*!
- \externalpage http://doc.qt.io/qtcreator/creator-clang-static-analyzer.html
- \title Qt Creator: Using Clang Static Analyzer
+ \externalpage http://doc.qt.io/qtcreator/creator-clang-tools.html
+ \title Qt Creator: Using Clang Tools
+*/
+/*!
+ \externalpage http://doc.qt.io/qtcreator/creator-heob.html
+ \title Qt Creator: Detecting Memory Leaks with Heob
*/
/*!
\externalpage http://doc.qt.io/qtcreator/creator-cpu-usage-analyzer.html
@@ -549,3 +565,19 @@
\externalpage http://doc.qt.io/qtcreator/creator-writing-program.html
\title Creating a Qt Widget Based Application
*/
+/*!
+ \externalpage http://doc.qt.io/qtcreator/qtquick-navigator.html
+ \title Qt Creator: Managing Item Hierarchy
+*/
+/*!
+ \externalpage http://doc.qt.io/qtcreator/qtquick-properties.html
+ \title Qt Creator: Specifying Item Properties
+*/
+/*!
+ \externalpage http://doc.qt.io/qtcreator/quick-states.html
+ \title Qt Creator: Adding States
+*/
+/*!
+ \externalpage http://doc.qt.io/qtcreator/creator-scxml.html
+ \title Qt Creator: Editing State Charts
+*/
diff --git a/examples/widgets/widgets/tetrix/tetrixwindow.cpp b/examples/widgets/widgets/tetrix/tetrixwindow.cpp
index 43fbdcdea8..7e951aceb8 100644
--- a/examples/widgets/widgets/tetrix/tetrixwindow.cpp
+++ b/examples/widgets/widgets/tetrix/tetrixwindow.cpp
@@ -83,14 +83,25 @@ TetrixWindow::TetrixWindow()
pauseButton->setFocusPolicy(Qt::NoFocus);
//! [3] //! [4]
- connect(startButton, SIGNAL(clicked()), board, SLOT(start()));
+ connect(startButton, &QPushButton::clicked, board, &TetrixBoard::start);
//! [4] //! [5]
- connect(quitButton , SIGNAL(clicked()), qApp, SLOT(quit()));
- connect(pauseButton, SIGNAL(clicked()), board, SLOT(pause()));
- connect(board, SIGNAL(scoreChanged(int)), scoreLcd, SLOT(display(int)));
- connect(board, SIGNAL(levelChanged(int)), levelLcd, SLOT(display(int)));
- connect(board, SIGNAL(linesRemovedChanged(int)),
- linesLcd, SLOT(display(int)));
+ connect(quitButton , &QPushButton::clicked, qApp, &QApplication::quit);
+ connect(pauseButton, &QPushButton::clicked, board, &TetrixBoard::pause);
+#if __cplusplus >= 201402L
+ connect(board, &TetrixBoard::scoreChanged,
+ scoreLcd, qOverload<int>(&QLCDNumber::display));
+ connect(board, &TetrixBoard::levelChanged,
+ levelLcd, qOverload<int>(&QLCDNumber::display));
+ connect(board, &TetrixBoard::linesRemovedChanged,
+ linesLcd, qOverload<int>(&QLCDNumber::display));
+#else
+ connect(board, &TetrixBoard::scoreChanged,
+ scoreLcd, QOverload<int>::of(&QLCDNumber::display));
+ connect(board, &TetrixBoard::levelChanged,
+ levelLcd, QOverload<int>::of(&QLCDNumber::display));
+ connect(board, &TetrixBoard::linesRemovedChanged,
+ linesLcd, QOverload<int>::of(&QLCDNumber::display));
+#endif
//! [5]
//! [6]
@@ -117,9 +128,9 @@ TetrixWindow::TetrixWindow()
//! [7]
QLabel *TetrixWindow::createLabel(const QString &text)
{
- QLabel *lbl = new QLabel(text);
- lbl->setAlignment(Qt::AlignHCenter | Qt::AlignBottom);
- return lbl;
+ QLabel *label = new QLabel(text);
+ label->setAlignment(Qt::AlignHCenter | Qt::AlignBottom);
+ return label;
}
//! [7]
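For reference, a self-contained sketch of the overload-selection idiom the example now uses; the slider/LCD pairing here is hypothetical and not part of the patch. QLCDNumber::display() is overloaded, so the int overload must be picked explicitly, either with qOverload (needs C++14) or QOverload<int>::of (works with C++11):

    #include <QApplication>
    #include <QLCDNumber>
    #include <QSlider>
    #include <QVBoxLayout>
    #include <QWidget>

    int main(int argc, char *argv[])
    {
        QApplication app(argc, argv);

        QWidget window;
        auto *slider = new QSlider(Qt::Horizontal);
        auto *lcd = new QLCDNumber;
        auto *layout = new QVBoxLayout(&window);
        layout->addWidget(slider);
        layout->addWidget(lcd);

        // Select the int overload of QLCDNumber::display() explicitly.
    #if __cplusplus >= 201402L
        QObject::connect(slider, &QSlider::valueChanged,
                         lcd, qOverload<int>(&QLCDNumber::display));
    #else
        QObject::connect(slider, &QSlider::valueChanged,
                         lcd, QOverload<int>::of(&QLCDNumber::display));
    #endif

        window.show();
        return app.exec();
    }
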
diff --git a/mkspecs/common/msvc-version.conf b/mkspecs/common/msvc-version.conf
index 3fb55c9d81..5805383a04 100644
--- a/mkspecs/common/msvc-version.conf
+++ b/mkspecs/common/msvc-version.conf
@@ -110,6 +110,12 @@ greaterThan(QMAKE_MSC_VER, 1909) {
QMAKE_CXXFLAGS_CXX14 = -std:c++14
QMAKE_CXXFLAGS_CXX1Z = -std:c++17
}
+
+ # MSVC 2017 15.8+ fixed std::aligned_storage but compilation fails without
+ # this flag since the fix breaks binary compatibility.
+ greaterThan(QMAKE_MSC_VER, 1914) {
+ DEFINES += _ENABLE_EXTENDED_ALIGNED_STORAGE
+ }
}
greaterThan(QMAKE_MSC_VER, 1910) {
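For context, a small sketch (not taken from Qt) of the kind of code the new define affects: VS 2017 15.8 makes std::aligned_storage honour extended (over-)alignment, but only when _ENABLE_EXTENDED_ALIGNED_STORAGE is defined; without it, requesting such an alignment is a hard error rather than silently capping at the default alignment as older versions did.

    #include <type_traits>

    // Hypothetical over-aligned type; 32-byte alignment exceeds
    // alignof(max_align_t) on MSVC.
    struct alignas(32) Packet {
        float lanes[8];
    };

    // Pre-15.8 MSVC silently gave this storage only default alignment;
    // 15.8+ rejects it unless _ENABLE_EXTENDED_ALIGNED_STORAGE is defined,
    // in which case the full 32-byte alignment is honoured.
    using PacketStorage =
        std::aligned_storage<sizeof(Packet), alignof(Packet)>::type;

    static_assert(sizeof(PacketStorage) >= sizeof(Packet),
                  "storage must be large enough for Packet");
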
diff --git a/mkspecs/features/default_pre.prf b/mkspecs/features/default_pre.prf
index 1f2f1ff2de..1c24bf071a 100644
--- a/mkspecs/features/default_pre.prf
+++ b/mkspecs/features/default_pre.prf
@@ -11,7 +11,7 @@ CONFIG = \
testcase_targets import_plugins import_qpa_plugin \
$$CONFIG
-!build_pass:!isEmpty(QT_LICHECK) {
+!build_pass:!isEmpty(QT_LICHECK):!QTDIR_build {
#
# call license checker (but cache result for one day)
#
diff --git a/mkspecs/features/mac/default_post.prf b/mkspecs/features/mac/default_post.prf
index 21d487f1f9..c6eb7c5a2c 100644
--- a/mkspecs/features/mac/default_post.prf
+++ b/mkspecs/features/mac/default_post.prf
@@ -215,5 +215,8 @@ xcode_product_bundle_identifier_setting.name = PRODUCT_BUNDLE_IDENTIFIER
xcode_product_bundle_identifier_setting.value = $$QMAKE_TARGET_BUNDLE_PREFIX
isEmpty(xcode_product_bundle_identifier_setting.value): \
xcode_product_bundle_identifier_setting.value = "com.yourcompany"
-xcode_product_bundle_identifier_setting.value = "$${xcode_product_bundle_identifier_setting.value}.${PRODUCT_NAME:rfc1034identifier}"
+xcode_product_bundle_target = $$QMAKE_BUNDLE
+isEmpty(xcode_product_bundle_target): \
+ xcode_product_bundle_target = ${PRODUCT_NAME:rfc1034identifier}
+xcode_product_bundle_identifier_setting.value = "$${xcode_product_bundle_identifier_setting.value}.$${xcode_product_bundle_target}"
QMAKE_MAC_XCODE_SETTINGS += xcode_product_bundle_identifier_setting
diff --git a/mkspecs/features/qt_configure.prf b/mkspecs/features/qt_configure.prf
index b19bb1cf75..36b016bc7e 100644
--- a/mkspecs/features/qt_configure.prf
+++ b/mkspecs/features/qt_configure.prf
@@ -1016,7 +1016,8 @@ defineTest(qtConfTest_compile) {
QMAKE_MAKE = "$$QMAKE_MAKE clean && $$QMAKE_MAKE"
mkpath($$test_out_dir)|error()
- write_file($$test_base_out_dir/.qmake.cache)|error()
+ cont = "CONFIG += QTDIR_build"
+ write_file($$test_base_out_dir/.qmake.cache, cont)|error()
$${1}.literal_args += $$qtConfAllLibraryArgs($$eval($${1}.resolved_uses))
diff --git a/qmake/Makefile.win32 b/qmake/Makefile.win32
index 851185f3ff..a1699bd6f8 100644
--- a/qmake/Makefile.win32
+++ b/qmake/Makefile.win32
@@ -39,7 +39,7 @@ CFLAGS_BARE = -c -Fo./ -Fdqmake.pdb \
-D_CRT_SECURE_NO_WARNINGS -D_SCL_SECURE_NO_WARNINGS \
-DQT_VERSION_STR=\"$(QT_VERSION)\" -DQT_VERSION_MAJOR=$(QT_MAJOR_VERSION) -DQT_VERSION_MINOR=$(QT_MINOR_VERSION) -DQT_VERSION_PATCH=$(QT_PATCH_VERSION) \
-DQT_BUILD_QMAKE -DQT_BOOTSTRAPPED -DPROEVALUATOR_FULL \
- -DQT_NO_FOREACH -DUNICODE
+ -DQT_NO_FOREACH -DUNICODE -D_ENABLE_EXTENDED_ALIGNED_STORAGE
CFLAGS = $(CFLAGS_PCH) $(CFLAGS_BARE) $(CFLAGS)
CXXFLAGS_BARE = $(CFLAGS_BARE)
diff --git a/qmake/doc/src/qmake-manual.qdoc b/qmake/doc/src/qmake-manual.qdoc
index 22c34adccd..409062cf49 100644
--- a/qmake/doc/src/qmake-manual.qdoc
+++ b/qmake/doc/src/qmake-manual.qdoc
@@ -1046,7 +1046,7 @@
library and header files. The proper include and library paths for the
Qt library will automatically be added to the project. This is defined
by default, and can be fine-tuned with the \c{\l{#qt}{QT}} variable.
- \row \li x11 \li The target is a X11 application or library. The proper
+ \row \li x11 \li The target is an X11 application or library. The proper
include paths and libraries will automatically be added to the
project.
\row \li testcase \li The target is an automated test.
@@ -1282,7 +1282,7 @@
\section1 LEXOBJECTS
Specifies the names of intermediate Lex object
- files.The value of this variable is typically handled by
+ files. The value of this variable is typically handled by
qmake and rarely needs to be modified.
\target LEXSOURCES
@@ -1779,9 +1779,9 @@
\note This variable is used on \macos, iOS, tvOS, and watchOS only.
- For projects where the build target is an \macos, iOS, tvOS, or watchOS framework, this
- variable is used to specify the version number that will be applied to the
- framework that is built.
+ For projects where the build target is a \macos, iOS, tvOS, or watchOS
+ framework, this variable is used to specify the version number that will be
+ applied to the framework that is built.
By default, this variable contains the same value as the \l{#VERSION}{VERSION}
variable.
@@ -1865,7 +1865,7 @@
\note This variable is used on Unix platforms only.
Specifies the location of X11 header file paths to be added
- to \l{INCLUDEPATH} when building a X11 target. The value of this variable
+ to \l{INCLUDEPATH} when building an X11 target. The value of this variable
is typically handled by qmake or
\l{#QMAKESPEC}{qmake.conf} and rarely needs to be modified.
@@ -2255,7 +2255,7 @@
\section1 QMAKE_QMAKE
- Contains the abosolute path of the qmake executable.
+ Contains the absolute path of the qmake executable.
\note Do not attempt to overwrite the value of this variable.
@@ -2323,7 +2323,7 @@
If defined, the value of this variable is used as a path to be prepended to
the built shared library's \c SONAME identifier. The \c SONAME is the
identifier that the dynamic linker will later use to reference the library.
- In general this reference may be a library name or full library path. On \macos,
+ In general, this reference may be a library name or full library path. On \macos,
iOS, tvOS, and watchOS, the path may be specified relatively using the following
placeholders:
@@ -2629,7 +2629,7 @@
\section1 TARGET_x.y.z
- Specifies the extension of \c TARGET with version number. The
+ Specifies the extension of \c TARGET with a version number. The
value of this variable is typically handled by
qmake or \l{#QMAKESPEC}{qmake.conf} and rarely
needs to be modified.
@@ -2650,7 +2650,7 @@
The subdirectories are specified using the \l{#SUBDIRS}{SUBDIRS}
variable.
\row \li aux \li Creates a Makefile for not building anything. Use this if no compiler
- needs to be invoked to create the target, for instance because your
+ needs to be invoked to create the target; for instance, because your
project is written in an interpreted language.
\note This template type is only available for Makefile-based
generators. In particular, it will not work with the vcxproj and
@@ -2832,7 +2832,7 @@
Windows Phone.
\row
\li logo_480x800
- \li Splash sceen image file of size 480x800 pixels. This is only supported on
+ \li Splash screen image file of size 480x800 pixels. This is only supported on
Windows Phone.
\row
\li logo_large
diff --git a/qmake/library/qmakebuiltins.cpp b/qmake/library/qmakebuiltins.cpp
index 1181435b18..fda1e1c593 100644
--- a/qmake/library/qmakebuiltins.cpp
+++ b/qmake/library/qmakebuiltins.cpp
@@ -1223,6 +1223,171 @@ QMakeEvaluator::VisitReturn QMakeEvaluator::evaluateBuiltinExpand(
return ReturnTrue;
}
+QMakeEvaluator::VisitReturn QMakeEvaluator::testFunc_cache(const ProStringList &args)
+{
+ bool persist = true;
+ enum { TargetStash, TargetCache, TargetSuper } target = TargetCache;
+ enum { CacheSet, CacheAdd, CacheSub } mode = CacheSet;
+ ProKey srcvar;
+ if (args.count() >= 2) {
+ const auto opts = split_value_list(args.at(1).toQStringRef());
+ for (const ProString &opt : opts) {
+ if (opt == QLatin1String("transient")) {
+ persist = false;
+ } else if (opt == QLatin1String("super")) {
+ target = TargetSuper;
+ } else if (opt == QLatin1String("stash")) {
+ target = TargetStash;
+ } else if (opt == QLatin1String("set")) {
+ mode = CacheSet;
+ } else if (opt == QLatin1String("add")) {
+ mode = CacheAdd;
+ } else if (opt == QLatin1String("sub")) {
+ mode = CacheSub;
+ } else {
+ evalError(fL1S("cache(): invalid flag %1.").arg(opt.toQStringView()));
+ return ReturnFalse;
+ }
+ }
+ if (args.count() >= 3) {
+ srcvar = args.at(2).toKey();
+ } else if (mode != CacheSet) {
+ evalError(fL1S("cache(): modes other than 'set' require a source variable."));
+ return ReturnFalse;
+ }
+ }
+ QString varstr;
+ ProKey dstvar = args.at(0).toKey();
+ if (!dstvar.isEmpty()) {
+ if (srcvar.isEmpty())
+ srcvar = dstvar;
+ ProValueMap::Iterator srcvarIt;
+ if (!findValues(srcvar, &srcvarIt)) {
+ evalError(fL1S("Variable %1 is not defined.").arg(srcvar.toQStringView()));
+ return ReturnFalse;
+ }
+ // The caches for the host and target may differ (e.g., when we are manipulating
+ // CONFIG), so we cannot compute a common new value for both.
+ const ProStringList &diffval = *srcvarIt;
+ ProStringList newval;
+ bool changed = false;
+ for (bool hostBuild = false; ; hostBuild = true) {
+#ifdef PROEVALUATOR_THREAD_SAFE
+ m_option->mutex.lock();
+#endif
+ QMakeBaseEnv *baseEnv =
+ m_option->baseEnvs.value(QMakeBaseKey(m_buildRoot, m_stashfile, hostBuild));
+#ifdef PROEVALUATOR_THREAD_SAFE
+ // It's ok to unlock this before locking baseEnv,
+ // as we have no intention to initialize the env.
+ m_option->mutex.unlock();
+#endif
+ do {
+ if (!baseEnv)
+ break;
+#ifdef PROEVALUATOR_THREAD_SAFE
+ QMutexLocker locker(&baseEnv->mutex);
+ if (baseEnv->inProgress && baseEnv->evaluator != this) {
+ // The env is still in the works, but it may be already past the cache
+ // loading. So we need to wait for completion and amend it as usual.
+ QThreadPool::globalInstance()->releaseThread();
+ baseEnv->cond.wait(&baseEnv->mutex);
+ QThreadPool::globalInstance()->reserveThread();
+ }
+ if (!baseEnv->isOk)
+ break;
+#endif
+ QMakeEvaluator *baseEval = baseEnv->evaluator;
+ const ProStringList &oldval = baseEval->values(dstvar);
+ if (mode == CacheSet) {
+ newval = diffval;
+ } else {
+ newval = oldval;
+ if (mode == CacheAdd)
+ newval += diffval;
+ else
+ newval.removeEach(diffval);
+ }
+ if (oldval != newval) {
+ if (target != TargetStash || !m_stashfile.isEmpty()) {
+ baseEval->valuesRef(dstvar) = newval;
+ if (target == TargetSuper) {
+ do {
+ if (dstvar == QLatin1String("QMAKEPATH")) {
+ baseEval->m_qmakepath = newval.toQStringList();
+ baseEval->updateMkspecPaths();
+ } else if (dstvar == QLatin1String("QMAKEFEATURES")) {
+ baseEval->m_qmakefeatures = newval.toQStringList();
+ } else {
+ break;
+ }
+ baseEval->updateFeaturePaths();
+ if (hostBuild == m_hostBuild)
+ m_featureRoots = baseEval->m_featureRoots;
+ } while (false);
+ }
+ }
+ changed = true;
+ }
+ } while (false);
+ if (hostBuild)
+ break;
+ }
+ // We assume that whatever got the cached value to be what it is now will do so
+ // the next time as well, so we just skip the persisting if nothing changed.
+ if (!persist || !changed)
+ return ReturnTrue;
+ varstr = dstvar.toQString();
+ if (mode == CacheAdd)
+ varstr += QLatin1String(" +=");
+ else if (mode == CacheSub)
+ varstr += QLatin1String(" -=");
+ else
+ varstr += QLatin1String(" =");
+ if (diffval.count() == 1) {
+ varstr += QLatin1Char(' ');
+ varstr += quoteValue(diffval.at(0));
+ } else if (!diffval.isEmpty()) {
+ for (const ProString &vval : diffval) {
+ varstr += QLatin1String(" \\\n ");
+ varstr += quoteValue(vval);
+ }
+ }
+ varstr += QLatin1Char('\n');
+ }
+ QString fn;
+ QMakeVfs::VfsFlags flags = (m_cumulative ? QMakeVfs::VfsCumulative : QMakeVfs::VfsExact);
+ if (target == TargetSuper) {
+ if (m_superfile.isEmpty()) {
+ m_superfile = QDir::cleanPath(m_outputDir + QLatin1String("/.qmake.super"));
+ printf("Info: creating super cache file %s\n", qPrintable(QDir::toNativeSeparators(m_superfile)));
+ valuesRef(ProKey("_QMAKE_SUPER_CACHE_")) << ProString(m_superfile);
+ }
+ fn = m_superfile;
+ } else if (target == TargetCache) {
+ if (m_cachefile.isEmpty()) {
+ m_cachefile = QDir::cleanPath(m_outputDir + QLatin1String("/.qmake.cache"));
+ printf("Info: creating cache file %s\n", qPrintable(QDir::toNativeSeparators(m_cachefile)));
+ valuesRef(ProKey("_QMAKE_CACHE_")) << ProString(m_cachefile);
+ // We could update m_{source,build}Root and m_featureRoots here, or even
+ // "re-home" our rootEnv, but this doesn't sound too useful - if somebody
+ // wanted qmake to find something in the build directory, he could have
+ // done so "from the outside".
+ // The sub-projects will find the new cache all by themselves.
+ }
+ fn = m_cachefile;
+ } else {
+ fn = m_stashfile;
+ if (fn.isEmpty())
+ fn = QDir::cleanPath(m_outputDir + QLatin1String("/.qmake.stash"));
+ if (!m_vfs->exists(fn, flags)) {
+ printf("Info: creating stash file %s\n", qPrintable(QDir::toNativeSeparators(fn)));
+ valuesRef(ProKey("_QMAKE_STASH_")) << ProString(fn);
+ }
+ }
+ return writeFile(fL1S("cache "), fn, QIODevice::Append, flags, varstr);
+}
+
QMakeEvaluator::VisitReturn QMakeEvaluator::evaluateBuiltinConditional(
const QMakeInternal::QMakeBuiltin &adef, const ProKey &function, const ProStringList &args)
{
@@ -1687,169 +1852,8 @@ QMakeEvaluator::VisitReturn QMakeEvaluator::evaluateBuiltinConditional(
#endif
return ReturnTrue;
}
- case T_CACHE: {
- bool persist = true;
- enum { TargetStash, TargetCache, TargetSuper } target = TargetCache;
- enum { CacheSet, CacheAdd, CacheSub } mode = CacheSet;
- ProKey srcvar;
- if (args.count() >= 2) {
- const auto opts = split_value_list(args.at(1).toQStringRef());
- for (const ProString &opt : opts) {
- if (opt == QLatin1String("transient")) {
- persist = false;
- } else if (opt == QLatin1String("super")) {
- target = TargetSuper;
- } else if (opt == QLatin1String("stash")) {
- target = TargetStash;
- } else if (opt == QLatin1String("set")) {
- mode = CacheSet;
- } else if (opt == QLatin1String("add")) {
- mode = CacheAdd;
- } else if (opt == QLatin1String("sub")) {
- mode = CacheSub;
- } else {
- evalError(fL1S("cache(): invalid flag %1.").arg(opt.toQStringView()));
- return ReturnFalse;
- }
- }
- if (args.count() >= 3) {
- srcvar = args.at(2).toKey();
- } else if (mode != CacheSet) {
- evalError(fL1S("cache(): modes other than 'set' require a source variable."));
- return ReturnFalse;
- }
- }
- QString varstr;
- ProKey dstvar = args.at(0).toKey();
- if (!dstvar.isEmpty()) {
- if (srcvar.isEmpty())
- srcvar = dstvar;
- ProValueMap::Iterator srcvarIt;
- if (!findValues(srcvar, &srcvarIt)) {
- evalError(fL1S("Variable %1 is not defined.").arg(srcvar.toQStringView()));
- return ReturnFalse;
- }
- // The caches for the host and target may differ (e.g., when we are manipulating
- // CONFIG), so we cannot compute a common new value for both.
- const ProStringList &diffval = *srcvarIt;
- ProStringList newval;
- bool changed = false;
- for (bool hostBuild = false; ; hostBuild = true) {
-#ifdef PROEVALUATOR_THREAD_SAFE
- m_option->mutex.lock();
-#endif
- QMakeBaseEnv *baseEnv =
- m_option->baseEnvs.value(QMakeBaseKey(m_buildRoot, m_stashfile, hostBuild));
-#ifdef PROEVALUATOR_THREAD_SAFE
- // It's ok to unlock this before locking baseEnv,
- // as we have no intention to initialize the env.
- m_option->mutex.unlock();
-#endif
- do {
- if (!baseEnv)
- break;
-#ifdef PROEVALUATOR_THREAD_SAFE
- QMutexLocker locker(&baseEnv->mutex);
- if (baseEnv->inProgress && baseEnv->evaluator != this) {
- // The env is still in the works, but it may be already past the cache
- // loading. So we need to wait for completion and amend it as usual.
- QThreadPool::globalInstance()->releaseThread();
- baseEnv->cond.wait(&baseEnv->mutex);
- QThreadPool::globalInstance()->reserveThread();
- }
- if (!baseEnv->isOk)
- break;
-#endif
- QMakeEvaluator *baseEval = baseEnv->evaluator;
- const ProStringList &oldval = baseEval->values(dstvar);
- if (mode == CacheSet) {
- newval = diffval;
- } else {
- newval = oldval;
- if (mode == CacheAdd)
- newval += diffval;
- else
- newval.removeEach(diffval);
- }
- if (oldval != newval) {
- if (target != TargetStash || !m_stashfile.isEmpty()) {
- baseEval->valuesRef(dstvar) = newval;
- if (target == TargetSuper) {
- do {
- if (dstvar == QLatin1String("QMAKEPATH")) {
- baseEval->m_qmakepath = newval.toQStringList();
- baseEval->updateMkspecPaths();
- } else if (dstvar == QLatin1String("QMAKEFEATURES")) {
- baseEval->m_qmakefeatures = newval.toQStringList();
- } else {
- break;
- }
- baseEval->updateFeaturePaths();
- if (hostBuild == m_hostBuild)
- m_featureRoots = baseEval->m_featureRoots;
- } while (false);
- }
- }
- changed = true;
- }
- } while (false);
- if (hostBuild)
- break;
- }
- // We assume that whatever got the cached value to be what it is now will do so
- // the next time as well, so we just skip the persisting if nothing changed.
- if (!persist || !changed)
- return ReturnTrue;
- varstr = dstvar.toQString();
- if (mode == CacheAdd)
- varstr += QLatin1String(" +=");
- else if (mode == CacheSub)
- varstr += QLatin1String(" -=");
- else
- varstr += QLatin1String(" =");
- if (diffval.count() == 1) {
- varstr += QLatin1Char(' ');
- varstr += quoteValue(diffval.at(0));
- } else if (!diffval.isEmpty()) {
- for (const ProString &vval : diffval) {
- varstr += QLatin1String(" \\\n ");
- varstr += quoteValue(vval);
- }
- }
- varstr += QLatin1Char('\n');
- }
- QString fn;
- QMakeVfs::VfsFlags flags = (m_cumulative ? QMakeVfs::VfsCumulative : QMakeVfs::VfsExact);
- if (target == TargetSuper) {
- if (m_superfile.isEmpty()) {
- m_superfile = QDir::cleanPath(m_outputDir + QLatin1String("/.qmake.super"));
- printf("Info: creating super cache file %s\n", qPrintable(QDir::toNativeSeparators(m_superfile)));
- valuesRef(ProKey("_QMAKE_SUPER_CACHE_")) << ProString(m_superfile);
- }
- fn = m_superfile;
- } else if (target == TargetCache) {
- if (m_cachefile.isEmpty()) {
- m_cachefile = QDir::cleanPath(m_outputDir + QLatin1String("/.qmake.cache"));
- printf("Info: creating cache file %s\n", qPrintable(QDir::toNativeSeparators(m_cachefile)));
- valuesRef(ProKey("_QMAKE_CACHE_")) << ProString(m_cachefile);
- // We could update m_{source,build}Root and m_featureRoots here, or even
- // "re-home" our rootEnv, but this doesn't sound too useful - if somebody
- // wanted qmake to find something in the build directory, he could have
- // done so "from the outside".
- // The sub-projects will find the new cache all by themselves.
- }
- fn = m_cachefile;
- } else {
- fn = m_stashfile;
- if (fn.isEmpty())
- fn = QDir::cleanPath(m_outputDir + QLatin1String("/.qmake.stash"));
- if (!m_vfs->exists(fn, flags)) {
- printf("Info: creating stash file %s\n", qPrintable(QDir::toNativeSeparators(fn)));
- valuesRef(ProKey("_QMAKE_STASH_")) << ProString(fn);
- }
- }
- return writeFile(fL1S("cache "), fn, QIODevice::Append, flags, varstr);
- }
+ case T_CACHE:
+ return testFunc_cache(args);
case T_RELOAD_PROPERTIES:
#ifdef QT_BUILD_QMAKE
m_option->reloadProperties();
diff --git a/qmake/library/qmakeevaluator.h b/qmake/library/qmakeevaluator.h
index 303a23064c..b87aaa0eec 100644
--- a/qmake/library/qmakeevaluator.h
+++ b/qmake/library/qmakeevaluator.h
@@ -250,6 +250,11 @@ public:
#endif
QByteArray getCommandOutput(const QString &args, int *exitCode) const;
+private:
+ // Implementation detail of evaluateBuiltinConditional():
+ VisitReturn testFunc_cache(const ProStringList &args);
+
+public:
QMakeEvaluator *m_caller;
#ifdef PROEVALUATOR_CUMULATIVE
bool m_cumulative;
diff --git a/src/3rdparty/pcre2/AUTHORS b/src/3rdparty/pcre2/AUTHORS
index e056ad6868..d5592bbc5b 100644
--- a/src/3rdparty/pcre2/AUTHORS
+++ b/src/3rdparty/pcre2/AUTHORS
@@ -8,7 +8,7 @@ Email domain: cam.ac.uk
University of Cambridge Computing Service,
Cambridge, England.
-Copyright (c) 1997-2017 University of Cambridge
+Copyright (c) 1997-2018 University of Cambridge
All rights reserved
@@ -19,7 +19,7 @@ Written by: Zoltan Herczeg
Email local part: hzmester
Emain domain: freemail.hu
-Copyright(c) 2010-2017 Zoltan Herczeg
+Copyright(c) 2010-2018 Zoltan Herczeg
All rights reserved.
@@ -30,7 +30,7 @@ Written by: Zoltan Herczeg
Email local part: hzmester
Emain domain: freemail.hu
-Copyright(c) 2009-2017 Zoltan Herczeg
+Copyright(c) 2009-2018 Zoltan Herczeg
All rights reserved.
####
diff --git a/src/3rdparty/pcre2/LICENCE b/src/3rdparty/pcre2/LICENCE
index 2b34d3f62b..bfe3c8d528 100644
--- a/src/3rdparty/pcre2/LICENCE
+++ b/src/3rdparty/pcre2/LICENCE
@@ -26,7 +26,7 @@ Email domain: cam.ac.uk
University of Cambridge Computing Service,
Cambridge, England.
-Copyright (c) 1997-2017 University of Cambridge
+Copyright (c) 1997-2018 University of Cambridge
All rights reserved.
@@ -37,7 +37,7 @@ Written by: Zoltan Herczeg
Email local part: hzmester
Emain domain: freemail.hu
-Copyright(c) 2010-2017 Zoltan Herczeg
+Copyright(c) 2010-2018 Zoltan Herczeg
All rights reserved.
@@ -48,7 +48,7 @@ Written by: Zoltan Herczeg
Email local part: hzmester
Emain domain: freemail.hu
-Copyright(c) 2009-2017 Zoltan Herczeg
+Copyright(c) 2009-2018 Zoltan Herczeg
All rights reserved.
diff --git a/src/3rdparty/pcre2/import_from_pcre2_tarball.sh b/src/3rdparty/pcre2/import_from_pcre2_tarball.sh
index dc2fb05453..05eda2c8ca 100755
--- a/src/3rdparty/pcre2/import_from_pcre2_tarball.sh
+++ b/src/3rdparty/pcre2/import_from_pcre2_tarball.sh
@@ -88,6 +88,7 @@ FILES="
src/pcre2_context.c
src/pcre2_dfa_match.c
src/pcre2_error.c
+ src/pcre2_extuni.c
src/pcre2_find_bracket.c
src/pcre2_internal.h
src/pcre2_intmodedep.h
diff --git a/src/3rdparty/pcre2/pcre2.pro b/src/3rdparty/pcre2/pcre2.pro
index 296e65cd59..f2fddd19be 100644
--- a/src/3rdparty/pcre2/pcre2.pro
+++ b/src/3rdparty/pcre2/pcre2.pro
@@ -27,6 +27,7 @@ SOURCES += \
$$PWD/src/pcre2_context.c \
$$PWD/src/pcre2_dfa_match.c \
$$PWD/src/pcre2_error.c \
+ $$PWD/src/pcre2_extuni.c \
$$PWD/src/pcre2_find_bracket.c \
$$PWD/src/pcre2_jit_compile.c \
$$PWD/src/pcre2_maketables.c \
diff --git a/src/3rdparty/pcre2/qt_attribution.json b/src/3rdparty/pcre2/qt_attribution.json
index 33c1a58878..d09c1cc2c9 100644
--- a/src/3rdparty/pcre2/qt_attribution.json
+++ b/src/3rdparty/pcre2/qt_attribution.json
@@ -6,12 +6,13 @@
"Description": "The PCRE library is a set of functions that implement regular expression pattern matching using the same syntax and semantics as Perl 5.",
"Homepage": "http://www.pcre.org/",
- "Version": "10.30",
+ "Version": "10.31",
+ "DownloadLocation": "https://ftp.pcre.org/pub/pcre/pcre2-10.31.tar.bz2",
"License": "BSD 3-clause \"New\" or \"Revised\" License",
"LicenseId": "BSD-3-Clause",
"LicenseFile": "LICENCE",
- "Copyright": "Copyright (c) 1997-2017 University of Cambridge
-Copyright (c) 2009-2017 Zoltan Herczeg
+ "Copyright": "Copyright (c) 1997-2018 University of Cambridge
+Copyright (c) 2009-2018 Zoltan Herczeg
Copyright (c) 2007-2012 Google Inc.
Copyright (c) 2013-2013 Tilera Corporation (jiwang@tilera.com)"
}
diff --git a/src/3rdparty/pcre2/src/pcre2.h b/src/3rdparty/pcre2/src/pcre2.h
index 5a4533909d..fffcc307d0 100644
--- a/src/3rdparty/pcre2/src/pcre2.h
+++ b/src/3rdparty/pcre2/src/pcre2.h
@@ -42,9 +42,9 @@ POSSIBILITY OF SUCH DAMAGE.
/* The current PCRE version information. */
#define PCRE2_MAJOR 10
-#define PCRE2_MINOR 30
+#define PCRE2_MINOR 31
#define PCRE2_PRERELEASE
-#define PCRE2_DATE 2017-08-14
+#define PCRE2_DATE 2018-02-12
/* When an application links to a PCRE DLL in Windows, the symbols that are
imported have to be identified as such. When building PCRE2, the appropriate
@@ -208,7 +208,104 @@ greater than zero. */
#define PCRE2_BSR_UNICODE 1
#define PCRE2_BSR_ANYCRLF 2
-/* Error codes: no match and partial match are "expected" errors. */
+/* Error codes for pcre2_compile(). Some of these are also used by
+pcre2_pattern_convert(). */
+
+#define PCRE2_ERROR_END_BACKSLASH 101
+#define PCRE2_ERROR_END_BACKSLASH_C 102
+#define PCRE2_ERROR_UNKNOWN_ESCAPE 103
+#define PCRE2_ERROR_QUANTIFIER_OUT_OF_ORDER 104
+#define PCRE2_ERROR_QUANTIFIER_TOO_BIG 105
+#define PCRE2_ERROR_MISSING_SQUARE_BRACKET 106
+#define PCRE2_ERROR_ESCAPE_INVALID_IN_CLASS 107
+#define PCRE2_ERROR_CLASS_RANGE_ORDER 108
+#define PCRE2_ERROR_QUANTIFIER_INVALID 109
+#define PCRE2_ERROR_INTERNAL_UNEXPECTED_REPEAT 110
+#define PCRE2_ERROR_INVALID_AFTER_PARENS_QUERY 111
+#define PCRE2_ERROR_POSIX_CLASS_NOT_IN_CLASS 112
+#define PCRE2_ERROR_POSIX_NO_SUPPORT_COLLATING 113
+#define PCRE2_ERROR_MISSING_CLOSING_PARENTHESIS 114
+#define PCRE2_ERROR_BAD_SUBPATTERN_REFERENCE 115
+#define PCRE2_ERROR_NULL_PATTERN 116
+#define PCRE2_ERROR_BAD_OPTIONS 117
+#define PCRE2_ERROR_MISSING_COMMENT_CLOSING 118
+#define PCRE2_ERROR_PARENTHESES_NEST_TOO_DEEP 119
+#define PCRE2_ERROR_PATTERN_TOO_LARGE 120
+#define PCRE2_ERROR_HEAP_FAILED 121
+#define PCRE2_ERROR_UNMATCHED_CLOSING_PARENTHESIS 122
+#define PCRE2_ERROR_INTERNAL_CODE_OVERFLOW 123
+#define PCRE2_ERROR_MISSING_CONDITION_CLOSING 124
+#define PCRE2_ERROR_LOOKBEHIND_NOT_FIXED_LENGTH 125
+#define PCRE2_ERROR_ZERO_RELATIVE_REFERENCE 126
+#define PCRE2_ERROR_TOO_MANY_CONDITION_BRANCHES 127
+#define PCRE2_ERROR_CONDITION_ASSERTION_EXPECTED 128
+#define PCRE2_ERROR_BAD_RELATIVE_REFERENCE 129
+#define PCRE2_ERROR_UNKNOWN_POSIX_CLASS 130
+#define PCRE2_ERROR_INTERNAL_STUDY_ERROR 131
+#define PCRE2_ERROR_UNICODE_NOT_SUPPORTED 132
+#define PCRE2_ERROR_PARENTHESES_STACK_CHECK 133
+#define PCRE2_ERROR_CODE_POINT_TOO_BIG 134
+#define PCRE2_ERROR_LOOKBEHIND_TOO_COMPLICATED 135
+#define PCRE2_ERROR_LOOKBEHIND_INVALID_BACKSLASH_C 136
+#define PCRE2_ERROR_UNSUPPORTED_ESCAPE_SEQUENCE 137
+#define PCRE2_ERROR_CALLOUT_NUMBER_TOO_BIG 138
+#define PCRE2_ERROR_MISSING_CALLOUT_CLOSING 139
+#define PCRE2_ERROR_ESCAPE_INVALID_IN_VERB 140
+#define PCRE2_ERROR_UNRECOGNIZED_AFTER_QUERY_P 141
+#define PCRE2_ERROR_MISSING_NAME_TERMINATOR 142
+#define PCRE2_ERROR_DUPLICATE_SUBPATTERN_NAME 143
+#define PCRE2_ERROR_INVALID_SUBPATTERN_NAME 144
+#define PCRE2_ERROR_UNICODE_PROPERTIES_UNAVAILABLE 145
+#define PCRE2_ERROR_MALFORMED_UNICODE_PROPERTY 146
+#define PCRE2_ERROR_UNKNOWN_UNICODE_PROPERTY 147
+#define PCRE2_ERROR_SUBPATTERN_NAME_TOO_LONG 148
+#define PCRE2_ERROR_TOO_MANY_NAMED_SUBPATTERNS 149
+#define PCRE2_ERROR_CLASS_INVALID_RANGE 150
+#define PCRE2_ERROR_OCTAL_BYTE_TOO_BIG 151
+#define PCRE2_ERROR_INTERNAL_OVERRAN_WORKSPACE 152
+#define PCRE2_ERROR_INTERNAL_MISSING_SUBPATTERN 153
+#define PCRE2_ERROR_DEFINE_TOO_MANY_BRANCHES 154
+#define PCRE2_ERROR_BACKSLASH_O_MISSING_BRACE 155
+#define PCRE2_ERROR_INTERNAL_UNKNOWN_NEWLINE 156
+#define PCRE2_ERROR_BACKSLASH_G_SYNTAX 157
+#define PCRE2_ERROR_PARENS_QUERY_R_MISSING_CLOSING 158
+#define PCRE2_ERROR_VERB_ARGUMENT_NOT_ALLOWED 159
+#define PCRE2_ERROR_VERB_UNKNOWN 160
+#define PCRE2_ERROR_SUBPATTERN_NUMBER_TOO_BIG 161
+#define PCRE2_ERROR_SUBPATTERN_NAME_EXPECTED 162
+#define PCRE2_ERROR_INTERNAL_PARSED_OVERFLOW 163
+#define PCRE2_ERROR_INVALID_OCTAL 164
+#define PCRE2_ERROR_SUBPATTERN_NAMES_MISMATCH 165
+#define PCRE2_ERROR_MARK_MISSING_ARGUMENT 166
+#define PCRE2_ERROR_INVALID_HEXADECIMAL 167
+#define PCRE2_ERROR_BACKSLASH_C_SYNTAX 168
+#define PCRE2_ERROR_BACKSLASH_K_SYNTAX 169
+#define PCRE2_ERROR_INTERNAL_BAD_CODE_LOOKBEHINDS 170
+#define PCRE2_ERROR_BACKSLASH_N_IN_CLASS 171
+#define PCRE2_ERROR_CALLOUT_STRING_TOO_LONG 172
+#define PCRE2_ERROR_UNICODE_DISALLOWED_CODE_POINT 173
+#define PCRE2_ERROR_UTF_IS_DISABLED 174
+#define PCRE2_ERROR_UCP_IS_DISABLED 175
+#define PCRE2_ERROR_VERB_NAME_TOO_LONG 176
+#define PCRE2_ERROR_BACKSLASH_U_CODE_POINT_TOO_BIG 177
+#define PCRE2_ERROR_MISSING_OCTAL_OR_HEX_DIGITS 178
+#define PCRE2_ERROR_VERSION_CONDITION_SYNTAX 179
+#define PCRE2_ERROR_INTERNAL_BAD_CODE_AUTO_POSSESS 180
+#define PCRE2_ERROR_CALLOUT_NO_STRING_DELIMITER 181
+#define PCRE2_ERROR_CALLOUT_BAD_STRING_DELIMITER 182
+#define PCRE2_ERROR_BACKSLASH_C_CALLER_DISABLED 183
+#define PCRE2_ERROR_QUERY_BARJX_NEST_TOO_DEEP 184
+#define PCRE2_ERROR_BACKSLASH_C_LIBRARY_DISABLED 185
+#define PCRE2_ERROR_PATTERN_TOO_COMPLICATED 186
+#define PCRE2_ERROR_LOOKBEHIND_TOO_LONG 187
+#define PCRE2_ERROR_PATTERN_STRING_TOO_LONG 188
+#define PCRE2_ERROR_INTERNAL_BAD_CODE 189
+#define PCRE2_ERROR_INTERNAL_BAD_CODE_IN_SKIP 190
+#define PCRE2_ERROR_NO_SURROGATES_IN_UTF16 191
+#define PCRE2_ERROR_BAD_LITERAL_OPTIONS 192
+
+
+/* "Expected" matching error codes: no match and partial match. */
#define PCRE2_ERROR_NOMATCH (-1)
#define PCRE2_ERROR_PARTIAL (-2)
@@ -248,10 +345,10 @@ greater than zero. */
#define PCRE2_ERROR_UTF32_ERR1 (-27)
#define PCRE2_ERROR_UTF32_ERR2 (-28)
-/* Error codes for pcre2[_dfa]_match(), substring extraction functions, context
-functions, and serializing functions. They are in numerical order. Originally
-they were in alphabetical order too, but now that PCRE2 is released, the
-numbers must not be changed. */
+/* Miscellaneous error codes for pcre2[_dfa]_match(), substring extraction
+functions, context functions, and serializing functions. They are in numerical
+order. Originally they were in alphabetical order too, but now that PCRE2 is
+released, the numbers must not be changed. */
#define PCRE2_ERROR_BADDATA (-29)
#define PCRE2_ERROR_MIXEDTABLES (-30) /* Name was changed */
@@ -321,6 +418,7 @@ numbers must not be changed. */
#define PCRE2_INFO_HASBACKSLASHC 23
#define PCRE2_INFO_FRAMESIZE 24
#define PCRE2_INFO_HEAPLIMIT 25
+#define PCRE2_INFO_EXTRAOPTIONS 26
/* Request types for pcre2_config(). */
@@ -338,6 +436,9 @@ numbers must not be changed. */
#define PCRE2_CONFIG_UNICODE_VERSION 10
#define PCRE2_CONFIG_VERSION 11
#define PCRE2_CONFIG_HEAPLIMIT 12
+#define PCRE2_CONFIG_NEVER_BACKSLASH_C 13
+#define PCRE2_CONFIG_COMPILED_WIDTHS 14
+
/* Types for code units in patterns and subject strings. */
@@ -393,6 +494,11 @@ without changing the API of the function, thereby allowing old clients to work
without modification. Define the generic version in a macro; the width-specific
versions are generated from this macro below. */
+/* Flags for the callout_flags field. These are cleared after a callout. */
+
+#define PCRE2_CALLOUT_STARTMATCH 0x00000001u /* Set for each bumpalong */
+#define PCRE2_CALLOUT_BACKTRACK 0x00000002u /* Set after a backtrack */
+
#define PCRE2_STRUCTURE_LIST \
typedef struct pcre2_callout_block { \
uint32_t version; /* Identifies version of block */ \
@@ -412,6 +518,8 @@ typedef struct pcre2_callout_block { \
PCRE2_SIZE callout_string_offset; /* Offset to string within pattern */ \
PCRE2_SIZE callout_string_length; /* Length of string compiled into pattern */ \
PCRE2_SPTR callout_string; /* String compiled into pattern */ \
+ /* ------------------- Added for Version 2 -------------------------- */ \
+ uint32_t callout_flags; /* See above for list */ \
/* ------------------------------------------------------------------ */ \
} pcre2_callout_block; \
\
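The header above also gains two pcre2_config() requests new in PCRE2 10.31. A short sketch of querying them, not part of the import; the code unit width is chosen arbitrarily for the example:

    #define PCRE2_CODE_UNIT_WIDTH 16
    #include <pcre2.h>
    #include <cstdio>

    int main()
    {
        uint32_t widths = 0;
        uint32_t neverBackslashC = 0;

        // Bitmask of compiled-in code unit widths: 1 (8-bit), 2 (16-bit), 4 (32-bit).
        pcre2_config(PCRE2_CONFIG_COMPILED_WIDTHS, &widths);
        // 1 if the library was built with \C support disabled (NEVER_BACKSLASH_C).
        pcre2_config(PCRE2_CONFIG_NEVER_BACKSLASH_C, &neverBackslashC);

        std::printf("compiled widths mask: %u, \\C disabled: %u\n",
                    (unsigned)widths, (unsigned)neverBackslashC);
        return 0;
    }
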
diff --git a/src/3rdparty/pcre2/src/pcre2_auto_possess.c b/src/3rdparty/pcre2/src/pcre2_auto_possess.c
index ad3543f627..23275a2e39 100644
--- a/src/3rdparty/pcre2/src/pcre2_auto_possess.c
+++ b/src/3rdparty/pcre2/src/pcre2_auto_possess.c
@@ -558,47 +558,73 @@ for(;;)
continue;
}
+ /* At the end of a branch, skip to the end of the group. */
+
if (c == OP_ALT)
{
do code += GET(code, 1); while (*code == OP_ALT);
c = *code;
}
+ /* Inspect the next opcode. */
+
switch(c)
{
- case OP_END:
- case OP_KETRPOS:
- /* TRUE only in greedy case. The non-greedy case could be replaced by
- an OP_EXACT, but it is probably not worth it. (And note that OP_EXACT
- uses more memory, which we cannot get at this stage.) */
+ /* We can always possessify a greedy iterator at the end of the pattern,
+ which is reached after skipping over the final OP_KET. A non-greedy
+ iterator must never be possessified. */
+ case OP_END:
return base_list[1] != 0;
+ /* When an iterator is at the end of certain kinds of group we can inspect
+ what follows the group by skipping over the closing ket. Note that this
+ does not apply to OP_KETRMAX or OP_KETRMIN because what follows any given
+ iteration is variable (could be another iteration or could be the next
+ item). As these two opcodes are not listed in the next switch, they will
+ end up as the next code to inspect, and return FALSE by virtue of being
+ unsupported. */
+
case OP_KET:
- /* If the bracket is capturing, and referenced by an OP_RECURSE, or
- it is an atomic sub-pattern (assert, once, etc.) the non-greedy case
- cannot be converted to a possessive form. */
+ case OP_KETRPOS:
+ /* The non-greedy case cannot be converted to a possessive form. */
if (base_list[1] == 0) return FALSE;
+ /* If the bracket is capturing it might be referenced by an OP_RECURSE
+ so its last iterator can never be possessified if the pattern contains
+ recursions. (This could be improved by keeping a list of group numbers that
+ are called by recursion.) */
+
switch(*(code - GET(code, 1)))
{
+ case OP_CBRA:
+ case OP_SCBRA:
+ case OP_CBRAPOS:
+ case OP_SCBRAPOS:
+ if (cb->had_recurse) return FALSE;
+ break;
+
+ /* Atomic sub-patterns and assertions can always auto-possessify their
+ last iterator. However, if the group was entered as a result of checking
+ a previous iterator, this is not possible. */
+
case OP_ASSERT:
case OP_ASSERT_NOT:
case OP_ASSERTBACK:
case OP_ASSERTBACK_NOT:
case OP_ONCE:
- /* Atomic sub-patterns and assertions can always auto-possessify their
- last iterator. However, if the group was entered as a result of checking
- a previous iterator, this is not possible. */
-
return !entered_a_group;
}
+ /* Skip over the bracket and inspect what comes next. */
+
code += PRIV(OP_lengths)[c];
continue;
+ /* Handle cases where the next item is a group. */
+
case OP_ONCE:
case OP_BRA:
case OP_CBRA:
@@ -637,11 +663,15 @@ for(;;)
code += PRIV(OP_lengths)[c];
continue;
+ /* The next opcode does not need special handling; fall through and use it
+ to see if the base can be possessified. */
+
default:
break;
}
- /* Check for a supported opcode, and load its properties. */
+ /* We now have the next appropriate opcode to compare with the base. Check
+ for a supported opcode, and load its properties. */
code = get_chr_property_list(code, utf, cb->fcc, list);
if (code == NULL) return FALSE; /* Unsupported */
diff --git a/src/3rdparty/pcre2/src/pcre2_compile.c b/src/3rdparty/pcre2/src/pcre2_compile.c
index 44ee2502c8..87530fb584 100644
--- a/src/3rdparty/pcre2/src/pcre2_compile.c
+++ b/src/3rdparty/pcre2/src/pcre2_compile.c
@@ -2194,8 +2194,8 @@ manage_callouts(PCRE2_SPTR ptr, uint32_t **pcalloutptr, BOOL auto_callout,
{
uint32_t *previous_callout = *pcalloutptr;
-if (previous_callout != NULL) previous_callout[2] = ptr - cb->start_pattern -
- (PCRE2_SIZE)previous_callout[1];
+if (previous_callout != NULL) previous_callout[2] = (uint32_t)(ptr -
+ cb->start_pattern - (PCRE2_SIZE)previous_callout[1]);
if (!auto_callout) previous_callout = NULL; else
{
@@ -3806,7 +3806,7 @@ while (ptr < ptrend)
/* Remember the offset to the next item in the pattern, and set a default
length. This should get updated after the next item is read. */
- previous_callout[1] = ptr - cb->start_pattern;
+ previous_callout[1] = (uint32_t)(ptr - cb->start_pattern);
previous_callout[2] = 0;
break; /* End callout */
@@ -5599,14 +5599,17 @@ for (;; pptr++)
/* ===================================================================*/
/* Deal with (*VERB)s. */
- /* Check for open captures before ACCEPT and convert it to ASSERT_ACCEPT if
- in an assertion. In the first pass, just accumulate the length required;
+ /* Check for open captures before ACCEPT and close those that are within
+ the same assertion level, also converting ACCEPT to ASSERT_ACCEPT in an
+ assertion. In the first pass, just accumulate the length required;
otherwise hitting (*ACCEPT) inside many nested parentheses can cause
workspace overflow. Do not set firstcu after *ACCEPT. */
case META_ACCEPT:
cb->had_accept = TRUE;
- for (oc = cb->open_caps; oc != NULL; oc = oc->next)
+ for (oc = cb->open_caps;
+ oc != NULL && oc->assert_depth >= cb->assert_depth;
+ oc = oc->next)
{
if (lengthptr != NULL)
{
@@ -7132,7 +7135,7 @@ for (;; pptr++)
later. */
HANDLE_SINGLE_REFERENCE:
- if (firstcuflags == REQ_UNSET) firstcuflags = REQ_NONE;
+ if (firstcuflags == REQ_UNSET) zerofirstcuflags = firstcuflags = REQ_NONE;
*code++ = ((options & PCRE2_CASELESS) != 0)? OP_REFI : OP_REF;
PUT2INC(code, 0, meta_arg);
@@ -7483,6 +7486,7 @@ if (*code == OP_CBRA)
capitem.number = capnumber;
capitem.next = cb->open_caps;
capitem.flag = FALSE;
+ capitem.assert_depth = cb->assert_depth;
cb->open_caps = &capitem;
}
@@ -8102,13 +8106,13 @@ REQ_NONE in the flags.
Arguments:
code points to start of compiled pattern
flags points to the first code unit flags
- inassert TRUE if in an assertion
+ inassert non-zero if in an assertion
Returns: the fixed first code unit, or 0 with REQ_NONE in flags
*/
static uint32_t
-find_firstassertedcu(PCRE2_SPTR code, int32_t *flags, BOOL inassert)
+find_firstassertedcu(PCRE2_SPTR code, int32_t *flags, uint32_t inassert)
{
uint32_t c = 0;
int cflags = REQ_NONE;
@@ -8135,7 +8139,7 @@ do {
case OP_SCBRAPOS:
case OP_ASSERT:
case OP_ONCE:
- d = find_firstassertedcu(scode, &dflags, op == OP_ASSERT);
+ d = find_firstassertedcu(scode, &dflags, inassert + ((op==OP_ASSERT)?1:0));
if (dflags < 0)
return 0;
if (cflags < 0) { c = d; cflags = dflags; }
@@ -8150,7 +8154,7 @@ do {
case OP_PLUS:
case OP_MINPLUS:
case OP_POSPLUS:
- if (!inassert) return 0;
+ if (inassert == 0) return 0;
if (cflags < 0) { c = scode[1]; cflags = 0; }
else if (c != scode[1]) return 0;
break;
@@ -8163,7 +8167,7 @@ do {
case OP_PLUSI:
case OP_MINPLUSI:
case OP_POSPLUSI:
- if (!inassert) return 0;
+ if (inassert == 0) return 0;
if (cflags < 0) { c = scode[1]; cflags = REQ_CASELESS; }
else if (c != scode[1]) return 0;
break;
@@ -9481,6 +9485,7 @@ re->blocksize = re_blocksize;
re->magic_number = MAGIC_NUMBER;
re->compile_options = options;
re->overall_options = cb.external_options;
+re->extra_options = ccontext->extra_options;
re->flags = PCRE2_CODE_UNIT_WIDTH/8 | cb.external_flags | setflags;
re->limit_heap = limit_heap;
re->limit_match = limit_match;
@@ -9670,7 +9675,7 @@ if ((re->overall_options & PCRE2_NO_START_OPTIMIZE) == 0)
actual literals that follow). */
if (firstcuflags < 0)
- firstcu = find_firstassertedcu(codestart, &firstcuflags, FALSE);
+ firstcu = find_firstassertedcu(codestart, &firstcuflags, 0);
/* Save the data for a first code unit. */
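The compiled pattern now records the compile context's extra options (re->extra_options above), which pairs with the new PCRE2_INFO_EXTRAOPTIONS request added in pcre2.h. A sketch of reading them back, assuming the stock pcre2_set_compile_extra_options() API available since PCRE2 10.30; the pattern and flag choice are illustrative only:

    #define PCRE2_CODE_UNIT_WIDTH 8
    #include <pcre2.h>
    #include <cstdio>

    int main()
    {
        int errorcode = 0;
        PCRE2_SIZE erroroffset = 0;

        pcre2_compile_context *cctx = pcre2_compile_context_create(nullptr);
        // \y is normally an unknown escape; this flag turns it into a literal.
        pcre2_set_compile_extra_options(cctx, PCRE2_EXTRA_BAD_ESCAPE_IS_LITERAL);

        pcre2_code *re = pcre2_compile((PCRE2_SPTR)"a\\y", PCRE2_ZERO_TERMINATED,
                                       0, &errorcode, &erroroffset, cctx);

        uint32_t extra = 0;
        if (re != nullptr)
            pcre2_pattern_info(re, PCRE2_INFO_EXTRAOPTIONS, &extra);
        std::printf("extra options stored in the pattern: 0x%x\n", (unsigned)extra);

        pcre2_code_free(re);
        pcre2_compile_context_free(cctx);
        return 0;
    }
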
diff --git a/src/3rdparty/pcre2/src/pcre2_config.c b/src/3rdparty/pcre2/src/pcre2_config.c
index d009c0a676..e487b10220 100644
--- a/src/3rdparty/pcre2/src/pcre2_config.c
+++ b/src/3rdparty/pcre2/src/pcre2_config.c
@@ -84,11 +84,13 @@ if (where == NULL) /* Requests a length */
return PCRE2_ERROR_BADOPTION;
case PCRE2_CONFIG_BSR:
+ case PCRE2_CONFIG_COMPILED_WIDTHS:
+ case PCRE2_CONFIG_DEPTHLIMIT:
case PCRE2_CONFIG_HEAPLIMIT:
case PCRE2_CONFIG_JIT:
case PCRE2_CONFIG_LINKSIZE:
case PCRE2_CONFIG_MATCHLIMIT:
- case PCRE2_CONFIG_DEPTHLIMIT:
+ case PCRE2_CONFIG_NEVER_BACKSLASH_C:
case PCRE2_CONFIG_NEWLINE:
case PCRE2_CONFIG_PARENSLIMIT:
case PCRE2_CONFIG_STACKRECURSE: /* Obsolete */
@@ -117,6 +119,24 @@ switch (what)
#endif
break;
+ case PCRE2_CONFIG_COMPILED_WIDTHS:
+ *((uint32_t *)where) = 0
+#ifdef SUPPORT_PCRE2_8
+ + 1
+#endif
+#ifdef SUPPORT_PCRE2_16
+ + 2
+#endif
+#ifdef SUPPORT_PCRE2_32
+ + 4
+#endif
+ ;
+ break;
+
+ case PCRE2_CONFIG_DEPTHLIMIT:
+ *((uint32_t *)where) = MATCH_LIMIT_DEPTH;
+ break;
+
case PCRE2_CONFIG_HEAPLIMIT:
*((uint32_t *)where) = HEAP_LIMIT;
break;
@@ -148,14 +168,18 @@ switch (what)
*((uint32_t *)where) = MATCH_LIMIT;
break;
- case PCRE2_CONFIG_DEPTHLIMIT:
- *((uint32_t *)where) = MATCH_LIMIT_DEPTH;
- break;
-
case PCRE2_CONFIG_NEWLINE:
*((uint32_t *)where) = NEWLINE_DEFAULT;
break;
+ case PCRE2_CONFIG_NEVER_BACKSLASH_C:
+#ifdef NEVER_BACKSLASH_C
+ *((uint32_t *)where) = 1;
+#else
+ *((uint32_t *)where) = 0;
+#endif
+ break;
+
case PCRE2_CONFIG_PARENSLIMIT:
*((uint32_t *)where) = PARENS_NEST_LIMIT;
break;
diff --git a/src/3rdparty/pcre2/src/pcre2_dfa_match.c b/src/3rdparty/pcre2/src/pcre2_dfa_match.c
index 5ae13944c7..c6184ff5e9 100644
--- a/src/3rdparty/pcre2/src/pcre2_dfa_match.c
+++ b/src/3rdparty/pcre2/src/pcre2_dfa_match.c
@@ -7,7 +7,7 @@ and semantics are as close as possible to those of the Perl 5 language.
Written by Philip Hazel
Original API code Copyright (c) 1997-2012 University of Cambridge
- New API code Copyright (c) 2016-2017 University of Cambridge
+ New API code Copyright (c) 2016-2018 University of Cambridge
-----------------------------------------------------------------------------
Redistribution and use in source and binary forms, with or without
@@ -294,6 +294,66 @@ typedef struct stateblock {
/*************************************************
+* Process a callout *
+*************************************************/
+
+/* This function is called to perform a callout.
+
+Arguments:
+ code current code pointer
+ offsets points to current capture offsets
+ current_subject start of current subject match
+ ptr current position in subject
+ mb the match block
+ extracode extra code offset when called from condition
+ lengthptr where to return the callout length
+
+Returns: the return from the callout
+*/
+
+static int
+do_callout(PCRE2_SPTR code, PCRE2_SIZE *offsets, PCRE2_SPTR current_subject,
+ PCRE2_SPTR ptr, dfa_match_block *mb, PCRE2_SIZE extracode,
+ PCRE2_SIZE *lengthptr)
+{
+pcre2_callout_block *cb = mb->cb;
+
+*lengthptr = (code[extracode] == OP_CALLOUT)?
+ (PCRE2_SIZE)PRIV(OP_lengths)[OP_CALLOUT] :
+ (PCRE2_SIZE)GET(code, 1 + 2*LINK_SIZE + extracode);
+
+if (mb->callout == NULL) return 0; /* No callout provided */
+
+/* Fixed fields in the callout block are set once and for all at the start of
+matching. */
+
+cb->offset_vector = offsets;
+cb->start_match = (PCRE2_SIZE)(current_subject - mb->start_subject);
+cb->current_position = (PCRE2_SIZE)(ptr - mb->start_subject);
+cb->pattern_position = GET(code, 1 + extracode);
+cb->next_item_length = GET(code, 1 + LINK_SIZE + extracode);
+
+if (code[extracode] == OP_CALLOUT)
+ {
+ cb->callout_number = code[1 + 2*LINK_SIZE + extracode];
+ cb->callout_string_offset = 0;
+ cb->callout_string = NULL;
+ cb->callout_string_length = 0;
+ }
+else
+ {
+ cb->callout_number = 0;
+ cb->callout_string_offset = GET(code, 1 + 3*LINK_SIZE + extracode);
+ cb->callout_string = code + (1 + 4*LINK_SIZE + extracode) + 1;
+ cb->callout_string_length = *lengthptr - (1 + 4*LINK_SIZE) - 2;
+ }
+
+return (mb->callout)(cb, mb->callout_data);
+}
+
+
+
+/*************************************************
* Match a Regular Expression - DFA engine *
*************************************************/
@@ -448,7 +508,8 @@ if (*this_start_code == OP_ASSERTBACK || *this_start_code == OP_ASSERTBACK_NOT)
{
if (current_subject <= start_subject) break;
current_subject--;
- ACROSSCHAR(current_subject > start_subject, *current_subject, current_subject--);
+ ACROSSCHAR(current_subject > start_subject, current_subject,
+ current_subject--);
}
}
else
@@ -1364,63 +1425,14 @@ for (;;)
if (count > 0) { ADD_ACTIVE(state_offset + 2, 0); }
if (clen > 0)
{
- uint32_t lgb, rgb;
- PCRE2_SPTR nptr = ptr + clen;
int ncount = 0;
if (count > 0 && codevalue == OP_EXTUNI_EXTRA + OP_TYPEPOSPLUS)
{
active_count--; /* Remove non-match possibility */
next_active_state--;
}
- lgb = UCD_GRAPHBREAK(c);
- while (nptr < end_subject)
- {
- dlen = 1;
- if (!utf) d = *nptr; else { GETCHARLEN(d, nptr, dlen); }
- rgb = UCD_GRAPHBREAK(d);
- if ((PRIV(ucp_gbtable)[lgb] & (1u << rgb)) == 0) break;
-
- /* Not breaking between Regional Indicators is allowed only if
- there are an even number of preceding RIs. */
-
- if (lgb == ucp_gbRegionalIndicator &&
- rgb == ucp_gbRegionalIndicator)
- {
- int ricount = 0;
- PCRE2_SPTR bptr = nptr - 1;
-#ifdef SUPPORT_UNICODE
- if (utf) BACKCHAR(bptr);
-#endif
- /* bptr is pointing to the left-hand character */
-
- while (bptr > mb->start_subject)
- {
- bptr--;
-#ifdef SUPPORT_UNICODE
- if (utf)
- {
- BACKCHAR(bptr);
- GETCHAR(d, bptr);
- }
- else
-#endif
- d = *bptr;
- if (UCD_GRAPHBREAK(d) != ucp_gbRegionalIndicator) break;
- ricount++;
- }
- if ((ricount & 1) != 0) break; /* Grapheme break required */
- }
-
- /* If Extend follows E_Base[_GAZ] do not update lgb; this allows
- any number of Extend before a following E_Modifier. */
-
- if (rgb != ucp_gbExtend ||
- (lgb != ucp_gbE_Base && lgb != ucp_gbE_Base_GAZ))
- lgb = rgb;
-
- ncount++;
- nptr += dlen;
- }
+ (void)PRIV(extuni)(c, ptr + clen, mb->start_subject, end_subject, utf,
+ &ncount);
count++;
ADD_NEW_DATA(-state_offset, count, ncount);
}
@@ -1663,8 +1675,6 @@ for (;;)
ADD_ACTIVE(state_offset + 2, 0);
if (clen > 0)
{
- uint32_t lgb, rgb;
- PCRE2_SPTR nptr = ptr + clen;
int ncount = 0;
if (codevalue == OP_EXTUNI_EXTRA + OP_TYPEPOSSTAR ||
codevalue == OP_EXTUNI_EXTRA + OP_TYPEPOSQUERY)
@@ -1672,55 +1682,8 @@ for (;;)
active_count--; /* Remove non-match possibility */
next_active_state--;
}
- lgb = UCD_GRAPHBREAK(c);
- while (nptr < end_subject)
- {
- dlen = 1;
- if (!utf) d = *nptr; else { GETCHARLEN(d, nptr, dlen); }
- rgb = UCD_GRAPHBREAK(d);
- if ((PRIV(ucp_gbtable)[lgb] & (1u << rgb)) == 0) break;
-
- /* Not breaking between Regional Indicators is allowed only if
- there are an even number of preceding RIs. */
-
- if (lgb == ucp_gbRegionalIndicator &&
- rgb == ucp_gbRegionalIndicator)
- {
- int ricount = 0;
- PCRE2_SPTR bptr = nptr - 1;
-#ifdef SUPPORT_UNICODE
- if (utf) BACKCHAR(bptr);
-#endif
- /* bptr is pointing to the left-hand character */
-
- while (bptr > mb->start_subject)
- {
- bptr--;
-#ifdef SUPPORT_UNICODE
- if (utf)
- {
- BACKCHAR(bptr);
- GETCHAR(d, bptr);
- }
- else
-#endif
- d = *bptr;
- if (UCD_GRAPHBREAK(d) != ucp_gbRegionalIndicator) break;
- ricount++;
- }
- if ((ricount & 1) != 0) break; /* Grapheme break required */
- }
-
- /* If Extend follows E_Base[_GAZ] do not update lgb; this allows
- any number of Extend before a following E_Modifier. */
-
- if (rgb != ucp_gbExtend ||
- (lgb != ucp_gbE_Base && lgb != ucp_gbE_Base_GAZ))
- lgb = rgb;
-
- ncount++;
- nptr += dlen;
- }
+ (void)PRIV(extuni)(c, ptr + clen, mb->start_subject, end_subject, utf,
+ &ncount);
ADD_NEW_DATA(-(state_offset + count), 0, ncount);
}
break;
@@ -1973,63 +1936,15 @@ for (;;)
count = current_state->count; /* Number already matched */
if (clen > 0)
{
- uint32_t lgb, rgb;
- PCRE2_SPTR nptr = ptr + clen;
+ PCRE2_SPTR nptr;
int ncount = 0;
if (codevalue == OP_EXTUNI_EXTRA + OP_TYPEPOSUPTO)
{
active_count--; /* Remove non-match possibility */
next_active_state--;
}
- lgb = UCD_GRAPHBREAK(c);
- while (nptr < end_subject)
- {
- dlen = 1;
- if (!utf) d = *nptr; else { GETCHARLEN(d, nptr, dlen); }
- rgb = UCD_GRAPHBREAK(d);
- if ((PRIV(ucp_gbtable)[lgb] & (1u << rgb)) == 0) break;
-
- /* Not breaking between Regional Indicators is allowed only if
- there are an even number of preceding RIs. */
-
- if (lgb == ucp_gbRegionalIndicator &&
- rgb == ucp_gbRegionalIndicator)
- {
- int ricount = 0;
- PCRE2_SPTR bptr = nptr - 1;
-#ifdef SUPPORT_UNICODE
- if (utf) BACKCHAR(bptr);
-#endif
- /* bptr is pointing to the left-hand character */
-
- while (bptr > mb->start_subject)
- {
- bptr--;
-#ifdef SUPPORT_UNICODE
- if (utf)
- {
- BACKCHAR(bptr);
- GETCHAR(d, bptr);
- }
- else
-#endif
- d = *bptr;
- if (UCD_GRAPHBREAK(d) != ucp_gbRegionalIndicator) break;
- ricount++;
- }
- if ((ricount & 1) != 0) break; /* Grapheme break required */
- }
-
- /* If Extend follows E_Base[_GAZ] do not update lgb; this allows
- any number of Extend before a following E_Modifier. */
-
- if (rgb != ucp_gbExtend ||
- (lgb != ucp_gbE_Base && lgb != ucp_gbE_Base_GAZ))
- lgb = rgb;
-
- ncount++;
- nptr += dlen;
- }
+ nptr = PRIV(extuni)(c, ptr + clen, mb->start_subject, end_subject, utf,
+ &ncount);
if (nptr >= end_subject && (mb->moptions & PCRE2_PARTIAL_HARD) != 0)
reset_could_continue = TRUE;
if (++count >= (int)GET2(code, 1))
@@ -2206,58 +2121,9 @@ for (;;)
case OP_EXTUNI:
if (clen > 0)
{
- uint32_t lgb, rgb;
- PCRE2_SPTR nptr = ptr + clen;
int ncount = 0;
- lgb = UCD_GRAPHBREAK(c);
- while (nptr < end_subject)
- {
- dlen = 1;
- if (!utf) d = *nptr; else { GETCHARLEN(d, nptr, dlen); }
- rgb = UCD_GRAPHBREAK(d);
- if ((PRIV(ucp_gbtable)[lgb] & (1u << rgb)) == 0) break;
-
- /* Not breaking between Regional Indicators is allowed only if
- there are an even number of preceding RIs. */
-
- if (lgb == ucp_gbRegionalIndicator &&
- rgb == ucp_gbRegionalIndicator)
- {
- int ricount = 0;
- PCRE2_SPTR bptr = nptr - 1;
-#ifdef SUPPORT_UNICODE
- if (utf) BACKCHAR(bptr);
-#endif
- /* bptr is pointing to the left-hand character */
-
- while (bptr > mb->start_subject)
- {
- bptr--;
-#ifdef SUPPORT_UNICODE
- if (utf)
- {
- BACKCHAR(bptr);
- GETCHAR(d, bptr);
- }
- else
-#endif
- d = *bptr;
- if (UCD_GRAPHBREAK(d) != ucp_gbRegionalIndicator) break;
- ricount++;
- }
- if ((ricount & 1) != 0) break; /* Grapheme break required */
- }
-
- /* If Extend follows E_Base[_GAZ] do not update lgb; this allows
- any number of Extend before a following E_Modifier. */
-
- if (rgb != ucp_gbExtend ||
- (lgb != ucp_gbE_Base && lgb != ucp_gbE_Base_GAZ))
- lgb = rgb;
-
- ncount++;
- nptr += dlen;
- }
+ PCRE2_SPTR nptr = PRIV(extuni)(c, ptr + clen, mb->start_subject,
+ end_subject, utf, &ncount);
if (nptr >= end_subject && (mb->moptions & PCRE2_PARTIAL_HARD) != 0)
reset_could_continue = TRUE;
ADD_NEW_DATA(-(state_offset + 1), 0, ncount);
@@ -2371,7 +2237,7 @@ for (;;)
case OP_NOTI:
if (clen > 0)
{
- unsigned int otherd;
+ uint32_t otherd;
#ifdef SUPPORT_UNICODE
if (utf && d >= 128)
otherd = UCD_OTHERCASE(d);
@@ -2761,45 +2627,10 @@ for (;;)
if (code[LINK_SIZE + 1] == OP_CALLOUT
|| code[LINK_SIZE + 1] == OP_CALLOUT_STR)
{
- PCRE2_SIZE callout_length = (code[LINK_SIZE + 1] == OP_CALLOUT)?
- (PCRE2_SIZE)PRIV(OP_lengths)[OP_CALLOUT] :
- (PCRE2_SIZE)GET(code, 2 + 3*LINK_SIZE);
-
- rrc = 0;
- if (mb->callout != NULL)
- {
- pcre2_callout_block cb;
- cb.version = 1;
- cb.capture_top = 1;
- cb.capture_last = 0;
- cb.offset_vector = offsets;
- cb.mark = NULL; /* No (*MARK) support */
- cb.subject = start_subject;
- cb.subject_length = (PCRE2_SIZE)(end_subject - start_subject);
- cb.start_match = (PCRE2_SIZE)(current_subject - start_subject);
- cb.current_position = (PCRE2_SIZE)(ptr - start_subject);
- cb.pattern_position = GET(code, LINK_SIZE + 2);
- cb.next_item_length = GET(code, LINK_SIZE + 2 + LINK_SIZE);
-
- if (code[LINK_SIZE + 1] == OP_CALLOUT)
- {
- cb.callout_number = code[2 + 3*LINK_SIZE];
- cb.callout_string_offset = 0;
- cb.callout_string = NULL;
- cb.callout_string_length = 0;
- }
- else
- {
- cb.callout_number = 0;
- cb.callout_string_offset = GET(code, 2 + 4*LINK_SIZE);
- cb.callout_string = code + (2 + 5*LINK_SIZE) + 1;
- cb.callout_string_length =
- callout_length - (1 + 4*LINK_SIZE) - 2;
- }
-
- if ((rrc = (mb->callout)(&cb, mb->callout_data)) < 0)
- return rrc; /* Abandon */
- }
+ PCRE2_SIZE callout_length;
+ rrc = do_callout(code, offsets, current_subject, ptr, mb,
+ 1 + LINK_SIZE, &callout_length);
+ if (rrc < 0) return rrc; /* Abandon */
if (rrc > 0) break; /* Fail this thread */
code += callout_length; /* Skip callout data */
}
@@ -3131,44 +2962,10 @@ for (;;)
case OP_CALLOUT:
case OP_CALLOUT_STR:
{
- unsigned int callout_length = (*code == OP_CALLOUT)
- ? PRIV(OP_lengths)[OP_CALLOUT] : GET(code, 1 + 2*LINK_SIZE);
- rrc = 0;
-
- if (mb->callout != NULL)
- {
- pcre2_callout_block cb;
- cb.version = 1;
- cb.capture_top = 1;
- cb.capture_last = 0;
- cb.offset_vector = offsets;
- cb.mark = NULL; /* No (*MARK) support */
- cb.subject = start_subject;
- cb.subject_length = (PCRE2_SIZE)(end_subject - start_subject);
- cb.start_match = (PCRE2_SIZE)(current_subject - start_subject);
- cb.current_position = (PCRE2_SIZE)(ptr - start_subject);
- cb.pattern_position = GET(code, 1);
- cb.next_item_length = GET(code, 1 + LINK_SIZE);
-
- if (*code == OP_CALLOUT)
- {
- cb.callout_number = code[1 + 2*LINK_SIZE];
- cb.callout_string_offset = 0;
- cb.callout_string = NULL;
- cb.callout_string_length = 0;
- }
- else
- {
- cb.callout_number = 0;
- cb.callout_string_offset = GET(code, 1 + 3*LINK_SIZE);
- cb.callout_string = code + (1 + 4*LINK_SIZE) + 1;
- cb.callout_string_length =
- callout_length - (1 + 4*LINK_SIZE) - 2;
- }
-
- if ((rrc = (mb->callout)(&cb, mb->callout_data)) < 0)
- return rrc; /* Abandon */
- }
+ PCRE2_SIZE callout_length;
+ rrc = do_callout(code, offsets, current_subject, ptr, mb, 0,
+ &callout_length);
+ if (rrc < 0) return rrc; /* Abandon */
if (rrc == 0)
{ ADD_ACTIVE(state_offset + (int)callout_length, 0); }
}
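Both callout hunks above now delegate to a shared static do_callout() helper whose definition lies outside this excerpt. A rough sketch of what such a helper has to do, reconstructed from the removed inline code (a sketch under assumptions, not the patch's actual function; the extracode argument is 1 + LINK_SIZE for the conditional-group case and 0 for a standalone callout, and the callout_number/callout_string handling is elided):

static int
do_callout_sketch(PCRE2_SPTR code, PCRE2_SIZE *offsets, PCRE2_SPTR current_subject,
  PCRE2_SPTR ptr, dfa_match_block *mb, PCRE2_SIZE extracode, PCRE2_SIZE *lengthptr)
{
pcre2_callout_block *cb = mb->cb;   /* Fixed fields were set up in pcre2_dfa_match() */

/* The caller always skips *lengthptr code units, so set it even when there is
no callout function registered. */

*lengthptr = (code[extracode] == OP_CALLOUT)?
  (PCRE2_SIZE)PRIV(OP_lengths)[OP_CALLOUT] :
  (PCRE2_SIZE)GET(code, extracode + 1 + 2*LINK_SIZE);

if (mb->callout == NULL) return 0;  /* No callout function: treat as success */

cb->offset_vector = offsets;
cb->start_match = (PCRE2_SIZE)(current_subject - cb->subject);
cb->current_position = (PCRE2_SIZE)(ptr - cb->subject);
cb->pattern_position = GET(code, extracode + 1);
cb->next_item_length = GET(code, extracode + 1 + LINK_SIZE);
/* ... callout_number / callout_string fields as in the removed inline code ... */

return (mb->callout)(cb, mb->callout_data);
}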
@@ -3287,6 +3084,7 @@ const uint8_t *start_bits = NULL;
/* We need to have mb pointing to a match block, because the IS_NEWLINE macro
is used below, and it expects NLBLOCK to be defined as a pointer. */
+pcre2_callout_block cb;
dfa_match_block actual_match_block;
dfa_match_block *mb = &actual_match_block;
@@ -3364,9 +3162,21 @@ startline = (re->flags & PCRE2_STARTLINE) != 0;
firstline = (re->overall_options & PCRE2_FIRSTLINE) != 0;
bumpalong_limit = end_subject;
-/* Get data from the match context, if present, and fill in the fields in the
-match block. It is an error to set an offset limit without setting the flag at
-compile time. */
+/* Initialize and set up the fixed fields in the callout block, with a pointer
+in the match block. */
+
+mb->cb = &cb;
+cb.version = 2;
+cb.subject = subject;
+cb.subject_length = (PCRE2_SIZE)(end_subject - subject);
+cb.callout_flags = 0;
+cb.capture_top = 1; /* No capture support */
+cb.capture_last = 0;
+cb.mark = NULL; /* No (*MARK) support */
+
+/* Get data from the match context, if present, and fill in the remaining
+fields in the match block. It is an error to set an offset limit without
+setting the flag at compile time. */
if (mcontext == NULL)
{
@@ -3554,13 +3364,13 @@ for (;;)
if ((re->overall_options & PCRE2_NO_START_OPTIMIZE) == 0 &&
(options & PCRE2_DFA_RESTART) == 0)
{
- PCRE2_SPTR save_end_subject = end_subject;
-
/* If firstline is TRUE, the start of the match is constrained to the first
line of a multiline string. That is, the match must be before or at the
- first newline. Implement this by temporarily adjusting end_subject so that
- we stop the optimization scans for a first code unit at a newline. If the
- match fails at the newline, later code breaks this loop. */
+ first newline following the start of matching. Temporarily adjust
+ end_subject so that we stop the optimization scans for a first code unit
+ immediately after the first character of a newline (the first code unit can
+ legitimately be a newline). If the match fails at the newline, later code
+ breaks this loop. */
if (firstline)
{
@@ -3568,15 +3378,15 @@ for (;;)
#ifdef SUPPORT_UNICODE
if (utf)
{
- while (t < mb->end_subject && !IS_NEWLINE(t))
+ while (t < end_subject && !IS_NEWLINE(t))
{
t++;
- ACROSSCHAR(t < end_subject, *t, t++);
+ ACROSSCHAR(t < end_subject, t, t++);
}
}
else
#endif
- while (t < mb->end_subject && !IS_NEWLINE(t)) t++;
+ while (t < end_subject && !IS_NEWLINE(t)) t++;
end_subject = t;
}
@@ -3648,14 +3458,18 @@ for (;;)
#endif
}
- /* If we can't find the required code unit, break the bumpalong loop,
- to force a match failure, except when doing partial matching, when we
- let the next cycle run at the end of the subject. To see why, consider
- the pattern /(?<=abc)def/, which partially matches "abc", even though
- the string does not contain the starting character "d". */
+ /* If we can't find the required code unit, having reached the true end
+ of the subject, break the bumpalong loop, to force a match failure,
+ except when doing partial matching, when we let the next cycle run at
+ the end of the subject. To see why, consider the pattern /(?<=abc)def/,
+ which partially matches "abc", even though the string does not contain
+ the starting character "d". If we have not reached the true end of the
+ subject (PCRE2_FIRSTLINE caused end_subject to be temporarily modified)
+ we also let the cycle run, because the matching string is legitimately
+ allowed to start with the first code unit of a newline. */
if ((mb->moptions & (PCRE2_PARTIAL_HARD|PCRE2_PARTIAL_SOFT)) == 0 &&
- start_match >= end_subject)
+ start_match >= mb->end_subject)
break;
}
@@ -3672,8 +3486,7 @@ for (;;)
while (start_match < end_subject && !WAS_NEWLINE(start_match))
{
start_match++;
- ACROSSCHAR(start_match < end_subject, *start_match,
- start_match++);
+ ACROSSCHAR(start_match < end_subject, start_match, start_match++);
}
}
else
@@ -3709,12 +3522,18 @@ for (;;)
if ((start_bits[c/8] & (1 << (c&7))) != 0) break;
start_match++;
}
+
+ /* See comment above in first_cu checking about the next line. */
+
+ if ((mb->moptions & (PCRE2_PARTIAL_HARD|PCRE2_PARTIAL_SOFT)) == 0 &&
+ start_match >= mb->end_subject)
+ break;
}
} /* End of first code unit handling */
/* Restore fudged end_subject */
- end_subject = save_end_subject;
+ end_subject = mb->end_subject;
/* The following two optimizations are disabled for partial matching. */
@@ -3829,8 +3648,7 @@ for (;;)
#ifdef SUPPORT_UNICODE
if (utf)
{
- ACROSSCHAR(start_match < end_subject, *start_match,
- start_match++);
+ ACROSSCHAR(start_match < end_subject, start_match, start_match++);
}
#endif
if (start_match > end_subject) break;
diff --git a/src/3rdparty/pcre2/src/pcre2_extuni.c b/src/3rdparty/pcre2/src/pcre2_extuni.c
new file mode 100644
index 0000000000..11a0bfbdd6
--- /dev/null
+++ b/src/3rdparty/pcre2/src/pcre2_extuni.c
@@ -0,0 +1,148 @@
+/*************************************************
+* Perl-Compatible Regular Expressions *
+*************************************************/
+
+/* PCRE is a library of functions to support regular expressions whose syntax
+and semantics are as close as possible to those of the Perl 5 language.
+
+ Written by Philip Hazel
+ Original API code Copyright (c) 1997-2012 University of Cambridge
+ New API code Copyright (c) 2016-2018 University of Cambridge
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+/* This module contains an internal function that is used to match a Unicode
+extended grapheme sequence. It is used by both pcre2_match() and
+pcre2_dfa_match(). However, it is called only when Unicode support is being
+compiled. Nevertheless, we provide a dummy function when there is no Unicode
+support, because some compilers do not like functionless source files. */
+
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+
+#include "pcre2_internal.h"
+
+
+/* Dummy function */
+
+#ifndef SUPPORT_UNICODE
+PCRE2_SPTR
+PRIV(extuni)(uint32_t c, PCRE2_SPTR eptr, PCRE2_SPTR start_subject,
+ PCRE2_SPTR end_subject, BOOL utf, int *xcount)
+{
+(void)c;
+(void)eptr;
+(void)start_subject;
+(void)end_subject;
+(void)utf;
+(void)xcount;
+return NULL;
+}
+#else
+
+
+/*************************************************
+* Match an extended grapheme sequence *
+*************************************************/
+
+/*
+Arguments:
+ c the first character
+ eptr pointer to next character
+ start_subject pointer to start of subject
+ end_subject pointer to end of subject
+ utf TRUE if in UTF mode
+ xcount pointer to count of additional characters,
+ or NULL if count not needed
+
+Returns: pointer after the end of the sequence
+*/
+
+PCRE2_SPTR
+PRIV(extuni)(uint32_t c, PCRE2_SPTR eptr, PCRE2_SPTR start_subject,
+ PCRE2_SPTR end_subject, BOOL utf, int *xcount)
+{
+int lgb = UCD_GRAPHBREAK(c);
+
+while (eptr < end_subject)
+ {
+ int rgb;
+ int len = 1;
+ if (!utf) c = *eptr; else { GETCHARLEN(c, eptr, len); }
+ rgb = UCD_GRAPHBREAK(c);
+ if ((PRIV(ucp_gbtable)[lgb] & (1 << rgb)) == 0) break;
+
+ /* Not breaking between Regional Indicators is allowed only if there
+ are an even number of preceding RIs. */
+
+ if (lgb == ucp_gbRegionalIndicator && rgb == ucp_gbRegionalIndicator)
+ {
+ int ricount = 0;
+ PCRE2_SPTR bptr = eptr - 1;
+ if (utf) BACKCHAR(bptr);
+
+ /* bptr is pointing to the left-hand character */
+
+ while (bptr > start_subject)
+ {
+ bptr--;
+ if (utf)
+ {
+ BACKCHAR(bptr);
+ GETCHAR(c, bptr);
+ }
+ else
+ c = *bptr;
+ if (UCD_GRAPHBREAK(c) != ucp_gbRegionalIndicator) break;
+ ricount++;
+ }
+ if ((ricount & 1) != 0) break; /* Grapheme break required */
+ }
+
+ /* If Extend follows E_Base[_GAZ] do not update lgb; this allows
+ any number of Extend before a following E_Modifier. */
+
+ if (rgb != ucp_gbExtend ||
+ (lgb != ucp_gbE_Base && lgb != ucp_gbE_Base_GAZ))
+ lgb = rgb;
+
+ eptr += len;
+ if (xcount != NULL) *xcount += 1;
+ }
+
+return eptr;
+}
+
+#endif /* SUPPORT_UNICODE */
+
+/* End of pcre2_extuni.c */
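For orientation, a minimal sketch of how a caller consumes one whole grapheme cluster with this helper; the variable names are illustrative, not taken from the patch:

{
uint32_t c;            /* first code point, already decoded */
PCRE2_SPTR eptr;       /* points just past that first code point */
int extra = 0;         /* receives the number of additional characters */

eptr = PRIV(extuni)(c, eptr, start_subject, end_subject, utf, &extra);
/* eptr now points just past the grapheme cluster; it absorbed "extra"
   characters beyond the first one. Passing NULL as the last argument skips
   the counting (see the argument description above). */
}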
diff --git a/src/3rdparty/pcre2/src/pcre2_internal.h b/src/3rdparty/pcre2/src/pcre2_internal.h
index 9ccce25d47..3db9d604f4 100644
--- a/src/3rdparty/pcre2/src/pcre2_internal.h
+++ b/src/3rdparty/pcre2/src/pcre2_internal.h
@@ -38,6 +38,9 @@ POSSIBILITY OF SUCH DAMAGE.
-----------------------------------------------------------------------------
*/
+#ifndef PCRE2_INTERNAL_H_IDEMPOTENT_GUARD
+#define PCRE2_INTERNAL_H_IDEMPOTENT_GUARD
+
/* We do not support both EBCDIC and Unicode at the same time. The "configure"
script prevents both being selected, but not everybody uses "configure". EBCDIC
is only supported for the 8-bit library, but the check for this has to be later
@@ -1767,6 +1770,7 @@ typedef struct open_capitem {
struct open_capitem *next; /* Chain link */
uint16_t number; /* Capture number */
uint16_t flag; /* Set TRUE if recursive back ref */
+ uint16_t assert_depth; /* Assertion depth when opened */
} open_capitem;
/* Layout of the UCP type table that translates property names into types and
@@ -1926,6 +1930,7 @@ is available. */
#define _pcre2_auto_possessify PCRE2_SUFFIX(_pcre2_auto_possessify_)
#define _pcre2_check_escape PCRE2_SUFFIX(_pcre2_check_escape_)
+#define _pcre2_extuni PCRE2_SUFFIX(_pcre2_extuni_)
#define _pcre2_find_bracket PCRE2_SUFFIX(_pcre2_find_bracket_)
#define _pcre2_is_newline PCRE2_SUFFIX(_pcre2_is_newline_)
#define _pcre2_jit_free_rodata PCRE2_SUFFIX(_pcre2_jit_free_rodata_)
@@ -1949,6 +1954,8 @@ extern int _pcre2_auto_possessify(PCRE2_UCHAR *, BOOL,
const compile_block *);
extern int _pcre2_check_escape(PCRE2_SPTR *, PCRE2_SPTR, uint32_t *,
int *, uint32_t, BOOL, compile_block *);
+extern PCRE2_SPTR _pcre2_extuni(uint32_t, PCRE2_SPTR, PCRE2_SPTR, PCRE2_SPTR,
+ BOOL, int *);
extern PCRE2_SPTR _pcre2_find_bracket(PCRE2_SPTR, BOOL, int);
extern BOOL _pcre2_is_newline(PCRE2_SPTR, uint32_t, PCRE2_SPTR,
uint32_t *, BOOL);
@@ -1970,5 +1977,6 @@ extern BOOL _pcre2_was_newline(PCRE2_SPTR, uint32_t, PCRE2_SPTR,
uint32_t *, BOOL);
extern BOOL _pcre2_xclass(uint32_t, PCRE2_SPTR, BOOL);
#endif /* PCRE2_CODE_UNIT_WIDTH */
+#endif /* PCRE2_INTERNAL_H_IDEMPOTENT_GUARD */
/* End of pcre2_internal.h */
diff --git a/src/3rdparty/pcre2/src/pcre2_intmodedep.h b/src/3rdparty/pcre2/src/pcre2_intmodedep.h
index 387f65eb08..c4c4c3adb9 100644
--- a/src/3rdparty/pcre2/src/pcre2_intmodedep.h
+++ b/src/3rdparty/pcre2/src/pcre2_intmodedep.h
@@ -7,7 +7,7 @@ and semantics are as close as possible to those of the Perl 5 language.
Written by Philip Hazel
Original API code Copyright (c) 1997-2012 University of Cambridge
- New API code Copyright (c) 2016-2017 University of Cambridge
+ New API code Copyright (c) 2016-2018 University of Cambridge
-----------------------------------------------------------------------------
Redistribution and use in source and binary forms, with or without
@@ -351,7 +351,7 @@ because almost all calls are already within a block of UTF-8 only code. */
/* Same as above, but it allows a fully customizable form. */
#define ACROSSCHAR(condition, eptr, action) \
- while((condition) && ((eptr) & 0xc0u) == 0x80u) action
+ while((condition) && ((*eptr) & 0xc0u) == 0x80u) action
/* Deposit a character into memory, returning the number of code units. */
@@ -457,7 +457,7 @@ code. */
/* Same as above, but it allows a fully customizable form. */
#define ACROSSCHAR(condition, eptr, action) \
- if ((condition) && ((eptr) & 0xfc00u) == 0xdc00u) action
+ if ((condition) && ((*eptr) & 0xfc00u) == 0xdc00u) action
/* Deposit a character into memory, returning the number of code units. */
@@ -623,6 +623,7 @@ typedef struct pcre2_real_code {
uint32_t magic_number; /* Paranoid and endianness check */
uint32_t compile_options; /* Options passed to pcre2_compile() */
uint32_t overall_options; /* Options after processing the pattern */
+ uint32_t extra_options; /* Taken from compile_context */
uint32_t flags; /* Various state flags */
uint32_t limit_heap; /* Limit set in the pattern */
uint32_t limit_match; /* Limit set in the pattern */
@@ -639,11 +640,13 @@ typedef struct pcre2_real_code {
uint16_t name_count; /* Number of name entries in the table */
} pcre2_real_code;
-/* The real match data structure. Define ovector large so that array bound
-checkers don't grumble. Memory for this structure is obtained by calling
-pcre2_match_data_create(), which sets the size as the offset of ovector plus
-pairs of elements for each capturing group. (See also the heapframe structure
-below.) */
+/* The real match data structure. Define ovector as large as it can ever
+actually be so that array bound checkers don't grumble. Memory for this
+structure is obtained by calling pcre2_match_data_create(), which sets the size
+as the offset of ovector plus a pair of elements for each capturable string, so
+the size varies from call to call. As the maximum number of capturing
+subpatterns is 65535 we must allow for 65536 strings to include the overall
+match. (See also the heapframe structure below.) */
typedef struct pcre2_real_match_data {
pcre2_memctl memctl;
@@ -656,7 +659,7 @@ typedef struct pcre2_real_match_data {
uint16_t matchedby; /* Type of match (normal, JIT, DFA) */
uint16_t oveccount; /* Number of pairs */
int rc; /* The return code from the match */
- PCRE2_SIZE ovector[10000];/* The first field */
+ PCRE2_SIZE ovector[131072]; /* Must be last in the structure */
} pcre2_real_match_data;
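A quick check of the new bound: each captured string, including the overall match, needs a start and an end offset, and the comment above gives 65535 as the maximum number of capturing subpatterns, so the worst case is

  2 * (65535 + 1) = 131072

PCRE2_SIZE slots, which is the array size used here and for the heapframe ovector below.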
@@ -723,6 +726,8 @@ typedef struct compile_block {
PCRE2_SIZE erroroffset; /* Offset of error in pattern */
uint16_t names_found; /* Number of entries so far */
uint16_t name_entry_size; /* Size of each entry */
+ uint16_t parens_depth; /* Depth of nested parentheses */
+ uint16_t assert_depth; /* Depth of nested assertions */
open_capitem *open_caps; /* Chain of open capture items */
named_group *named_groups; /* Points to vector in pre-compile */
uint32_t named_group_list_size; /* Number of entries in the list */
@@ -741,8 +746,6 @@ typedef struct compile_block {
uint32_t class_range_end; /* Overall class range end */
PCRE2_UCHAR nl[4]; /* Newline string when fixed length */
int max_lookbehind; /* Maximum lookbehind (characters) */
- int parens_depth; /* Depth of nested parentheses */
- int assert_depth; /* Depth of nested assertions */
int req_varyopt; /* "After variable item" flag for reqbyte */
BOOL had_accept; /* (*ACCEPT) encountered */
BOOL had_pruneorskip; /* (*PRUNE) or (*SKIP) encountered */
@@ -803,7 +806,7 @@ typedef struct heapframe {
runtime array bound checks don't catch references to it. However, for any
specific call to pcre2_match() the memory allocated for each frame structure
allows for exactly the right size ovector for the number of capturing
- parentheses. */
+ parentheses. (See also the comment for pcre2_real_match_data above.) */
PCRE2_SPTR eptr; /* MUST BE FIRST */
PCRE2_SPTR start_match; /* Can be adjusted by \K */
@@ -812,7 +815,7 @@ typedef struct heapframe {
uint32_t capture_last; /* Most recent capture */
PCRE2_SIZE last_group_offset; /* Saved offset to most recent group frame */
PCRE2_SIZE offset_top; /* Offset after highest capture */
- PCRE2_SIZE ovector[10000]; /* Must be last in the structure */
+ PCRE2_SIZE ovector[131072]; /* Must be last in the structure */
} heapframe;
typedef char check_heapframe_size[
@@ -861,6 +864,7 @@ typedef struct match_block {
uint32_t nltype; /* Newline type */
uint32_t nllen; /* Newline string length */
PCRE2_UCHAR nl[4]; /* Newline string when fixed */
+ pcre2_callout_block *cb; /* Points to a callout block */
void *callout_data; /* To pass back to callouts */
int (*callout)(pcre2_callout_block *,void *); /* Callout function or NULL */
} match_block;
@@ -886,6 +890,7 @@ typedef struct dfa_match_block {
uint32_t nllen; /* Newline string length */
PCRE2_UCHAR nl[4]; /* Newline string when fixed */
uint16_t bsr_convention; /* \R interpretation */
+ pcre2_callout_block *cb; /* Points to a callout block */
void *callout_data; /* To pass back to callouts */
int (*callout)(pcre2_callout_block *,void *); /* Callout function or NULL */
dfa_recursion_info *recursive; /* Linked list of recursion data */
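The ACROSSCHAR change above is what lets the call sites earlier in this patch (for example in pcre2_dfa_match.c) drop the explicit dereference. For the UTF-8 variant, the call now expands mechanically as follows:

/* ACROSSCHAR(t < end_subject, t, t++) expands to: */
while ((t < end_subject) && ((*t) & 0xc0u) == 0x80u) t++;
/* i.e. it advances t past any UTF-8 continuation code units, reading through
   the pointer itself instead of a separately supplied code unit. The UTF-16
   variant does the same for low surrogates. */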
diff --git a/src/3rdparty/pcre2/src/pcre2_jit_compile.c b/src/3rdparty/pcre2/src/pcre2_jit_compile.c
index c7bf0b2c3e..80ed1c4ca6 100644
--- a/src/3rdparty/pcre2/src/pcre2_jit_compile.c
+++ b/src/3rdparty/pcre2/src/pcre2_jit_compile.c
@@ -228,7 +228,7 @@ enum control_types {
type_then_trap = 1
};
-typedef int (SLJIT_CALL *jit_function)(jit_arguments *args);
+typedef int (SLJIT_FUNC *jit_function)(jit_arguments *args);
/* The following structure is the key data type for the recursive
code generator. It is allocated by compile_matchingpath, and contains
@@ -527,12 +527,27 @@ typedef struct compare_context {
/* Used for accessing the elements of the stack. */
#define STACK(i) ((i) * (int)sizeof(sljit_sw))
+#ifdef SLJIT_PREF_SHIFT_REG
+#if SLJIT_PREF_SHIFT_REG == SLJIT_R2
+/* Nothing. */
+#elif SLJIT_PREF_SHIFT_REG == SLJIT_R3
+#define SHIFT_REG_IS_R3
+#else
+#error "Unsupported shift register"
+#endif
+#endif
+
#define TMP1 SLJIT_R0
+#ifdef SHIFT_REG_IS_R3
+#define TMP2 SLJIT_R3
+#define TMP3 SLJIT_R2
+#else
#define TMP2 SLJIT_R2
#define TMP3 SLJIT_R3
-#define STR_PTR SLJIT_S0
-#define STR_END SLJIT_S1
-#define STACK_TOP SLJIT_R1
+#endif
+#define STR_PTR SLJIT_R1
+#define STR_END SLJIT_S0
+#define STACK_TOP SLJIT_S1
#define STACK_LIMIT SLJIT_S2
#define COUNT_MATCH SLJIT_S3
#define ARGUMENTS SLJIT_S4
@@ -558,16 +573,13 @@ the start pointers when the end of the capturing group has not yet reached. */
#if PCRE2_CODE_UNIT_WIDTH == 8
#define MOV_UCHAR SLJIT_MOV_U8
-#define MOVU_UCHAR SLJIT_MOVU_U8
#define IN_UCHARS(x) (x)
#elif PCRE2_CODE_UNIT_WIDTH == 16
#define MOV_UCHAR SLJIT_MOV_U16
-#define MOVU_UCHAR SLJIT_MOVU_U16
#define UCHAR_SHIFT (1)
#define IN_UCHARS(x) ((x) * 2)
#elif PCRE2_CODE_UNIT_WIDTH == 32
#define MOV_UCHAR SLJIT_MOV_U32
-#define MOVU_UCHAR SLJIT_MOVU_U32
#define UCHAR_SHIFT (2)
#define IN_UCHARS(x) ((x) * 4)
#else
@@ -2697,12 +2709,25 @@ if (length < 8)
}
else
{
- GET_LOCAL_BASE(SLJIT_R1, 0, OVECTOR_START);
- OP1(SLJIT_MOV, SLJIT_R2, 0, SLJIT_IMM, length - 1);
- loop = LABEL();
- OP1(SLJIT_MOVU, SLJIT_MEM1(SLJIT_R1), sizeof(sljit_sw), SLJIT_R0, 0);
- OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_R2, 0, SLJIT_R2, 0, SLJIT_IMM, 1);
- JUMPTO(SLJIT_NOT_ZERO, loop);
+ if (sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_SUPP | SLJIT_MEM_STORE | SLJIT_MEM_PRE, SLJIT_R0, SLJIT_MEM1(SLJIT_R1), sizeof(sljit_sw)) == SLJIT_SUCCESS)
+ {
+ GET_LOCAL_BASE(SLJIT_R1, 0, OVECTOR_START);
+ OP1(SLJIT_MOV, SLJIT_R2, 0, SLJIT_IMM, length - 1);
+ loop = LABEL();
+ sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_STORE | SLJIT_MEM_PRE, SLJIT_R0, SLJIT_MEM1(SLJIT_R1), sizeof(sljit_sw));
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_R2, 0, SLJIT_R2, 0, SLJIT_IMM, 1);
+ JUMPTO(SLJIT_NOT_ZERO, loop);
+ }
+ else
+ {
+ GET_LOCAL_BASE(SLJIT_R1, 0, OVECTOR_START + sizeof(sljit_sw));
+ OP1(SLJIT_MOV, SLJIT_R2, 0, SLJIT_IMM, length - 1);
+ loop = LABEL();
+ OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_R1), 0, SLJIT_R0, 0);
+ OP2(SLJIT_ADD, SLJIT_R1, 0, SLJIT_R1, 0, SLJIT_IMM, sizeof(sljit_sw));
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_R2, 0, SLJIT_R2, 0, SLJIT_IMM, 1);
+ JUMPTO(SLJIT_NOT_ZERO, loop);
+ }
}
}
@@ -2735,12 +2760,25 @@ if (length < 8)
}
else
{
- GET_LOCAL_BASE(TMP2, 0, OVECTOR_START + sizeof(sljit_sw));
- OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_IMM, length - 2);
- loop = LABEL();
- OP1(SLJIT_MOVU, SLJIT_MEM1(TMP2), sizeof(sljit_sw), TMP1, 0);
- OP2(SLJIT_SUB | SLJIT_SET_Z, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 1);
- JUMPTO(SLJIT_NOT_ZERO, loop);
+ if (sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_SUPP | SLJIT_MEM_STORE | SLJIT_MEM_PRE, TMP1, SLJIT_MEM1(TMP2), sizeof(sljit_sw)) == SLJIT_SUCCESS)
+ {
+ GET_LOCAL_BASE(TMP2, 0, OVECTOR_START + sizeof(sljit_sw));
+ OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_IMM, length - 2);
+ loop = LABEL();
+ sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_STORE | SLJIT_MEM_PRE, TMP1, SLJIT_MEM1(TMP2), sizeof(sljit_sw));
+ OP2(SLJIT_SUB | SLJIT_SET_Z, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 1);
+ JUMPTO(SLJIT_NOT_ZERO, loop);
+ }
+ else
+ {
+ GET_LOCAL_BASE(TMP2, 0, OVECTOR_START + 2 * sizeof(sljit_sw));
+ OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_IMM, length - 2);
+ loop = LABEL();
+ OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), 0, TMP1, 0);
+ OP2(SLJIT_ADD, TMP2, 0, TMP2, 0, SLJIT_IMM, sizeof(sljit_sw));
+ OP2(SLJIT_SUB | SLJIT_SET_Z, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 1);
+ JUMPTO(SLJIT_NOT_ZERO, loop);
+ }
}
OP1(SLJIT_MOV, STACK_TOP, 0, ARGUMENTS, 0);
@@ -2750,10 +2788,10 @@ if (common->control_head_ptr != 0)
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr, SLJIT_IMM, 0);
OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(STACK_TOP), SLJIT_OFFSETOF(jit_arguments, stack));
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->start_ptr);
-OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(STACK_TOP), SLJIT_OFFSETOF(struct sljit_stack, base));
+OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(STACK_TOP), SLJIT_OFFSETOF(struct sljit_stack, end));
}
-static sljit_sw SLJIT_CALL do_search_mark(sljit_sw *current, PCRE2_SPTR skip_arg)
+static sljit_sw SLJIT_FUNC do_search_mark(sljit_sw *current, PCRE2_SPTR skip_arg)
{
while (current != NULL)
{
@@ -2774,13 +2812,14 @@ while (current != NULL)
SLJIT_ASSERT(current[0] == 0 || current < (sljit_sw*)current[0]);
current = (sljit_sw*)current[0];
}
-return -1;
+return 0;
}
static SLJIT_INLINE void copy_ovector(compiler_common *common, int topbracket)
{
DEFINE_COMPILER;
struct sljit_label *loop;
+BOOL has_pre;
/* At this point we can freely use all registers. */
OP1(SLJIT_MOV, SLJIT_S2, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(1));
@@ -2797,36 +2836,62 @@ if (common->mark_ptr != 0)
OP2(SLJIT_ADD, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_R0), SLJIT_OFFSETOF(jit_arguments, match_data),
SLJIT_IMM, SLJIT_OFFSETOF(pcre2_match_data, ovector) - sizeof(PCRE2_SIZE));
-GET_LOCAL_BASE(SLJIT_S0, 0, OVECTOR_START);
+has_pre = sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_SUPP | SLJIT_MEM_PRE, SLJIT_S1, SLJIT_MEM1(SLJIT_S0), sizeof(sljit_sw)) == SLJIT_SUCCESS;
+
+GET_LOCAL_BASE(SLJIT_S0, 0, OVECTOR_START - (has_pre ? sizeof(sljit_sw) : 0));
OP1(SLJIT_MOV, SLJIT_R0, 0, SLJIT_MEM1(SLJIT_R0), SLJIT_OFFSETOF(jit_arguments, begin));
loop = LABEL();
-OP2(SLJIT_SUB, SLJIT_S1, 0, SLJIT_MEM1(SLJIT_S0), 0, SLJIT_R0, 0);
-OP2(SLJIT_ADD, SLJIT_S0, 0, SLJIT_S0, 0, SLJIT_IMM, sizeof(sljit_sw));
+
+if (has_pre)
+ sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_PRE, SLJIT_S1, SLJIT_MEM1(SLJIT_S0), sizeof(sljit_sw));
+else
+ {
+ OP1(SLJIT_MOV, SLJIT_S1, 0, SLJIT_MEM1(SLJIT_S0), 0);
+ OP2(SLJIT_ADD, SLJIT_S0, 0, SLJIT_S0, 0, SLJIT_IMM, sizeof(sljit_sw));
+ }
+
+OP2(SLJIT_ADD, SLJIT_R2, 0, SLJIT_R2, 0, SLJIT_IMM, sizeof(PCRE2_SIZE));
+OP2(SLJIT_SUB, SLJIT_S1, 0, SLJIT_S1, 0, SLJIT_R0, 0);
/* Copy the integer value to the output buffer */
#if PCRE2_CODE_UNIT_WIDTH == 16 || PCRE2_CODE_UNIT_WIDTH == 32
OP2(SLJIT_ASHR, SLJIT_S1, 0, SLJIT_S1, 0, SLJIT_IMM, UCHAR_SHIFT);
#endif
+
SLJIT_ASSERT(sizeof(PCRE2_SIZE) == 4 || sizeof(PCRE2_SIZE) == 8);
-if (sizeof(PCRE2_SIZE) == 4)
- OP1(SLJIT_MOVU_U32, SLJIT_MEM1(SLJIT_R2), sizeof(PCRE2_SIZE), SLJIT_S1, 0);
-else
- OP1(SLJIT_MOVU, SLJIT_MEM1(SLJIT_R2), sizeof(PCRE2_SIZE), SLJIT_S1, 0);
+OP1(((sizeof(PCRE2_SIZE) == 4) ? SLJIT_MOV_U32 : SLJIT_MOV), SLJIT_MEM1(SLJIT_R2), 0, SLJIT_S1, 0);
+
OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_R1, 0, SLJIT_R1, 0, SLJIT_IMM, 1);
JUMPTO(SLJIT_NOT_ZERO, loop);
/* Calculate the return value, which is the maximum ovector value. */
if (topbracket > 1)
{
- GET_LOCAL_BASE(SLJIT_R0, 0, OVECTOR_START + topbracket * 2 * sizeof(sljit_sw));
- OP1(SLJIT_MOV, SLJIT_R1, 0, SLJIT_IMM, topbracket + 1);
+ if (sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_SUPP | SLJIT_MEM_PRE, SLJIT_R2, SLJIT_MEM1(SLJIT_R0), -(2 * (sljit_sw)sizeof(sljit_sw))) == SLJIT_SUCCESS)
+ {
+ GET_LOCAL_BASE(SLJIT_R0, 0, OVECTOR_START + topbracket * 2 * sizeof(sljit_sw));
+ OP1(SLJIT_MOV, SLJIT_R1, 0, SLJIT_IMM, topbracket + 1);
- /* OVECTOR(0) is never equal to SLJIT_S2. */
- loop = LABEL();
- OP1(SLJIT_MOVU, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_R0), -(2 * (sljit_sw)sizeof(sljit_sw)));
- OP2(SLJIT_SUB, SLJIT_R1, 0, SLJIT_R1, 0, SLJIT_IMM, 1);
- CMPTO(SLJIT_EQUAL, SLJIT_R2, 0, SLJIT_S2, 0, loop);
- OP1(SLJIT_MOV, SLJIT_RETURN_REG, 0, SLJIT_R1, 0);
+ /* OVECTOR(0) is never equal to SLJIT_S2. */
+ loop = LABEL();
+ sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_PRE, SLJIT_R2, SLJIT_MEM1(SLJIT_R0), -(2 * (sljit_sw)sizeof(sljit_sw)));
+ OP2(SLJIT_SUB, SLJIT_R1, 0, SLJIT_R1, 0, SLJIT_IMM, 1);
+ CMPTO(SLJIT_EQUAL, SLJIT_R2, 0, SLJIT_S2, 0, loop);
+ OP1(SLJIT_MOV, SLJIT_RETURN_REG, 0, SLJIT_R1, 0);
+ }
+ else
+ {
+ GET_LOCAL_BASE(SLJIT_R0, 0, OVECTOR_START + (topbracket - 1) * 2 * sizeof(sljit_sw));
+ OP1(SLJIT_MOV, SLJIT_R1, 0, SLJIT_IMM, topbracket + 1);
+
+ /* OVECTOR(0) is never equal to SLJIT_S2. */
+ loop = LABEL();
+ OP1(SLJIT_MOV, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_R0), 0);
+ OP2(SLJIT_SUB, SLJIT_R0, 0, SLJIT_R0, 0, SLJIT_IMM, 2 * (sljit_sw)sizeof(sljit_sw));
+ OP2(SLJIT_SUB, SLJIT_R1, 0, SLJIT_R1, 0, SLJIT_IMM, 1);
+ CMPTO(SLJIT_EQUAL, SLJIT_R2, 0, SLJIT_S2, 0, loop);
+ OP1(SLJIT_MOV, SLJIT_RETURN_REG, 0, SLJIT_R1, 0);
+ }
}
else
OP1(SLJIT_MOV, SLJIT_RETURN_REG, 0, SLJIT_IMM, 1);
@@ -2837,7 +2902,7 @@ static SLJIT_INLINE void return_with_partial_match(compiler_common *common, stru
DEFINE_COMPILER;
sljit_s32 mov_opcode;
-SLJIT_COMPILE_ASSERT(STR_END == SLJIT_S1, str_end_must_be_saved_reg2);
+SLJIT_COMPILE_ASSERT(STR_END == SLJIT_S0, str_end_must_be_saved_reg0);
SLJIT_ASSERT(common->start_used_ptr != 0 && common->start_ptr != 0
&& (common->mode == PCRE2_JIT_PARTIAL_SOFT ? common->hit_start != 0 : common->hit_start == 0));
@@ -2847,19 +2912,19 @@ OP1(SLJIT_MOV, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_SP),
OP1(SLJIT_MOV, SLJIT_RETURN_REG, 0, SLJIT_IMM, PCRE2_ERROR_PARTIAL);
/* Store match begin and end. */
-OP1(SLJIT_MOV, SLJIT_S0, 0, SLJIT_MEM1(SLJIT_R1), SLJIT_OFFSETOF(jit_arguments, begin));
+OP1(SLJIT_MOV, SLJIT_S1, 0, SLJIT_MEM1(SLJIT_R1), SLJIT_OFFSETOF(jit_arguments, begin));
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_R1), SLJIT_OFFSETOF(jit_arguments, startchar_ptr), SLJIT_R2, 0);
OP1(SLJIT_MOV, SLJIT_R1, 0, SLJIT_MEM1(SLJIT_R1), SLJIT_OFFSETOF(jit_arguments, match_data));
mov_opcode = (sizeof(PCRE2_SIZE) == 4) ? SLJIT_MOV_U32 : SLJIT_MOV;
-OP2(SLJIT_SUB, SLJIT_R2, 0, SLJIT_R2, 0, SLJIT_S0, 0);
+OP2(SLJIT_SUB, SLJIT_R2, 0, SLJIT_R2, 0, SLJIT_S1, 0);
#if PCRE2_CODE_UNIT_WIDTH == 16 || PCRE2_CODE_UNIT_WIDTH == 32
OP2(SLJIT_ASHR, SLJIT_R2, 0, SLJIT_R2, 0, SLJIT_IMM, UCHAR_SHIFT);
#endif
OP1(mov_opcode, SLJIT_MEM1(SLJIT_R1), SLJIT_OFFSETOF(pcre2_match_data, ovector), SLJIT_R2, 0);
-OP2(SLJIT_SUB, STR_END, 0, STR_END, 0, SLJIT_S0, 0);
+OP2(SLJIT_SUB, STR_END, 0, STR_END, 0, SLJIT_S1, 0);
#if PCRE2_CODE_UNIT_WIDTH == 16 || PCRE2_CODE_UNIT_WIDTH == 32
OP2(SLJIT_ASHR, STR_END, 0, STR_END, 0, SLJIT_IMM, UCHAR_SHIFT);
#endif
@@ -4351,7 +4416,6 @@ struct sljit_jump *quit;
struct sljit_jump *partial_quit[2];
sljit_u8 instruction[8];
sljit_s32 tmp1_ind = sljit_get_register_index(TMP1);
-// sljit_s32 tmp2_ind = sljit_get_register_index(TMP2);
sljit_s32 str_ptr_ind = sljit_get_register_index(STR_PTR);
sljit_s32 data_ind = 0;
sljit_s32 tmp_ind = 1;
@@ -4376,8 +4440,6 @@ if (common->mode == PCRE2_JIT_COMPLETE)
OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, character_to_int32(char1 | bit));
-// SLJIT_ASSERT(tmp1_ind < 8 && tmp2_ind == 1);
-
SLJIT_ASSERT(tmp1_ind < 8);
/* MOVD xmm, r/m32 */
@@ -5976,93 +6038,190 @@ OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_EQUAL);
sljit_emit_fast_return(compiler, RETURN_ADDR, 0);
}
-#define CHAR1 STR_END
-#define CHAR2 STACK_TOP
-
static void do_casefulcmp(compiler_common *common)
{
DEFINE_COMPILER;
struct sljit_jump *jump;
struct sljit_label *label;
+int char1_reg;
+int char2_reg;
-sljit_emit_fast_enter(compiler, RETURN_ADDR, 0);
+if (sljit_get_register_index(TMP3) < 0)
+ {
+ char1_reg = STR_END;
+ char2_reg = STACK_TOP;
+ }
+else
+ {
+ char1_reg = TMP3;
+ char2_reg = RETURN_ADDR;
+ }
+
+sljit_emit_fast_enter(compiler, SLJIT_MEM1(SLJIT_SP), LOCALS0);
OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, TMP2, 0);
-OP1(SLJIT_MOV, TMP3, 0, CHAR1, 0);
-OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS0, CHAR2, 0);
-OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, IN_UCHARS(1));
-OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
-label = LABEL();
-OP1(MOVU_UCHAR, CHAR1, 0, SLJIT_MEM1(TMP1), IN_UCHARS(1));
-OP1(MOVU_UCHAR, CHAR2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(1));
-jump = CMP(SLJIT_NOT_EQUAL, CHAR1, 0, CHAR2, 0);
-OP2(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, TMP2, 0, SLJIT_IMM, IN_UCHARS(1));
-JUMPTO(SLJIT_NOT_ZERO, label);
+if (char1_reg == STR_END)
+ {
+ OP1(SLJIT_MOV, TMP3, 0, char1_reg, 0);
+ OP1(SLJIT_MOV, RETURN_ADDR, 0, char2_reg, 0);
+ }
-JUMPHERE(jump);
-OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
-OP1(SLJIT_MOV, CHAR1, 0, TMP3, 0);
-OP1(SLJIT_MOV, CHAR2, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0);
-sljit_emit_fast_return(compiler, RETURN_ADDR, 0);
-}
+if (sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_POST, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)) == SLJIT_SUCCESS)
+ {
+ label = LABEL();
+ sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_POST, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1));
+ sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_POST, char2_reg, SLJIT_MEM1(STR_PTR), IN_UCHARS(1));
+ jump = CMP(SLJIT_NOT_EQUAL, char1_reg, 0, char2_reg, 0);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, TMP2, 0, SLJIT_IMM, IN_UCHARS(1));
+ JUMPTO(SLJIT_NOT_ZERO, label);
+
+ JUMPHERE(jump);
+ OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0);
+ }
+else if (sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_PRE, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)) == SLJIT_SUCCESS)
+ {
+ OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, IN_UCHARS(1));
+ OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
+
+ label = LABEL();
+ sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_PRE, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1));
+ sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_PRE, char2_reg, SLJIT_MEM1(STR_PTR), IN_UCHARS(1));
+ jump = CMP(SLJIT_NOT_EQUAL, char1_reg, 0, char2_reg, 0);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, TMP2, 0, SLJIT_IMM, IN_UCHARS(1));
+ JUMPTO(SLJIT_NOT_ZERO, label);
+
+ JUMPHERE(jump);
+ OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0);
+ OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
+ }
+else
+ {
+ label = LABEL();
+ OP1(MOV_UCHAR, char1_reg, 0, SLJIT_MEM1(TMP1), 0);
+ OP1(MOV_UCHAR, char2_reg, 0, SLJIT_MEM1(STR_PTR), 0);
+ OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, SLJIT_IMM, IN_UCHARS(1));
+ OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
+ jump = CMP(SLJIT_NOT_EQUAL, char1_reg, 0, char2_reg, 0);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, TMP2, 0, SLJIT_IMM, IN_UCHARS(1));
+ JUMPTO(SLJIT_NOT_ZERO, label);
-#define LCC_TABLE STACK_LIMIT
+ JUMPHERE(jump);
+ OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0);
+ }
+
+if (char1_reg == STR_END)
+ {
+ OP1(SLJIT_MOV, char1_reg, 0, TMP3, 0);
+ OP1(SLJIT_MOV, char2_reg, 0, RETURN_ADDR, 0);
+ }
+
+sljit_emit_fast_return(compiler, TMP1, 0);
+}
static void do_caselesscmp(compiler_common *common)
{
DEFINE_COMPILER;
struct sljit_jump *jump;
struct sljit_label *label;
+int char1_reg = STR_END;
+int char2_reg;
+int lcc_table;
+int opt_type = 0;
-sljit_emit_fast_enter(compiler, RETURN_ADDR, 0);
+if (sljit_get_register_index(TMP3) < 0)
+ {
+ char2_reg = STACK_TOP;
+ lcc_table = STACK_LIMIT;
+ }
+else
+ {
+ char2_reg = RETURN_ADDR;
+ lcc_table = TMP3;
+ }
+
+if (sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_POST, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)) == SLJIT_SUCCESS)
+ opt_type = 1;
+else if (sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_PRE, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)) == SLJIT_SUCCESS)
+ opt_type = 2;
+
+sljit_emit_fast_enter(compiler, SLJIT_MEM1(SLJIT_SP), LOCALS0);
OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, TMP2, 0);
-OP1(SLJIT_MOV, TMP3, 0, LCC_TABLE, 0);
-OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS0, CHAR1, 0);
-OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS1, CHAR2, 0);
-OP1(SLJIT_MOV, LCC_TABLE, 0, SLJIT_IMM, common->lcc);
-OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, IN_UCHARS(1));
-OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
+OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS1, char1_reg, 0);
+
+if (char2_reg == STACK_TOP)
+ {
+ OP1(SLJIT_MOV, TMP3, 0, char2_reg, 0);
+ OP1(SLJIT_MOV, RETURN_ADDR, 0, lcc_table, 0);
+ }
+
+OP1(SLJIT_MOV, lcc_table, 0, SLJIT_IMM, common->lcc);
+
+if (opt_type == 1)
+ {
+ label = LABEL();
+ sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_POST, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1));
+ sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_POST, char2_reg, SLJIT_MEM1(STR_PTR), IN_UCHARS(1));
+ }
+else if (opt_type == 2)
+ {
+ OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, IN_UCHARS(1));
+ OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
+
+ label = LABEL();
+ sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_PRE, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1));
+ sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_PRE, char2_reg, SLJIT_MEM1(STR_PTR), IN_UCHARS(1));
+ }
+else
+ {
+ label = LABEL();
+ OP1(MOV_UCHAR, char1_reg, 0, SLJIT_MEM1(TMP1), 0);
+ OP1(MOV_UCHAR, char2_reg, 0, SLJIT_MEM1(STR_PTR), 0);
+ OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, SLJIT_IMM, IN_UCHARS(1));
+ }
-label = LABEL();
-OP1(MOVU_UCHAR, CHAR1, 0, SLJIT_MEM1(TMP1), IN_UCHARS(1));
-OP1(MOVU_UCHAR, CHAR2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(1));
#if PCRE2_CODE_UNIT_WIDTH != 8
-jump = CMP(SLJIT_GREATER, CHAR1, 0, SLJIT_IMM, 255);
+jump = CMP(SLJIT_GREATER, char1_reg, 0, SLJIT_IMM, 255);
#endif
-OP1(SLJIT_MOV_U8, CHAR1, 0, SLJIT_MEM2(LCC_TABLE, CHAR1), 0);
+OP1(SLJIT_MOV_U8, char1_reg, 0, SLJIT_MEM2(lcc_table, char1_reg), 0);
#if PCRE2_CODE_UNIT_WIDTH != 8
JUMPHERE(jump);
-jump = CMP(SLJIT_GREATER, CHAR2, 0, SLJIT_IMM, 255);
+jump = CMP(SLJIT_GREATER, char2_reg, 0, SLJIT_IMM, 255);
#endif
-OP1(SLJIT_MOV_U8, CHAR2, 0, SLJIT_MEM2(LCC_TABLE, CHAR2), 0);
+OP1(SLJIT_MOV_U8, char2_reg, 0, SLJIT_MEM2(lcc_table, char2_reg), 0);
#if PCRE2_CODE_UNIT_WIDTH != 8
JUMPHERE(jump);
#endif
-jump = CMP(SLJIT_NOT_EQUAL, CHAR1, 0, CHAR2, 0);
+
+if (opt_type == 0)
+ OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
+
+jump = CMP(SLJIT_NOT_EQUAL, char1_reg, 0, char2_reg, 0);
OP2(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, TMP2, 0, SLJIT_IMM, IN_UCHARS(1));
JUMPTO(SLJIT_NOT_ZERO, label);
JUMPHERE(jump);
-OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
-OP1(SLJIT_MOV, LCC_TABLE, 0, TMP3, 0);
-OP1(SLJIT_MOV, CHAR1, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0);
-OP1(SLJIT_MOV, CHAR2, 0, SLJIT_MEM1(SLJIT_SP), LOCALS1);
-sljit_emit_fast_return(compiler, RETURN_ADDR, 0);
-}
+OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0);
-#undef LCC_TABLE
-#undef CHAR1
-#undef CHAR2
+if (opt_type == 2)
+ OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
+
+if (char2_reg == STACK_TOP)
+ {
+ OP1(SLJIT_MOV, char2_reg, 0, TMP3, 0);
+ OP1(SLJIT_MOV, lcc_table, 0, RETURN_ADDR, 0);
+ }
+
+OP1(SLJIT_MOV, char1_reg, 0, SLJIT_MEM1(SLJIT_SP), LOCALS1);
+sljit_emit_fast_return(compiler, TMP1, 0);
+}
#if defined SUPPORT_UNICODE
-static PCRE2_SPTR SLJIT_CALL do_utf_caselesscmp(PCRE2_SPTR src1, jit_arguments *args, PCRE2_SPTR end1)
+static PCRE2_SPTR SLJIT_FUNC do_utf_caselesscmp(PCRE2_SPTR src1, PCRE2_SPTR src2, PCRE2_SPTR end1, PCRE2_SPTR end2)
{
/* This function would be ineffective to do at the JIT level. */
sljit_u32 c1, c2;
-PCRE2_SPTR src2 = args->startchar_ptr;
-PCRE2_SPTR end2 = args->end;
const ucd_record *ur;
const sljit_u32 *pp;
@@ -7048,6 +7207,122 @@ SLJIT_UNREACHABLE();
return cc;
}
+#ifdef SUPPORT_UNICODE
+
+#if PCRE2_CODE_UNIT_WIDTH != 32
+
+static PCRE2_SPTR SLJIT_FUNC do_extuni_utf(jit_arguments *args, PCRE2_SPTR cc)
+{
+PCRE2_SPTR start_subject = args->begin;
+PCRE2_SPTR end_subject = args->end;
+int lgb, rgb, len, ricount;
+PCRE2_SPTR prevcc, bptr;
+uint32_t c;
+
+prevcc = cc;
+GETCHARINC(c, cc);
+lgb = UCD_GRAPHBREAK(c);
+
+while (cc < end_subject)
+ {
+ len = 1;
+ GETCHARLEN(c, cc, len);
+ rgb = UCD_GRAPHBREAK(c);
+
+ if ((PRIV(ucp_gbtable)[lgb] & (1 << rgb)) == 0) break;
+
+ /* Not breaking between Regional Indicators is allowed only if there
+ are an even number of preceding RIs. */
+
+ if (lgb == ucp_gbRegionalIndicator && rgb == ucp_gbRegionalIndicator)
+ {
+ ricount = 0;
+ bptr = prevcc;
+
+ /* bptr is pointing to the left-hand character */
+ while (bptr > start_subject)
+ {
+ bptr--;
+ BACKCHAR(bptr);
+ GETCHAR(c, bptr);
+
+ if (UCD_GRAPHBREAK(c) != ucp_gbRegionalIndicator) break;
+
+ ricount++;
+ }
+
+ if ((ricount & 1) != 0) break; /* Grapheme break required */
+ }
+
+ /* If Extend follows E_Base[_GAZ] do not update lgb; this allows
+ any number of Extend before a following E_Modifier. */
+
+ if (rgb != ucp_gbExtend || (lgb != ucp_gbE_Base && lgb != ucp_gbE_Base_GAZ))
+ lgb = rgb;
+
+ prevcc = cc;
+ cc += len;
+ }
+
+return cc;
+}
+
+#endif
+
+static PCRE2_SPTR SLJIT_FUNC do_extuni_no_utf(jit_arguments *args, PCRE2_SPTR cc)
+{
+PCRE2_SPTR start_subject = args->begin;
+PCRE2_SPTR end_subject = args->end;
+int lgb, rgb, ricount;
+PCRE2_SPTR bptr;
+uint32_t c;
+
+GETCHARINC(c, cc);
+lgb = UCD_GRAPHBREAK(c);
+
+while (cc < end_subject)
+ {
+ c = *cc;
+ rgb = UCD_GRAPHBREAK(c);
+
+ if ((PRIV(ucp_gbtable)[lgb] & (1 << rgb)) == 0) break;
+
+ /* Not breaking between Regional Indicators is allowed only if there
+ are an even number of preceding RIs. */
+
+ if (lgb == ucp_gbRegionalIndicator && rgb == ucp_gbRegionalIndicator)
+ {
+ ricount = 0;
+ bptr = cc - 1;
+
+ /* bptr is pointing to the left-hand character */
+ while (bptr > start_subject)
+ {
+ bptr--;
+ c = *bptr;
+
+ if (UCD_GRAPHBREAK(c) != ucp_gbRegionalIndicator) break;
+
+ ricount++;
+ }
+
+ if ((ricount & 1) != 0) break; /* Grapheme break required */
+ }
+
+ /* If Extend follows E_Base[_GAZ] do not update lgb; this allows
+ any number of Extend before a following E_Modifier. */
+
+ if (rgb != ucp_gbExtend || (lgb != ucp_gbE_Base && lgb != ucp_gbE_Base_GAZ))
+ lgb = rgb;
+
+ cc++;
+ }
+
+return cc;
+}
+
+#endif
+
static PCRE2_SPTR compile_char1_matchingpath(compiler_common *common, PCRE2_UCHAR type, PCRE2_SPTR cc, jump_list **backtracks, BOOL check_str_ptr)
{
DEFINE_COMPILER;
@@ -7057,7 +7332,6 @@ compare_context context;
struct sljit_jump *jump[3];
jump_list *end_list;
#ifdef SUPPORT_UNICODE
-struct sljit_label *label;
PCRE2_UCHAR propdata[5];
#endif /* SUPPORT_UNICODE */
@@ -7224,35 +7498,22 @@ switch(type)
case OP_EXTUNI:
if (check_str_ptr)
detect_partial_match(common, backtracks);
- read_char(common);
- add_jump(compiler, &common->getucd, JUMP(SLJIT_FAST_CALL));
- OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, gbprop));
- /* Optimize register allocation: use a real register. */
- OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS0, STACK_TOP, 0);
- OP1(SLJIT_MOV_U8, STACK_TOP, 0, SLJIT_MEM2(TMP1, TMP2), 3);
- label = LABEL();
- jump[0] = CMP(SLJIT_GREATER_EQUAL, STR_PTR, 0, STR_END, 0);
- OP1(SLJIT_MOV, TMP3, 0, STR_PTR, 0);
- read_char(common);
- add_jump(compiler, &common->getucd, JUMP(SLJIT_FAST_CALL));
- OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, gbprop));
- OP1(SLJIT_MOV_U8, TMP2, 0, SLJIT_MEM2(TMP1, TMP2), 3);
+ SLJIT_ASSERT(TMP1 == SLJIT_R0 && STR_PTR == SLJIT_R1);
+ OP1(SLJIT_MOV, SLJIT_R0, 0, ARGUMENTS, 0);
- OP2(SLJIT_SHL, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 2);
- OP1(SLJIT_MOV_U32, TMP1, 0, SLJIT_MEM1(STACK_TOP), (sljit_sw)PRIV(ucp_gbtable));
- OP1(SLJIT_MOV, STACK_TOP, 0, TMP2, 0);
- OP2(SLJIT_SHL, TMP2, 0, SLJIT_IMM, 1, TMP2, 0);
- OP2(SLJIT_AND | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, TMP2, 0);
- JUMPTO(SLJIT_NOT_ZERO, label);
+#if PCRE2_CODE_UNIT_WIDTH != 32
+ sljit_emit_icall(compiler, SLJIT_CALL, SLJIT_RET(SW) | SLJIT_ARG1(SW) | SLJIT_ARG2(SW), SLJIT_IMM,
+ common->utf ? SLJIT_FUNC_OFFSET(do_extuni_utf) : SLJIT_FUNC_OFFSET(do_extuni_no_utf));
+#else
+ sljit_emit_icall(compiler, SLJIT_CALL, SLJIT_RET(SW) | SLJIT_ARG1(SW) | SLJIT_ARG2(SW), SLJIT_IMM, SLJIT_FUNC_OFFSET(do_extuni_no_utf));
+#endif
- OP1(SLJIT_MOV, STR_PTR, 0, TMP3, 0);
- JUMPHERE(jump[0]);
- OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0);
+ OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_RETURN_REG, 0);
if (common->mode == PCRE2_JIT_PARTIAL_HARD)
{
- jump[0] = CMP(SLJIT_LESS, STR_PTR, 0, STR_END, 0);
+ jump[0] = CMP(SLJIT_LESS, SLJIT_RETURN_REG, 0, STR_END, 0);
    /* Since we successfully read a char above, partial matching must occur. */
check_partial(common, TRUE);
JUMPHERE(jump[0]);
@@ -7585,32 +7846,34 @@ else
#if defined SUPPORT_UNICODE
if (common->utf && *cc == OP_REFI)
{
- SLJIT_ASSERT(TMP1 == SLJIT_R0 && STACK_TOP == SLJIT_R1 && TMP2 == SLJIT_R2);
+ SLJIT_ASSERT(TMP1 == SLJIT_R0 && STR_PTR == SLJIT_R1);
if (ref)
- OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset + 1));
+ OP1(SLJIT_MOV, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset + 1));
else
- OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(TMP2), sizeof(sljit_sw));
+ OP1(SLJIT_MOV, SLJIT_R2, 0, SLJIT_MEM1(TMP2), sizeof(sljit_sw));
if (withchecks)
- jump = CMP(SLJIT_EQUAL, TMP1, 0, TMP2, 0);
-
- /* Needed to save important temporary registers. */
- OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS0, STACK_TOP, 0);
- OP1(SLJIT_MOV, SLJIT_R1, 0, ARGUMENTS, 0);
- OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_R1), SLJIT_OFFSETOF(jit_arguments, startchar_ptr), STR_PTR, 0);
- sljit_emit_ijump(compiler, SLJIT_CALL3, SLJIT_IMM, SLJIT_FUNC_OFFSET(do_utf_caselesscmp));
- OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0);
+ jump = CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_R2, 0);
+ /* No free saved registers so save data on stack. */
+
+ OP1(SLJIT_MOV, SLJIT_R3, 0, STR_END, 0);
+ sljit_emit_icall(compiler, SLJIT_CALL, SLJIT_RET(SW) | SLJIT_ARG1(SW) | SLJIT_ARG2(SW) | SLJIT_ARG3(SW) | SLJIT_ARG4(SW), SLJIT_IMM, SLJIT_FUNC_OFFSET(do_utf_caselesscmp));
+ OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_RETURN_REG, 0);
+
if (common->mode == PCRE2_JIT_COMPLETE)
add_jump(compiler, backtracks, CMP(SLJIT_LESS_EQUAL, SLJIT_RETURN_REG, 0, SLJIT_IMM, 1));
else
{
- add_jump(compiler, backtracks, CMP(SLJIT_EQUAL, SLJIT_RETURN_REG, 0, SLJIT_IMM, 0));
- nopartial = CMP(SLJIT_NOT_EQUAL, SLJIT_RETURN_REG, 0, SLJIT_IMM, 1);
+ OP2(SLJIT_SUB | SLJIT_SET_Z | SLJIT_SET_LESS, SLJIT_UNUSED, 0, SLJIT_RETURN_REG, 0, SLJIT_IMM, 1);
+
+ add_jump(compiler, backtracks, JUMP(SLJIT_LESS));
+
+ nopartial = JUMP(SLJIT_NOT_EQUAL);
+ OP1(SLJIT_MOV, STR_PTR, 0, STR_END, 0);
check_partial(common, FALSE);
add_jump(compiler, backtracks, JUMP(SLJIT_JUMP));
JUMPHERE(nopartial);
}
- OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_RETURN_REG, 0);
}
else
#endif /* SUPPORT_UNICODE */
@@ -7924,7 +8187,7 @@ BACKTRACK_AS(recurse_backtrack)->matchingpath = LABEL();
return cc + 1 + LINK_SIZE;
}
-static int SLJIT_CALL do_callout(struct jit_arguments *arguments, pcre2_callout_block *callout_block, PCRE2_SPTR *jit_ovector)
+static sljit_s32 SLJIT_FUNC do_callout(struct jit_arguments *arguments, pcre2_callout_block *callout_block, PCRE2_SPTR *jit_ovector)
{
PCRE2_SPTR begin;
PCRE2_SIZE *ovector;
@@ -7941,7 +8204,8 @@ oveccount = callout_block->capture_top;
SLJIT_ASSERT(oveccount >= 1);
-callout_block->version = 1;
+callout_block->version = 2;
+callout_block->callout_flags = 0;
/* Offsets in subject. */
callout_block->subject_length = arguments->end - arguments->begin;
@@ -8033,23 +8297,24 @@ OP1(mov_opcode, SLJIT_MEM1(STACK_TOP), CALLOUT_ARG_OFFSET(callout_string_length)
OP1(mov_opcode, SLJIT_MEM1(STACK_TOP), CALLOUT_ARG_OFFSET(callout_string_offset), SLJIT_IMM, value3);
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), CALLOUT_ARG_OFFSET(mark), (common->mark_ptr != 0) ? TMP2 : SLJIT_IMM, 0);
+SLJIT_ASSERT(TMP1 == SLJIT_R0 && STR_PTR == SLJIT_R1);
+
/* Needed to save important temporary registers. */
-OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS0, STACK_TOP, 0);
+OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS0, STR_PTR, 0);
/* SLJIT_R0 = arguments */
OP1(SLJIT_MOV, SLJIT_R1, 0, STACK_TOP, 0);
GET_LOCAL_BASE(SLJIT_R2, 0, OVECTOR_START);
-sljit_emit_ijump(compiler, SLJIT_CALL3, SLJIT_IMM, SLJIT_FUNC_OFFSET(do_callout));
-OP1(SLJIT_MOV_S32, SLJIT_RETURN_REG, 0, SLJIT_RETURN_REG, 0);
-OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0);
+sljit_emit_icall(compiler, SLJIT_CALL, SLJIT_RET(S32) | SLJIT_ARG1(SW) | SLJIT_ARG2(SW) | SLJIT_ARG3(SW), SLJIT_IMM, SLJIT_FUNC_OFFSET(do_callout));
+OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0);
free_stack(common, callout_arg_size);
/* Check return value. */
-OP2(SLJIT_SUB | SLJIT_SET_Z | SLJIT_SET_SIG_GREATER, SLJIT_UNUSED, 0, SLJIT_RETURN_REG, 0, SLJIT_IMM, 0);
-add_jump(compiler, &backtrack->topbacktracks, JUMP(SLJIT_SIG_GREATER));
+OP2(SLJIT_SUB32 | SLJIT_SET_Z | SLJIT_SET_SIG_GREATER, SLJIT_UNUSED, 0, SLJIT_RETURN_REG, 0, SLJIT_IMM, 0);
+add_jump(compiler, &backtrack->topbacktracks, JUMP(SLJIT_SIG_GREATER32));
if (common->abort_label == NULL)
- add_jump(compiler, &common->abort, JUMP(SLJIT_NOT_EQUAL) /* SIG_LESS */);
+ add_jump(compiler, &common->abort, JUMP(SLJIT_NOT_EQUAL32) /* SIG_LESS */);
else
- JUMPTO(SLJIT_NOT_EQUAL /* SIG_LESS */, common->abort_label);
+ JUMPTO(SLJIT_NOT_EQUAL32 /* SIG_LESS */, common->abort_label);
return cc + callout_length;
}
@@ -11279,15 +11544,13 @@ if (common->local_quit_available)
if (opcode == OP_SKIP_ARG)
{
- SLJIT_ASSERT(common->control_head_ptr != 0);
+ SLJIT_ASSERT(common->control_head_ptr != 0 && TMP1 == SLJIT_R0 && STR_PTR == SLJIT_R1);
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr);
- OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS0, STACK_TOP, 0);
- OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_IMM, (sljit_sw)(current->cc + 2));
- sljit_emit_ijump(compiler, SLJIT_CALL2, SLJIT_IMM, SLJIT_FUNC_OFFSET(do_search_mark));
- OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0);
+ OP1(SLJIT_MOV, SLJIT_R1, 0, SLJIT_IMM, (sljit_sw)(current->cc + 2));
+ sljit_emit_icall(compiler, SLJIT_CALL, SLJIT_RET(SW) | SLJIT_ARG1(SW) | SLJIT_ARG2(SW), SLJIT_IMM, SLJIT_FUNC_OFFSET(do_search_mark));
- OP1(SLJIT_MOV, STR_PTR, 0, TMP1, 0);
- add_jump(compiler, &common->reset_match, CMP(SLJIT_NOT_EQUAL, STR_PTR, 0, SLJIT_IMM, -1));
+ OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_R0, 0);
+ add_jump(compiler, &common->reset_match, CMP(SLJIT_NOT_EQUAL, SLJIT_R0, 0, SLJIT_IMM, 0));
return;
}
@@ -11957,7 +12220,7 @@ if (!compiler)
common->compiler = compiler;
/* Main pcre_jit_exec entry. */
-sljit_emit_enter(compiler, 0, 1, 5, 5, 0, 0, private_data_size);
+sljit_emit_enter(compiler, 0, SLJIT_ARG1(SW), 5, 5, 0, 0, private_data_size);
/* Register init. */
reset_ovector(common, (re->top_bracket + 1) * 2);
@@ -11970,8 +12233,8 @@ OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, str))
OP1(SLJIT_MOV, STR_END, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, end));
OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, stack));
OP1(SLJIT_MOV_U32, TMP1, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, limit_match));
-OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(struct sljit_stack, base));
-OP1(SLJIT_MOV, STACK_LIMIT, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(struct sljit_stack, limit));
+OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(struct sljit_stack, end));
+OP1(SLJIT_MOV, STACK_LIMIT, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(struct sljit_stack, start));
OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, SLJIT_IMM, 1);
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LIMIT_MATCH, TMP1, 0);
@@ -12200,20 +12463,23 @@ common->quit_label = quit_label;
set_jumps(common->stackalloc, LABEL());
/* RETURN_ADDR is not a saved register. */
sljit_emit_fast_enter(compiler, SLJIT_MEM1(SLJIT_SP), LOCALS0);
-OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS1, TMP2, 0);
-OP1(SLJIT_MOV, TMP1, 0, ARGUMENTS, 0);
-OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, stack));
-OP1(SLJIT_MOV, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(struct sljit_stack, top), STACK_TOP, 0);
-OP2(SLJIT_SUB, TMP2, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(struct sljit_stack, limit), SLJIT_IMM, STACK_GROWTH_RATE);
-sljit_emit_ijump(compiler, SLJIT_CALL2, SLJIT_IMM, SLJIT_FUNC_OFFSET(sljit_stack_resize));
-jump = CMP(SLJIT_NOT_EQUAL, SLJIT_RETURN_REG, 0, SLJIT_IMM, 0);
-OP1(SLJIT_MOV, TMP1, 0, ARGUMENTS, 0);
-OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, stack));
-OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(struct sljit_stack, top));
-OP1(SLJIT_MOV, STACK_LIMIT, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(struct sljit_stack, limit));
-OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), LOCALS1);
-sljit_emit_fast_return(compiler, SLJIT_MEM1(SLJIT_SP), LOCALS0);
+SLJIT_ASSERT(TMP1 == SLJIT_R0 && STR_PTR == SLJIT_R1);
+
+OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS1, STR_PTR, 0);
+OP1(SLJIT_MOV, SLJIT_R0, 0, ARGUMENTS, 0);
+OP2(SLJIT_SUB, SLJIT_R1, 0, STACK_LIMIT, 0, SLJIT_IMM, STACK_GROWTH_RATE);
+OP1(SLJIT_MOV, SLJIT_R0, 0, SLJIT_MEM1(SLJIT_R0), SLJIT_OFFSETOF(jit_arguments, stack));
+OP1(SLJIT_MOV, STACK_LIMIT, 0, TMP2, 0);
+
+sljit_emit_icall(compiler, SLJIT_CALL, SLJIT_RET(SW) | SLJIT_ARG1(SW) | SLJIT_ARG2(SW), SLJIT_IMM, SLJIT_FUNC_OFFSET(sljit_stack_resize));
+
+jump = CMP(SLJIT_EQUAL, SLJIT_RETURN_REG, 0, SLJIT_IMM, 0);
+OP1(SLJIT_MOV, TMP2, 0, STACK_LIMIT, 0);
+OP1(SLJIT_MOV, STACK_LIMIT, 0, SLJIT_RETURN_REG, 0);
+OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0);
+OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(SLJIT_SP), LOCALS1);
+sljit_emit_fast_return(compiler, TMP1, 0);
/* Allocation failed. */
JUMPHERE(jump);
diff --git a/src/3rdparty/pcre2/src/pcre2_jit_match.c b/src/3rdparty/pcre2/src/pcre2_jit_match.c
index 4cad754c75..5a66545bae 100644
--- a/src/3rdparty/pcre2/src/pcre2_jit_match.c
+++ b/src/3rdparty/pcre2/src/pcre2_jit_match.c
@@ -49,9 +49,9 @@ static SLJIT_NOINLINE int jit_machine_stack_exec(jit_arguments *arguments, jit_f
sljit_u8 local_space[MACHINE_STACK_SIZE];
struct sljit_stack local_stack;
-local_stack.max_limit = local_space;
-local_stack.limit = local_space;
-local_stack.base = local_space + MACHINE_STACK_SIZE;
+local_stack.min_start = local_space;
+local_stack.start = local_space;
+local_stack.end = local_space + MACHINE_STACK_SIZE;
local_stack.top = local_space + MACHINE_STACK_SIZE;
arguments->stack = &local_stack;
return executable_func(arguments);
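The renamed struct sljit_stack fields describe a downward-growing stack; a sketch of the layout implied by the initialisation above and by the resize call emitted in the JIT stackalloc code earlier in this patch (field roles inferred, addresses increase to the right):

/*  min_start ........... start ......................... end
 *  lowest address the    current lower bound            initial top of the
 *  stack may ever use    (STACK_LIMIT in JIT code)      stack (STACK_TOP)
 *
 *  top starts at end and moves towards start as frames are pushed;
 *  sljit_stack_resize() lowers start (down to min_start) when more room is
 *  needed and yields 0 on failure, which the stackalloc hunk compares for. */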
@@ -118,7 +118,7 @@ if ((options & PCRE2_PARTIAL_HARD) != 0)
else if ((options & PCRE2_PARTIAL_SOFT) != 0)
index = 1;
-if (functions->executable_funcs[index] == NULL)
+if (functions == NULL || functions->executable_funcs[index] == NULL)
return PCRE2_ERROR_JIT_BADOPTION;
/* Sanity checks should be handled by pcre_exec. */
diff --git a/src/3rdparty/pcre2/src/pcre2_match.c b/src/3rdparty/pcre2/src/pcre2_match.c
index 050b7e93ec..79cc93f918 100644
--- a/src/3rdparty/pcre2/src/pcre2_match.c
+++ b/src/3rdparty/pcre2/src/pcre2_match.c
@@ -7,7 +7,7 @@ and semantics are as close as possible to those of the Perl 5 language.
Written by Philip Hazel
Original API code Copyright (c) 1997-2012 University of Cambridge
- New API code Copyright (c) 2015-2017 University of Cambridge
+ New API code Copyright (c) 2015-2018 University of Cambridge
-----------------------------------------------------------------------------
Redistribution and use in source and binary forms, with or without
@@ -249,7 +249,8 @@ for (i = 0, Q = mb->match_frames;
/* This function is called for all callouts, whether "standalone" or at the
start of a conditional group. Feptr will be pointing to either OP_CALLOUT or
-OP_CALLOUT_STR.
+OP_CALLOUT_STR. A callout block is allocated in pcre2_match() and initialized
+with fixed values.
Arguments:
F points to the current backtracking frame
@@ -266,7 +267,7 @@ do_callout(heapframe *F, match_block *mb, PCRE2_SIZE *lengthptr)
int rc;
PCRE2_SIZE save0, save1;
PCRE2_SIZE *callout_ovector;
-pcre2_callout_block cb;
+pcre2_callout_block *cb;
*lengthptr = (*Fecode == OP_CALLOUT)?
PRIV(OP_lengths)[OP_CALLOUT] : GET(Fecode, 1 + 2*LINK_SIZE);
@@ -285,40 +286,42 @@ pointer. */
callout_ovector = (PCRE2_SIZE *)(Fovector) - 2;
-cb.version = 1;
-cb.capture_top = (uint32_t)Foffset_top/2 + 1;
-cb.capture_last = Fcapture_last;
-cb.offset_vector = callout_ovector;
-cb.mark = mb->nomatch_mark;
-cb.subject = mb->start_subject;
-cb.subject_length = (PCRE2_SIZE)(mb->end_subject - mb->start_subject);
-cb.start_match = (PCRE2_SIZE)(Fstart_match - mb->start_subject);
-cb.current_position = (PCRE2_SIZE)(Feptr - mb->start_subject);
-cb.pattern_position = GET(Fecode, 1);
-cb.next_item_length = GET(Fecode, 1 + LINK_SIZE);
+/* The cb->version, cb->subject, cb->subject_length, and cb->start_match fields
+are set externally. The first 3 never change; the last is updated for each
+bumpalong. */
+
+cb = mb->cb;
+cb->capture_top = (uint32_t)Foffset_top/2 + 1;
+cb->capture_last = Fcapture_last;
+cb->offset_vector = callout_ovector;
+cb->mark = mb->nomatch_mark;
+cb->current_position = (PCRE2_SIZE)(Feptr - mb->start_subject);
+cb->pattern_position = GET(Fecode, 1);
+cb->next_item_length = GET(Fecode, 1 + LINK_SIZE);
if (*Fecode == OP_CALLOUT) /* Numerical callout */
{
- cb.callout_number = Fecode[1 + 2*LINK_SIZE];
- cb.callout_string_offset = 0;
- cb.callout_string = NULL;
- cb.callout_string_length = 0;
+ cb->callout_number = Fecode[1 + 2*LINK_SIZE];
+ cb->callout_string_offset = 0;
+ cb->callout_string = NULL;
+ cb->callout_string_length = 0;
}
else /* String callout */
{
- cb.callout_number = 0;
- cb.callout_string_offset = GET(Fecode, 1 + 3*LINK_SIZE);
- cb.callout_string = Fecode + (1 + 4*LINK_SIZE) + 1;
- cb.callout_string_length =
+ cb->callout_number = 0;
+ cb->callout_string_offset = GET(Fecode, 1 + 3*LINK_SIZE);
+ cb->callout_string = Fecode + (1 + 4*LINK_SIZE) + 1;
+ cb->callout_string_length =
*lengthptr - (1 + 4*LINK_SIZE) - 2;
}
save0 = callout_ovector[0];
save1 = callout_ovector[1];
callout_ovector[0] = callout_ovector[1] = PCRE2_UNSET;
-rc = mb->callout(&cb, mb->callout_data);
+rc = mb->callout(cb, mb->callout_data);
callout_ovector[0] = save0;
callout_ovector[1] = save1;
+cb->callout_flags = 0;
return rc;
}
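For context, a minimal sketch of a user callout consuming the block that do_callout() now fills in; the function and variable names are illustrative, and the PCRE2_CALLOUT_* flag is the one referenced later in this patch:

static int my_callout(pcre2_callout_block *cb, void *data)
{
(void)data;
/* cb->version is 2 for this block; subject/subject_length stay fixed, while
   capture_top, capture_last and current_position are refreshed per callout */
if ((cb->callout_flags & PCRE2_CALLOUT_BACKTRACK) != 0)
  {
  /* the matcher has backtracked since the previous callout */
  }
return 0;  /* 0 = continue matching, >0 = fail at this point, <0 = error */
}

/* registered once on the match context:
   pcre2_set_callout(mcontext, my_callout, NULL); */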
@@ -729,7 +732,7 @@ for (;;)
fprintf(stderr, "++ op=%d\n", *Fecode);
#endif
- Fop = *Fecode;
+ Fop = (uint8_t)(*Fecode); /* Cast needed for 16-bit and 32-bit modes */
switch(Fop)
{
/* ===================================================================== */
@@ -876,7 +879,7 @@ fprintf(stderr, "++ op=%d\n", *Fecode);
}
Feptr++;
#ifdef SUPPORT_UNICODE
- if (utf) ACROSSCHAR(Feptr < mb->end_subject, *Feptr, Feptr++);
+ if (utf) ACROSSCHAR(Feptr < mb->end_subject, Feptr, Feptr++);
#endif
Fecode++;
break;
@@ -2440,55 +2443,9 @@ fprintf(stderr, "++ op=%d\n", *Fecode);
}
else
{
- int lgb, rgb;
GETCHARINCTEST(fc, Feptr);
- lgb = UCD_GRAPHBREAK(fc);
- while (Feptr < mb->end_subject)
- {
- int len = 1;
- if (!utf) fc = *Feptr; else { GETCHARLEN(fc, Feptr, len); }
- rgb = UCD_GRAPHBREAK(fc);
- if ((PRIV(ucp_gbtable)[lgb] & (1 << rgb)) == 0) break;
-
- /* Not breaking between Regional Indicators is allowed only if there
- are an even number of preceding RIs. */
-
- if (lgb == ucp_gbRegionalIndicator && rgb == ucp_gbRegionalIndicator)
- {
- int ricount = 0;
- PCRE2_SPTR bptr = Feptr - 1;
-#ifdef SUPPORT_UNICODE
- if (utf) BACKCHAR(bptr);
-#endif
- /* bptr is pointing to the left-hand character */
-
- while (bptr > mb->start_subject)
- {
- bptr--;
-#ifdef SUPPORT_UNICODE
- if (utf)
- {
- BACKCHAR(bptr);
- GETCHAR(fc, bptr);
- }
- else
-#endif
- fc = *bptr;
- if (UCD_GRAPHBREAK(fc) != ucp_gbRegionalIndicator) break;
- ricount++;
- }
- if ((ricount & 1) != 0) break; /* Grapheme break required */
- }
-
- /* If Extend follows E_Base[_GAZ] do not update lgb; this allows
- any number of Extend before a following E_Modifier. */
-
- if (rgb != ucp_gbExtend ||
- (lgb != ucp_gbE_Base && lgb != ucp_gbE_Base_GAZ))
- lgb = rgb;
-
- Feptr += len;
- }
+ Feptr = PRIV(extuni)(fc, Feptr, mb->start_subject, mb->end_subject, utf,
+ NULL);
}
CHECK_PARTIAL();
Fecode++;
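This and the similar hunks below replace the inline grapheme-cluster walker with the shared PRIV(extuni)() helper. Its contract, inferred from the call sites (a sketch, not the definitive prototype):

/* fc is the first character, already consumed by GETCHARINCTEST(); the
   return value is eptr advanced past the rest of the extended grapheme
   cluster, never beyond end_subject; the final argument optionally receives
   a count of the extra characters consumed and may be NULL, as here. */
PCRE2_SPTR PRIV(extuni)(uint32_t fc, PCRE2_SPTR eptr, PCRE2_SPTR start_subject,
  PCRE2_SPTR end_subject, BOOL utf, int *xcount);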
@@ -2785,61 +2742,13 @@ fprintf(stderr, "++ op=%d\n", *Fecode);
}
else
{
- int lgb, rgb;
GETCHARINCTEST(fc, Feptr);
- lgb = UCD_GRAPHBREAK(fc);
- while (Feptr < mb->end_subject)
- {
- int len = 1;
- if (!utf) fc = *Feptr; else { GETCHARLEN(fc, Feptr, len); }
- rgb = UCD_GRAPHBREAK(fc);
- if ((PRIV(ucp_gbtable)[lgb] & (1 << rgb)) == 0) break;
-
- /* Not breaking between Regional Indicators is allowed only if
- there are an even number of preceding RIs. */
-
- if (lgb == ucp_gbRegionalIndicator &&
- rgb == ucp_gbRegionalIndicator)
- {
- int ricount = 0;
- PCRE2_SPTR bptr = Feptr - 1;
-#ifdef SUPPORT_UNICODE
- if (utf) BACKCHAR(bptr);
-#endif
- /* bptr is pointing to the left-hand character */
-
- while (bptr > mb->start_subject)
- {
- bptr--;
-#ifdef SUPPORT_UNICODE
- if (utf)
- {
- BACKCHAR(bptr);
- GETCHAR(fc, bptr);
- }
- else
-#endif
- fc = *bptr;
- if (UCD_GRAPHBREAK(fc) != ucp_gbRegionalIndicator) break;
- ricount++;
- }
- if ((ricount & 1) != 0) break; /* Grapheme break required */
- }
-
- /* If Extend follows E_Base[_GAZ] do not update lgb; this allows
- any number of Extend before a following E_Modifier. */
-
- if (rgb != ucp_gbExtend ||
- (lgb != ucp_gbE_Base && lgb != ucp_gbE_Base_GAZ))
- lgb = rgb;
-
- Feptr += len;
- }
+ Feptr = PRIV(extuni)(fc, Feptr, mb->start_subject,
+ mb->end_subject, utf, NULL);
}
CHECK_PARTIAL();
}
}
-
else
#endif /* SUPPORT_UNICODE */
@@ -2867,7 +2776,7 @@ fprintf(stderr, "++ op=%d\n", *Fecode);
if (mb->partial > 1) return PCRE2_ERROR_PARTIAL;
}
Feptr++;
- ACROSSCHAR(Feptr < mb->end_subject, *Feptr, Feptr++);
+ ACROSSCHAR(Feptr < mb->end_subject, Feptr, Feptr++);
}
break;
@@ -2880,7 +2789,7 @@ fprintf(stderr, "++ op=%d\n", *Fecode);
RRETURN(MATCH_NOMATCH);
}
Feptr++;
- ACROSSCHAR(Feptr < mb->end_subject, *Feptr, Feptr++);
+ ACROSSCHAR(Feptr < mb->end_subject, Feptr, Feptr++);
}
break;
@@ -3034,7 +2943,7 @@ fprintf(stderr, "++ op=%d\n", *Fecode);
if (cc < 128 && (mb->ctypes[cc] & ctype_space) != 0)
RRETURN(MATCH_NOMATCH);
Feptr++;
- ACROSSCHAR(Feptr < mb->end_subject, *Feptr, Feptr++);
+ ACROSSCHAR(Feptr < mb->end_subject, Feptr, Feptr++);
}
break;
@@ -3068,7 +2977,7 @@ fprintf(stderr, "++ op=%d\n", *Fecode);
if (cc < 128 && (mb->ctypes[cc] & ctype_word) != 0)
RRETURN(MATCH_NOMATCH);
Feptr++;
- ACROSSCHAR(Feptr < mb->end_subject, *Feptr, Feptr++);
+ ACROSSCHAR(Feptr < mb->end_subject, Feptr, Feptr++);
}
break;
@@ -3593,56 +3502,9 @@ fprintf(stderr, "++ op=%d\n", *Fecode);
}
else
{
- int lgb, rgb;
GETCHARINCTEST(fc, Feptr);
- lgb = UCD_GRAPHBREAK(fc);
- while (Feptr < mb->end_subject)
- {
- int len = 1;
- if (!utf) fc = *Feptr; else { GETCHARLEN(fc, Feptr, len); }
- rgb = UCD_GRAPHBREAK(fc);
- if ((PRIV(ucp_gbtable)[lgb] & (1 << rgb)) == 0) break;
-
- /* Not breaking between Regional Indicators is allowed only if
- there are an even number of preceding RIs. */
-
- if (lgb == ucp_gbRegionalIndicator &&
- rgb == ucp_gbRegionalIndicator)
- {
- int ricount = 0;
- PCRE2_SPTR bptr = Feptr - 1;
-#ifdef SUPPORT_UNICODE
- if (utf) BACKCHAR(bptr);
-#endif
- /* bptr is pointing to the left-hand character */
-
- while (bptr > mb->start_subject)
- {
- bptr--;
-#ifdef SUPPORT_UNICODE
- if (utf)
- {
- BACKCHAR(bptr);
- GETCHAR(fc, bptr);
- }
- else
-#endif
- fc = *bptr;
- if (UCD_GRAPHBREAK(fc) != ucp_gbRegionalIndicator) break;
- ricount++;
- }
- if ((ricount & 1) != 0) break; /* Grapheme break required */
- }
-
- /* If Extend follows E_Base[_GAZ] do not update lgb; this allows
- any number of Extend before a following E_Modifier. */
-
- if (rgb != ucp_gbExtend ||
- (lgb != ucp_gbE_Base && lgb != ucp_gbE_Base_GAZ))
- lgb = rgb;
-
- Feptr += len;
- }
+ Feptr = PRIV(extuni)(fc, Feptr, mb->start_subject, mb->end_subject,
+ utf, NULL);
}
CHECK_PARTIAL();
}
@@ -4167,56 +4029,9 @@ fprintf(stderr, "++ op=%d\n", *Fecode);
}
else
{
- int lgb, rgb;
GETCHARINCTEST(fc, Feptr);
- lgb = UCD_GRAPHBREAK(fc);
- while (Feptr < mb->end_subject)
- {
- int len = 1;
- if (!utf) fc = *Feptr; else { GETCHARLEN(fc, Feptr, len); }
- rgb = UCD_GRAPHBREAK(fc);
- if ((PRIV(ucp_gbtable)[lgb] & (1 << rgb)) == 0) break;
-
- /* Not breaking between Regional Indicators is allowed only if
- there are an even number of preceding RIs. */
-
- if (lgb == ucp_gbRegionalIndicator &&
- rgb == ucp_gbRegionalIndicator)
- {
- int ricount = 0;
- PCRE2_SPTR bptr = Feptr - 1;
-#ifdef SUPPORT_UNICODE
- if (utf) BACKCHAR(bptr);
-#endif
- /* bptr is pointing to the left-hand character */
-
- while (bptr > mb->start_subject)
- {
- bptr--;
-#ifdef SUPPORT_UNICODE
- if (utf)
- {
- BACKCHAR(bptr);
- GETCHAR(fc, bptr);
- }
- else
-#endif
- fc = *bptr;
- if (UCD_GRAPHBREAK(fc) != ucp_gbRegionalIndicator) break;
- ricount++;
- }
- if ((ricount & 1) != 0) break; /* Grapheme break required */
- }
-
- /* If Extend follows E_Base[_GAZ] do not update lgb; this allows
- any number of Extend before a following E_Modifier. */
-
- if (rgb != ucp_gbExtend ||
- (lgb != ucp_gbE_Base && lgb != ucp_gbE_Base_GAZ))
- lgb = rgb;
-
- Feptr += len;
- }
+ Feptr = PRIV(extuni)(fc, Feptr, mb->start_subject, mb->end_subject,
+ utf, NULL);
}
CHECK_PARTIAL();
}
@@ -4295,7 +4110,7 @@ fprintf(stderr, "++ op=%d\n", *Fecode);
if (mb->partial > 1) return PCRE2_ERROR_PARTIAL;
}
Feptr++;
- ACROSSCHAR(Feptr < mb->end_subject, *Feptr, Feptr++);
+ ACROSSCHAR(Feptr < mb->end_subject, Feptr, Feptr++);
}
break;
@@ -4310,7 +4125,7 @@ fprintf(stderr, "++ op=%d\n", *Fecode);
break;
}
Feptr++;
- ACROSSCHAR(Feptr < mb->end_subject, *Feptr, Feptr++);
+ ACROSSCHAR(Feptr < mb->end_subject, Feptr, Feptr++);
}
}
else
@@ -5240,7 +5055,7 @@ fprintf(stderr, "++ op=%d\n", *Fecode);
P = (heapframe *)((char *)N - frame_size);
if (N->group_frame_type == (GF_RECURSE | number))
{
- if (Feptr == P->eptr) RRETURN(PCRE2_ERROR_RECURSELOOP);
+ if (Feptr == P->eptr) return PCRE2_ERROR_RECURSELOOP;
break;
}
offset = P->last_group_offset;
@@ -6105,8 +5920,9 @@ in rrc. */
#define LBL(val) case val: goto L_RM##val;
RETURN_SWITCH:
-if (Frdepth == 0) return rrc; /* Exit from the top level */
-F = (heapframe *)((char *)F - Fback_frame); /* Back track */
+if (Frdepth == 0) return rrc; /* Exit from the top level */
+F = (heapframe *)((char *)F - Fback_frame); /* Back track */
+mb->cb->callout_flags |= PCRE2_CALLOUT_BACKTRACK; /* Note for callouts */
#ifdef DEBUG_SHOW_RMATCH
fprintf(stderr, "++ RETURN %d to %d\n", rrc, Freturn_id);
@@ -6196,6 +6012,7 @@ PCRE2_SIZE frame_size;
/* We need to have mb as a pointer to a match block, because the IS_NEWLINE
macro is used below, and it expects NLBLOCK to be defined as a pointer. */
+pcre2_callout_block cb;
match_block actual_match_block;
match_block *mb = &actual_match_block;
@@ -6356,6 +6173,15 @@ startline = (re->flags & PCRE2_STARTLINE) != 0;
bumpalong_limit = (mcontext->offset_limit == PCRE2_UNSET)?
end_subject : subject + mcontext->offset_limit;
+/* Initialize and set up the fixed fields in the callout block, with a pointer
+in the match block. */
+
+mb->cb = &cb;
+cb.version = 2;
+cb.subject = subject;
+cb.subject_length = (PCRE2_SIZE)(end_subject - subject);
+cb.callout_flags = 0;
+
/* Fill in the remaining fields in the match block. */
mb->callout = mcontext->callout;
@@ -6537,13 +6363,11 @@ for(;;)
if ((re->overall_options & PCRE2_NO_START_OPTIMIZE) == 0)
{
- PCRE2_SPTR save_end_subject = end_subject;
-
/* If firstline is TRUE, the start of the match is constrained to the first
line of a multiline string. That is, the match must be before or at the
- first newline. Implement this by temporarily adjusting end_subject so that
- we stop the optimization scans for a first code unit at a newline. If the
- match fails at the newline, later code breaks this loop. */
+ first newline following the start of matching. Temporarily adjust
+ end_subject so that we stop the scans for a first code unit at a newline.
+ If the match fails at the newline, later code breaks the loop. */
if (firstline)
{
@@ -6551,15 +6375,15 @@ for(;;)
#ifdef SUPPORT_UNICODE
if (utf)
{
- while (t < mb->end_subject && !IS_NEWLINE(t))
+ while (t < end_subject && !IS_NEWLINE(t))
{
t++;
- ACROSSCHAR(t < end_subject, *t, t++);
+ ACROSSCHAR(t < end_subject, t, t++);
}
}
else
#endif
- while (t < mb->end_subject && !IS_NEWLINE(t)) t++;
+ while (t < end_subject && !IS_NEWLINE(t)) t++;
end_subject = t;
}
@@ -6635,13 +6459,17 @@ for(;;)
#endif
}
- /* If we can't find the required code unit, break the bumpalong loop,
- to force a match failure, except when doing partial matching, when we
- let the next cycle run at the end of the subject. To see why, consider
- the pattern /(?<=abc)def/, which partially matches "abc", even though
- the string does not contain the starting character "d". */
-
- if (!mb->partial && start_match >= end_subject)
+ /* If we can't find the required code unit, having reached the true end
+ of the subject, break the bumpalong loop, to force a match failure,
+ except when doing partial matching, when we let the next cycle run at
+ the end of the subject. To see why, consider the pattern /(?<=abc)def/,
+ which partially matches "abc", even though the string does not contain
+ the starting character "d". If we have not reached the true end of the
+ subject (PCRE2_FIRSTLINE caused end_subject to be temporarily modified)
+ we also let the cycle run, because the matching string is legitimately
+ allowed to start with the first code unit of a newline. */
+
+ if (!mb->partial && start_match >= mb->end_subject)
{
rc = MATCH_NOMATCH;
break;
@@ -6661,8 +6489,7 @@ for(;;)
while (start_match < end_subject && !WAS_NEWLINE(start_match))
{
start_match++;
- ACROSSCHAR(start_match < end_subject, *start_match,
- start_match++);
+ ACROSSCHAR(start_match < end_subject, start_match, start_match++);
}
}
else
@@ -6698,12 +6525,20 @@ for(;;)
if ((start_bits[c/8] & (1 << (c&7))) != 0) break;
start_match++;
}
+
+ /* See comment above in first_cu checking about the next few lines. */
+
+ if (!mb->partial && start_match >= mb->end_subject)
+ {
+ rc = MATCH_NOMATCH;
+ break;
+ }
}
} /* End first code unit handling */
/* Restore fudged end_subject */
- end_subject = save_end_subject;
+ end_subject = mb->end_subject;
/* The following two optimizations must be disabled for partial matching. */
@@ -6820,6 +6655,9 @@ for(;;)
/* OK, we can now run the match. If "hitend" is set afterwards, remember the
first starting point for which a partial match was found. */
+ cb.start_match = (PCRE2_SIZE)(start_match - subject);
+ cb.callout_flags |= PCRE2_CALLOUT_STARTMATCH;
+
mb->start_used_ptr = start_match;
mb->last_used_ptr = start_match;
mb->match_call_count = 0;
@@ -6870,7 +6708,7 @@ for(;;)
new_start_match = start_match + 1;
#ifdef SUPPORT_UNICODE
if (utf)
- ACROSSCHAR(new_start_match < end_subject, *new_start_match,
+ ACROSSCHAR(new_start_match < end_subject, new_start_match,
new_start_match++);
#endif
break;
diff --git a/src/3rdparty/pcre2/src/pcre2_pattern_info.c b/src/3rdparty/pcre2/src/pcre2_pattern_info.c
index 540707b225..906e9198f5 100644
--- a/src/3rdparty/pcre2/src/pcre2_pattern_info.c
+++ b/src/3rdparty/pcre2/src/pcre2_pattern_info.c
@@ -76,6 +76,7 @@ if (where == NULL) /* Requests field length */
case PCRE2_INFO_BSR:
case PCRE2_INFO_CAPTURECOUNT:
case PCRE2_INFO_DEPTHLIMIT:
+ case PCRE2_INFO_EXTRAOPTIONS:
case PCRE2_INFO_FIRSTCODETYPE:
case PCRE2_INFO_FIRSTCODEUNIT:
case PCRE2_INFO_HASBACKSLASHC:
@@ -144,6 +145,10 @@ switch(what)
if (re->limit_depth == UINT32_MAX) return PCRE2_ERROR_UNSET;
break;
+ case PCRE2_INFO_EXTRAOPTIONS:
+ *((uint32_t *)where) = re->extra_options;
+ break;
+
case PCRE2_INFO_FIRSTCODETYPE:
*((uint32_t *)where) = ((re->flags & PCRE2_FIRSTSET) != 0)? 1 :
((re->flags & PCRE2_STARTLINE) != 0)? 2 : 0;
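Caller-side, the new query is used like any other pcre2_pattern_info() request; a short sketch, where re is a compiled pattern and PCRE2_EXTRA_MATCH_WORD stands in for whichever extra option is of interest:

uint32_t extra = 0;
if (pcre2_pattern_info(re, PCRE2_INFO_EXTRAOPTIONS, &extra) == 0 &&
    (extra & PCRE2_EXTRA_MATCH_WORD) != 0)
  {
  /* the pattern was compiled with the match-word extra option */
  }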
diff --git a/src/3rdparty/pcre2/src/pcre2_substring.c b/src/3rdparty/pcre2/src/pcre2_substring.c
index f6d7c39722..ddf5774e15 100644
--- a/src/3rdparty/pcre2/src/pcre2_substring.c
+++ b/src/3rdparty/pcre2/src/pcre2_substring.c
@@ -7,7 +7,7 @@ and semantics are as close as possible to those of the Perl 5 language.
Written by Philip Hazel
Original API code Copyright (c) 1997-2012 University of Cambridge
- New API code Copyright (c) 2016 University of Cambridge
+ New API code Copyright (c) 2016-2018 University of Cambridge
-----------------------------------------------------------------------------
Redistribution and use in source and binary forms, with or without
@@ -414,7 +414,12 @@ else
for (i = 0; i < count2; i += 2)
{
size = (ovector[i+1] > ovector[i])? (ovector[i+1] - ovector[i]) : 0;
- memcpy(sp, match_data->subject + ovector[i], CU2BYTES(size));
+
+ /* Size == 0 includes the case when the capture is unset. Avoid adding
+ PCRE2_UNSET to match_data->subject because it overflows, even though with
+ zero size calling memcpy() is harmless. */
+
+ if (size != 0) memcpy(sp, match_data->subject + ovector[i], CU2BYTES(size));
*listp++ = sp;
if (lensp != NULL) *lensp++ = size;
sp += size;
diff --git a/src/3rdparty/pcre2/src/sljit/sljitConfig.h b/src/3rdparty/pcre2/src/sljit/sljitConfig.h
index b65584a4af..d54b5e6f54 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitConfig.h
+++ b/src/3rdparty/pcre2/src/sljit/sljitConfig.h
@@ -108,8 +108,10 @@
/* Force cdecl calling convention even if a better calling
convention (e.g. fastcall) is supported by the C compiler.
- If this option is enabled, C functions without
- SLJIT_CALL can also be called from JIT code. */
+ If this option is disabled (this is the default), functions
+ called from JIT should be defined with SLJIT_FUNC attribute.
+ Standard C functions can still be called by using the
+ SLJIT_CALL_CDECL jump type. */
#ifndef SLJIT_USE_CDECL_CALLING_CONVENTION
/* Disabled by default */
#define SLJIT_USE_CDECL_CALLING_CONVENTION 0
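In code terms, the distinction the updated comment draws (helper names are made up for illustration): functions that JIT code reaches through SLJIT_CALL carry the SLJIT_FUNC attribute, while an unadorned cdecl function is reached with the SLJIT_CALL_CDECL jump type instead:

static sljit_sw SLJIT_FUNC fast_helper(sljit_sw x) { return x + 1; }  /* JIT-callable, default convention */
static sljit_sw plain_helper(sljit_sw x) { return x - 1; }            /* ordinary cdecl function */

sljit_emit_icall(compiler, SLJIT_CALL, SLJIT_RET(SW) | SLJIT_ARG1(SW),
    SLJIT_IMM, SLJIT_FUNC_OFFSET(fast_helper));
sljit_emit_icall(compiler, SLJIT_CALL_CDECL, SLJIT_RET(SW) | SLJIT_ARG1(SW),
    SLJIT_IMM, SLJIT_FUNC_OFFSET(plain_helper));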
diff --git a/src/3rdparty/pcre2/src/sljit/sljitConfigInternal.h b/src/3rdparty/pcre2/src/sljit/sljitConfigInternal.h
index cc0810fbd7..e13282c842 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitConfigInternal.h
+++ b/src/3rdparty/pcre2/src/sljit/sljitConfigInternal.h
@@ -60,11 +60,13 @@
a single precision floating point array by index
SLJIT_F64_SHIFT : the shift required to apply when accessing
a double precision floating point array by index
+ SLJIT_PREF_SHIFT_REG : x86 systems prefer ecx for shifting by register;
+ the scratch register index of ecx is stored in this variable
SLJIT_LOCALS_OFFSET : local space starting offset (SLJIT_SP + SLJIT_LOCALS_OFFSET)
SLJIT_RETURN_ADDRESS_OFFSET : a return instruction always adds this offset to the return address
Other macros:
- SLJIT_CALL : C calling convention define for both calling JIT form C and C callbacks for JIT
+ SLJIT_FUNC : calling convention attribute for both calling JIT from C and C calling back from JIT
SLJIT_W(number) : defining 64 bit constants on 64 bit architectures (compiler independent helper)
*/
@@ -471,44 +473,44 @@ typedef double sljit_f64;
/* Calling convention of functions generated by SLJIT or called from the generated code. */
/*****************************************************************************************/
-#ifndef SLJIT_CALL
+#ifndef SLJIT_FUNC
#if (defined SLJIT_USE_CDECL_CALLING_CONVENTION && SLJIT_USE_CDECL_CALLING_CONVENTION)
/* Force cdecl. */
-#define SLJIT_CALL
+#define SLJIT_FUNC
#elif (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
#if defined(__GNUC__) && !defined(__APPLE__)
-#define SLJIT_CALL __attribute__ ((fastcall))
+#define SLJIT_FUNC __attribute__ ((fastcall))
#define SLJIT_X86_32_FASTCALL 1
#elif defined(_MSC_VER)
-#define SLJIT_CALL __fastcall
+#define SLJIT_FUNC __fastcall
#define SLJIT_X86_32_FASTCALL 1
#elif defined(__BORLANDC__)
-#define SLJIT_CALL __msfastcall
+#define SLJIT_FUNC __msfastcall
#define SLJIT_X86_32_FASTCALL 1
#else /* Unknown compiler. */
/* The cdecl attribute is the default. */
-#define SLJIT_CALL
+#define SLJIT_FUNC
#endif
#else /* Non x86-32 architectures. */
-#define SLJIT_CALL
+#define SLJIT_FUNC
#endif /* SLJIT_CONFIG_X86_32 */
-#endif /* !SLJIT_CALL */
+#endif /* !SLJIT_FUNC */
#ifndef SLJIT_INDIRECT_CALL
#if ((defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) && (defined SLJIT_BIG_ENDIAN && SLJIT_BIG_ENDIAN)) \
@@ -557,24 +559,20 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_sw sljit_exec_offset(void* ptr);
#define SLJIT_NUMBER_OF_REGISTERS 12
#define SLJIT_NUMBER_OF_SAVED_REGISTERS 9
-#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
#define SLJIT_LOCALS_OFFSET_BASE (compiler->locals_offset)
-#else
-/* Maximum 3 arguments are passed on the stack, +1 for double alignment. */
-#define SLJIT_LOCALS_OFFSET_BASE (compiler->locals_offset)
-#endif /* SLJIT_X86_32_FASTCALL */
+#define SLJIT_PREF_SHIFT_REG SLJIT_R2
#elif (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
-#ifndef _WIN64
#define SLJIT_NUMBER_OF_REGISTERS 13
+#ifndef _WIN64
#define SLJIT_NUMBER_OF_SAVED_REGISTERS 6
#define SLJIT_LOCALS_OFFSET_BASE 0
-#else
-#define SLJIT_NUMBER_OF_REGISTERS 13
+#else /* _WIN64 */
#define SLJIT_NUMBER_OF_SAVED_REGISTERS 8
#define SLJIT_LOCALS_OFFSET_BASE (compiler->locals_offset)
-#endif /* _WIN64 */
+#endif /* !_WIN64 */
+#define SLJIT_PREF_SHIFT_REG SLJIT_R3
#elif (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) || (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)
@@ -590,13 +588,13 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_sw sljit_exec_offset(void* ptr);
#elif (defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64)
-#define SLJIT_NUMBER_OF_REGISTERS 25
+#define SLJIT_NUMBER_OF_REGISTERS 26
#define SLJIT_NUMBER_OF_SAVED_REGISTERS 10
#define SLJIT_LOCALS_OFFSET_BASE (2 * sizeof(sljit_sw))
#elif (defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC)
-#define SLJIT_NUMBER_OF_REGISTERS 22
+#define SLJIT_NUMBER_OF_REGISTERS 23
#define SLJIT_NUMBER_OF_SAVED_REGISTERS 17
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) || (defined _AIX)
#define SLJIT_LOCALS_OFFSET_BASE ((6 + 8) * sizeof(sljit_sw))
@@ -622,8 +620,9 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_sw sljit_exec_offset(void* ptr);
#define SLJIT_NUMBER_OF_REGISTERS 18
#define SLJIT_NUMBER_OF_SAVED_REGISTERS 14
#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
-/* Add +1 for double alignment. */
-#define SLJIT_LOCALS_OFFSET_BASE ((23 + 1) * sizeof(sljit_sw))
+/* saved registers (16), return struct pointer (1), space for 6 argument words (6),
+ 4th double arg (2), double alignment (1). */
+#define SLJIT_LOCALS_OFFSET_BASE ((16 + 1 + 6 + 2 + 1) * sizeof(sljit_sw))
#endif
#elif (defined SLJIT_CONFIG_TILEGX && SLJIT_CONFIG_TILEGX)
diff --git a/src/3rdparty/pcre2/src/sljit/sljitLir.c b/src/3rdparty/pcre2/src/sljit/sljitLir.c
index c0bbb5201a..5e435f0154 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitLir.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitLir.c
@@ -97,8 +97,13 @@
#define GET_ALL_FLAGS(op) \
((op) & (SLJIT_I32_OP | SLJIT_SET_Z | VARIABLE_FLAG_MASK))
+#if (defined SLJIT_64BIT_ARCHITECTURE && SLJIT_64BIT_ARCHITECTURE)
+#define TYPE_CAST_NEEDED(op) \
+ ((op) >= SLJIT_MOV_U8 && (op) <= SLJIT_MOV_S32)
+#else
#define TYPE_CAST_NEEDED(op) \
- (((op) >= SLJIT_MOV_U8 && (op) <= SLJIT_MOV_S16) || ((op) >= SLJIT_MOVU_U8 && (op) <= SLJIT_MOVU_S16))
+ ((op) >= SLJIT_MOV_U8 && (op) <= SLJIT_MOV_S16)
+#endif
#define BUF_SIZE 4096
@@ -118,6 +123,9 @@
/* When reg can be unused. */
#define SLOW_IS_REG(reg) ((reg) > 0 && (reg) <= REG_MASK)
+/* Mask for argument types. */
+#define SLJIT_DEF_MASK ((1 << SLJIT_DEF_SHIFT) - 1)
+
/* Jump flags. */
#define JUMP_LABEL 0x1
#define JUMP_ADDR 0x2
@@ -591,6 +599,19 @@ static SLJIT_INLINE void reverse_buf(struct sljit_compiler *compiler)
compiler->buf = prev;
}
+static SLJIT_INLINE sljit_s32 get_arg_count(sljit_s32 arg_types)
+{
+ sljit_s32 arg_count = 0;
+
+ arg_types >>= SLJIT_DEF_SHIFT;
+ while (arg_types) {
+ arg_count++;
+ arg_types >>= SLJIT_DEF_SHIFT;
+ }
+
+ return arg_count;
+}
+
static SLJIT_INLINE void set_emit_enter(struct sljit_compiler *compiler,
sljit_s32 options, sljit_s32 args, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
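A worked example of the packed layout that get_arg_count() walks: the return type occupies the lowest SLJIT_DEF_SHIFT-bit slot and each argument the next slot up, so the initial shift discards the return slot and the loop counts the remaining non-empty slots. A sketch, usable only inside this file since the function is static:

sljit_s32 types = SLJIT_RET(SW) | SLJIT_ARG1(SW) | SLJIT_ARG2(SW);
SLJIT_ASSERT(get_arg_count(types) == 2);  /* two argument slots follow the return slot */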
@@ -664,80 +685,106 @@ static SLJIT_INLINE void set_const(struct sljit_const *const_, struct sljit_comp
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
#define FUNCTION_CHECK_IS_REG(r) \
- (((r) >= SLJIT_R0 && (r) < (SLJIT_R0 + compiler->scratches)) || \
- ((r) > (SLJIT_S0 - compiler->saveds) && (r) <= SLJIT_S0))
+ (((r) >= SLJIT_R0 && (r) < (SLJIT_R0 + compiler->scratches)) \
+ || ((r) > (SLJIT_S0 - compiler->saveds) && (r) <= SLJIT_S0))
-#define FUNCTION_CHECK_IS_REG_OR_UNUSED(r) \
- ((r) == SLJIT_UNUSED || \
- ((r) >= SLJIT_R0 && (r) < (SLJIT_R0 + compiler->scratches)) || \
- ((r) > (SLJIT_S0 - compiler->saveds) && (r) <= SLJIT_S0))
+#define FUNCTION_CHECK_IS_FREG(fr) \
+ (((fr) >= SLJIT_FR0 && (fr) < (SLJIT_FR0 + compiler->fscratches)) \
+ || ((fr) > (SLJIT_FS0 - compiler->fsaveds) && (fr) <= SLJIT_FS0))
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
-#define CHECK_NOT_VIRTUAL_REGISTER(p) \
- CHECK_ARGUMENT((p) < SLJIT_R3 || (p) > SLJIT_R6);
+#define CHECK_IF_VIRTUAL_REGISTER(p) ((p) <= SLJIT_S3 && (p) >= SLJIT_S8)
#else
-#define CHECK_NOT_VIRTUAL_REGISTER(p)
+#define CHECK_IF_VIRTUAL_REGISTER(p) 0
#endif
-#define FUNCTION_CHECK_SRC(p, i) \
- CHECK_ARGUMENT(compiler->scratches != -1 && compiler->saveds != -1); \
- if (FUNCTION_CHECK_IS_REG(p)) \
- CHECK_ARGUMENT((i) == 0); \
- else if ((p) == SLJIT_IMM) \
- ; \
- else if ((p) == (SLJIT_MEM1(SLJIT_SP))) \
- CHECK_ARGUMENT((i) >= 0 && (i) < compiler->logical_local_size); \
- else { \
- CHECK_ARGUMENT((p) & SLJIT_MEM); \
- CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG_OR_UNUSED((p) & REG_MASK)); \
- CHECK_NOT_VIRTUAL_REGISTER((p) & REG_MASK); \
- if ((p) & OFFS_REG_MASK) { \
- CHECK_ARGUMENT(((p) & REG_MASK) != SLJIT_UNUSED); \
- CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(OFFS_REG(p))); \
- CHECK_NOT_VIRTUAL_REGISTER(OFFS_REG(p)); \
- CHECK_ARGUMENT(!((i) & ~0x3)); \
- } \
- CHECK_ARGUMENT(!((p) & ~(SLJIT_MEM | REG_MASK | OFFS_REG_MASK))); \
+static sljit_s32 function_check_src_mem(struct sljit_compiler *compiler, sljit_s32 p, sljit_sw i)
+{
+ if (compiler->scratches == -1 || compiler->saveds == -1)
+ return 0;
+
+ if (!(p & SLJIT_MEM))
+ return 0;
+
+ if (!((p & REG_MASK) == SLJIT_UNUSED || FUNCTION_CHECK_IS_REG(p & REG_MASK)))
+ return 0;
+
+ if (CHECK_IF_VIRTUAL_REGISTER(p & REG_MASK))
+ return 0;
+
+ if (p & OFFS_REG_MASK) {
+ if ((p & REG_MASK) == SLJIT_UNUSED)
+ return 0;
+
+ if (!(FUNCTION_CHECK_IS_REG(OFFS_REG(p))))
+ return 0;
+
+ if (CHECK_IF_VIRTUAL_REGISTER(OFFS_REG(p)))
+ return 0;
+
+ if ((i & ~0x3) != 0)
+ return 0;
}
+ return (p & ~(SLJIT_MEM | REG_MASK | OFFS_REG_MASK)) == 0;
+}
+
+#define FUNCTION_CHECK_SRC_MEM(p, i) \
+ CHECK_ARGUMENT(function_check_src_mem(compiler, p, i));
+
+static sljit_s32 function_check_src(struct sljit_compiler *compiler, sljit_s32 p, sljit_sw i)
+{
+ if (compiler->scratches == -1 || compiler->saveds == -1)
+ return 0;
+
+ if (FUNCTION_CHECK_IS_REG(p))
+ return (i == 0);
+
+ if (p == SLJIT_IMM)
+ return 1;
+
+ if (p == SLJIT_MEM1(SLJIT_SP))
+ return (i >= 0 && i < compiler->logical_local_size);
+
+ return function_check_src_mem(compiler, p, i);
+}
+
+#define FUNCTION_CHECK_SRC(p, i) \
+ CHECK_ARGUMENT(function_check_src(compiler, p, i));
+
+static sljit_s32 function_check_dst(struct sljit_compiler *compiler, sljit_s32 p, sljit_sw i, sljit_s32 unused)
+{
+ if (compiler->scratches == -1 || compiler->saveds == -1)
+ return 0;
+
+ if (FUNCTION_CHECK_IS_REG(p) || ((unused) && (p) == SLJIT_UNUSED))
+ return (i == 0);
+
+ if (p == SLJIT_MEM1(SLJIT_SP))
+ return (i >= 0 && i < compiler->logical_local_size);
+
+ return function_check_src_mem(compiler, p, i);
+}
+
#define FUNCTION_CHECK_DST(p, i, unused) \
- CHECK_ARGUMENT(compiler->scratches != -1 && compiler->saveds != -1); \
- if (FUNCTION_CHECK_IS_REG(p) || ((unused) && (p) == SLJIT_UNUSED)) \
- CHECK_ARGUMENT((i) == 0); \
- else if ((p) == (SLJIT_MEM1(SLJIT_SP))) \
- CHECK_ARGUMENT((i) >= 0 && (i) < compiler->logical_local_size); \
- else { \
- CHECK_ARGUMENT((p) & SLJIT_MEM); \
- CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG_OR_UNUSED((p) & REG_MASK)); \
- CHECK_NOT_VIRTUAL_REGISTER((p) & REG_MASK); \
- if ((p) & OFFS_REG_MASK) { \
- CHECK_ARGUMENT(((p) & REG_MASK) != SLJIT_UNUSED); \
- CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(OFFS_REG(p))); \
- CHECK_NOT_VIRTUAL_REGISTER(OFFS_REG(p)); \
- CHECK_ARGUMENT(!((i) & ~0x3)); \
- } \
- CHECK_ARGUMENT(!((p) & ~(SLJIT_MEM | REG_MASK | OFFS_REG_MASK))); \
- }
+ CHECK_ARGUMENT(function_check_dst(compiler, p, i, unused));
+
+static sljit_s32 function_fcheck(struct sljit_compiler *compiler, sljit_s32 p, sljit_sw i)
+{
+ if (compiler->scratches == -1 || compiler->saveds == -1)
+ return 0;
+
+ if (FUNCTION_CHECK_IS_FREG(p))
+ return (i == 0);
+
+ if (p == SLJIT_MEM1(SLJIT_SP))
+ return (i >= 0 && i < compiler->logical_local_size);
+
+ return function_check_src_mem(compiler, p, i);
+}
#define FUNCTION_FCHECK(p, i) \
- CHECK_ARGUMENT(compiler->fscratches != -1 && compiler->fsaveds != -1); \
- if (((p) >= SLJIT_FR0 && (p) < (SLJIT_FR0 + compiler->fscratches)) || \
- ((p) > (SLJIT_FS0 - compiler->fsaveds) && (p) <= SLJIT_FS0)) \
- CHECK_ARGUMENT(i == 0); \
- else if ((p) == (SLJIT_MEM1(SLJIT_SP))) \
- CHECK_ARGUMENT((i) >= 0 && (i) < compiler->logical_local_size); \
- else { \
- CHECK_ARGUMENT((p) & SLJIT_MEM); \
- CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG_OR_UNUSED((p) & REG_MASK)); \
- CHECK_NOT_VIRTUAL_REGISTER((p) & REG_MASK); \
- if ((p) & OFFS_REG_MASK) { \
- CHECK_ARGUMENT(((p) & REG_MASK) != SLJIT_UNUSED); \
- CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(OFFS_REG(p))); \
- CHECK_NOT_VIRTUAL_REGISTER(OFFS_REG(p)); \
- CHECK_ARGUMENT(((p) & OFFS_REG_MASK) != TO_OFFS_REG(SLJIT_SP) && !(i & ~0x3)); \
- } \
- CHECK_ARGUMENT(!((p) & ~(SLJIT_MEM | REG_MASK | OFFS_REG_MASK))); \
- }
+ CHECK_ARGUMENT(function_fcheck(compiler, p, i));
#endif /* SLJIT_ARGUMENT_CHECKS */
@@ -758,64 +805,72 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_compiler_verbose(struct sljit_compiler *comp
# define SLJIT_PRINT_D ""
#endif
-#define sljit_verbose_reg(compiler, r) \
- do { \
- if ((r) < (SLJIT_R0 + compiler->scratches)) \
- fprintf(compiler->verbose, "r%d", (r) - SLJIT_R0); \
- else if ((r) != SLJIT_SP) \
- fprintf(compiler->verbose, "s%d", SLJIT_NUMBER_OF_REGISTERS - (r)); \
- else \
- fprintf(compiler->verbose, "sp"); \
- } while (0)
+static void sljit_verbose_reg(struct sljit_compiler *compiler, sljit_s32 r)
+{
+ if (r < (SLJIT_R0 + compiler->scratches))
+ fprintf(compiler->verbose, "r%d", r - SLJIT_R0);
+ else if (r != SLJIT_SP)
+ fprintf(compiler->verbose, "s%d", SLJIT_NUMBER_OF_REGISTERS - r);
+ else
+ fprintf(compiler->verbose, "sp");
+}
-#define sljit_verbose_param(compiler, p, i) \
- if ((p) & SLJIT_IMM) \
- fprintf(compiler->verbose, "#%" SLJIT_PRINT_D "d", (i)); \
- else if ((p) & SLJIT_MEM) { \
- if ((p) & REG_MASK) { \
- fputc('[', compiler->verbose); \
- sljit_verbose_reg(compiler, (p) & REG_MASK); \
- if ((p) & OFFS_REG_MASK) { \
- fprintf(compiler->verbose, " + "); \
- sljit_verbose_reg(compiler, OFFS_REG(p)); \
- if (i) \
- fprintf(compiler->verbose, " * %d", 1 << (i)); \
- } \
- else if (i) \
- fprintf(compiler->verbose, " + %" SLJIT_PRINT_D "d", (i)); \
- fputc(']', compiler->verbose); \
- } \
- else \
- fprintf(compiler->verbose, "[#%" SLJIT_PRINT_D "d]", (i)); \
- } else if (p) \
- sljit_verbose_reg(compiler, p); \
- else \
+static void sljit_verbose_freg(struct sljit_compiler *compiler, sljit_s32 r)
+{
+ if (r < (SLJIT_FR0 + compiler->fscratches))
+ fprintf(compiler->verbose, "fr%d", r - SLJIT_FR0);
+ else
+ fprintf(compiler->verbose, "fs%d", SLJIT_NUMBER_OF_FLOAT_REGISTERS - r);
+}
+
+static void sljit_verbose_param(struct sljit_compiler *compiler, sljit_s32 p, sljit_sw i)
+{
+ if ((p) & SLJIT_IMM)
+ fprintf(compiler->verbose, "#%" SLJIT_PRINT_D "d", (i));
+ else if ((p) & SLJIT_MEM) {
+ if ((p) & REG_MASK) {
+ fputc('[', compiler->verbose);
+ sljit_verbose_reg(compiler, (p) & REG_MASK);
+ if ((p) & OFFS_REG_MASK) {
+ fprintf(compiler->verbose, " + ");
+ sljit_verbose_reg(compiler, OFFS_REG(p));
+ if (i)
+ fprintf(compiler->verbose, " * %d", 1 << (i));
+ }
+ else if (i)
+ fprintf(compiler->verbose, " + %" SLJIT_PRINT_D "d", (i));
+ fputc(']', compiler->verbose);
+ }
+ else
+ fprintf(compiler->verbose, "[#%" SLJIT_PRINT_D "d]", (i));
+ } else if (p)
+ sljit_verbose_reg(compiler, p);
+ else
fprintf(compiler->verbose, "unused");
+}
-#define sljit_verbose_fparam(compiler, p, i) \
- if ((p) & SLJIT_MEM) { \
- if ((p) & REG_MASK) { \
- fputc('[', compiler->verbose); \
- sljit_verbose_reg(compiler, (p) & REG_MASK); \
- if ((p) & OFFS_REG_MASK) { \
- fprintf(compiler->verbose, " + "); \
- sljit_verbose_reg(compiler, OFFS_REG(p)); \
- if (i) \
- fprintf(compiler->verbose, "%d", 1 << (i)); \
- } \
- else if (i) \
- fprintf(compiler->verbose, "%" SLJIT_PRINT_D "d", (i)); \
- fputc(']', compiler->verbose); \
- } \
- else \
- fprintf(compiler->verbose, "[#%" SLJIT_PRINT_D "d]", (i)); \
- } \
- else { \
- if ((p) < (SLJIT_FR0 + compiler->fscratches)) \
- fprintf(compiler->verbose, "fr%d", (p) - SLJIT_FR0); \
- else \
- fprintf(compiler->verbose, "fs%d", SLJIT_NUMBER_OF_FLOAT_REGISTERS - (p)); \
+static void sljit_verbose_fparam(struct sljit_compiler *compiler, sljit_s32 p, sljit_sw i)
+{
+ if ((p) & SLJIT_MEM) {
+ if ((p) & REG_MASK) {
+ fputc('[', compiler->verbose);
+ sljit_verbose_reg(compiler, (p) & REG_MASK);
+ if ((p) & OFFS_REG_MASK) {
+ fprintf(compiler->verbose, " + ");
+ sljit_verbose_reg(compiler, OFFS_REG(p));
+ if (i)
+ fprintf(compiler->verbose, "%d", 1 << (i));
+ }
+ else if (i)
+ fprintf(compiler->verbose, " + %" SLJIT_PRINT_D "d", (i));
+ fputc(']', compiler->verbose);
+ }
+ else
+ fprintf(compiler->verbose, "[#%" SLJIT_PRINT_D "d]", (i));
}
+ else
+ sljit_verbose_freg(compiler, p);
+}
static const char* op0_names[] = {
(char*)"breakpoint", (char*)"nop", (char*)"lmul.uw", (char*)"lmul.sw",
@@ -864,7 +919,11 @@ static char* jump_names[] = {
(char*)"greater", (char*)"less_equal",
(char*)"unordered", (char*)"ordered",
(char*)"jump", (char*)"fast_call",
- (char*)"call0", (char*)"call1", (char*)"call2", (char*)"call3"
+ (char*)"call", (char*)"call.cdecl"
+};
+
+static char* call_arg_names[] = {
+ (char*)"void", (char*)"sw", (char*)"uw", (char*)"s32", (char*)"u32", (char*)"f32", (char*)"f64"
};
#endif /* SLJIT_VERBOSE */
@@ -897,53 +956,104 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_generate_code(struct sljit_com
}
static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_enter(struct sljit_compiler *compiler,
- sljit_s32 options, sljit_s32 args, sljit_s32 scratches, sljit_s32 saveds,
+ sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
+#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ sljit_s32 types, arg_count, curr_type;
+#endif
+
SLJIT_UNUSED_ARG(compiler);
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
CHECK_ARGUMENT(!(options & ~SLJIT_F64_ALIGNMENT));
- CHECK_ARGUMENT(args >= 0 && args <= 3);
CHECK_ARGUMENT(scratches >= 0 && scratches <= SLJIT_NUMBER_OF_REGISTERS);
CHECK_ARGUMENT(saveds >= 0 && saveds <= SLJIT_NUMBER_OF_REGISTERS);
CHECK_ARGUMENT(scratches + saveds <= SLJIT_NUMBER_OF_REGISTERS);
- CHECK_ARGUMENT(args <= saveds);
CHECK_ARGUMENT(fscratches >= 0 && fscratches <= SLJIT_NUMBER_OF_FLOAT_REGISTERS);
CHECK_ARGUMENT(fsaveds >= 0 && fsaveds <= SLJIT_NUMBER_OF_FLOAT_REGISTERS);
CHECK_ARGUMENT(fscratches + fsaveds <= SLJIT_NUMBER_OF_FLOAT_REGISTERS);
CHECK_ARGUMENT(local_size >= 0 && local_size <= SLJIT_MAX_LOCAL_SIZE);
+ CHECK_ARGUMENT((arg_types & SLJIT_DEF_MASK) == 0);
+
+ types = (arg_types >> SLJIT_DEF_SHIFT);
+ arg_count = 0;
+ while (types != 0 && arg_count < 3) {
+ curr_type = (types & SLJIT_DEF_MASK);
+ CHECK_ARGUMENT(curr_type == SLJIT_ARG_TYPE_SW || curr_type == SLJIT_ARG_TYPE_UW);
+ arg_count++;
+ types >>= SLJIT_DEF_SHIFT;
+ }
+ CHECK_ARGUMENT(arg_count <= saveds && types == 0);
+
compiler->last_flags = 0;
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
- if (SLJIT_UNLIKELY(!!compiler->verbose))
- fprintf(compiler->verbose, " enter options:none args:%d scratches:%d saveds:%d fscratches:%d fsaveds:%d local_size:%d\n",
- args, scratches, saveds, fscratches, fsaveds, local_size);
+ if (SLJIT_UNLIKELY(!!compiler->verbose)) {
+ fprintf(compiler->verbose, " enter options:%s args[", (options & SLJIT_F64_ALIGNMENT) ? "f64_align" : "");
+
+ arg_types >>= SLJIT_DEF_SHIFT;
+ while (arg_types) {
+ fprintf(compiler->verbose, "%s", call_arg_names[arg_types & SLJIT_DEF_MASK]);
+ arg_types >>= SLJIT_DEF_SHIFT;
+ if (arg_types)
+ fprintf(compiler->verbose, ",");
+ }
+
+ fprintf(compiler->verbose, "] scratches:%d saveds:%d fscratches:%d fsaveds:%d local_size:%d\n",
+ scratches, saveds, fscratches, fsaveds, local_size);
+ }
#endif
CHECK_RETURN_OK;
}
static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_set_context(struct sljit_compiler *compiler,
- sljit_s32 options, sljit_s32 args, sljit_s32 scratches, sljit_s32 saveds,
+ sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ sljit_s32 types, arg_count, curr_type;
+#endif
+
+ SLJIT_UNUSED_ARG(compiler);
+
+#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
CHECK_ARGUMENT(!(options & ~SLJIT_F64_ALIGNMENT));
- CHECK_ARGUMENT(args >= 0 && args <= 3);
CHECK_ARGUMENT(scratches >= 0 && scratches <= SLJIT_NUMBER_OF_REGISTERS);
CHECK_ARGUMENT(saveds >= 0 && saveds <= SLJIT_NUMBER_OF_REGISTERS);
CHECK_ARGUMENT(scratches + saveds <= SLJIT_NUMBER_OF_REGISTERS);
- CHECK_ARGUMENT(args <= saveds);
CHECK_ARGUMENT(fscratches >= 0 && fscratches <= SLJIT_NUMBER_OF_FLOAT_REGISTERS);
CHECK_ARGUMENT(fsaveds >= 0 && fsaveds <= SLJIT_NUMBER_OF_FLOAT_REGISTERS);
CHECK_ARGUMENT(fscratches + fsaveds <= SLJIT_NUMBER_OF_FLOAT_REGISTERS);
CHECK_ARGUMENT(local_size >= 0 && local_size <= SLJIT_MAX_LOCAL_SIZE);
+
+ types = (arg_types >> SLJIT_DEF_SHIFT);
+ arg_count = 0;
+ while (types != 0 && arg_count < 3) {
+ curr_type = (types & SLJIT_DEF_MASK);
+ CHECK_ARGUMENT(curr_type == SLJIT_ARG_TYPE_SW || curr_type == SLJIT_ARG_TYPE_UW);
+ arg_count++;
+ types >>= SLJIT_DEF_SHIFT;
+ }
+ CHECK_ARGUMENT(arg_count <= saveds && types == 0);
+
compiler->last_flags = 0;
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
- if (SLJIT_UNLIKELY(!!compiler->verbose))
- fprintf(compiler->verbose, " set_context options:none args:%d scratches:%d saveds:%d fscratches:%d fsaveds:%d local_size:%d\n",
- args, scratches, saveds, fscratches, fsaveds, local_size);
+ if (SLJIT_UNLIKELY(!!compiler->verbose)) {
+ fprintf(compiler->verbose, " set_context options:%s args[", (options & SLJIT_F64_ALIGNMENT) ? "f64_align" : "");
+
+ arg_types >>= SLJIT_DEF_SHIFT;
+ while (arg_types) {
+ fprintf(compiler->verbose, "%s", call_arg_names[arg_types & SLJIT_DEF_MASK]);
+ arg_types >>= SLJIT_DEF_SHIFT;
+ if (arg_types)
+ fprintf(compiler->verbose, ",");
+ }
+
+ fprintf(compiler->verbose, "] scratches:%d saveds:%d fscratches:%d fsaveds:%d local_size:%d\n",
+ scratches, saveds, fscratches, fsaveds, local_size);
+ }
#endif
CHECK_RETURN_OK;
}
@@ -994,6 +1104,7 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fast_return(struct sljit_
{
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
FUNCTION_CHECK_SRC(src, srcw);
+ CHECK_ARGUMENT(src != SLJIT_IMM);
compiler->last_flags = 0;
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
@@ -1052,9 +1163,6 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_op1(struct sljit_compiler
case SLJIT_MOV:
case SLJIT_MOV_U32:
case SLJIT_MOV_P:
- case SLJIT_MOVU:
- case SLJIT_MOVU_U32:
- case SLJIT_MOVU_P:
/* Nothing allowed */
CHECK_ARGUMENT(!(op & (SLJIT_I32_OP | SLJIT_SET_Z | VARIABLE_FLAG_MASK)));
break;
@@ -1067,28 +1175,17 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_op1(struct sljit_compiler
FUNCTION_CHECK_DST(dst, dstw, 1);
FUNCTION_CHECK_SRC(src, srcw);
- if (GET_OPCODE(op) >= SLJIT_NOT)
+ if (GET_OPCODE(op) >= SLJIT_NOT) {
+ CHECK_ARGUMENT(src != SLJIT_IMM);
compiler->last_flags = GET_FLAG_TYPE(op) | (op & (SLJIT_I32_OP | SLJIT_SET_Z));
- else if (GET_OPCODE(op) >= SLJIT_MOVU) {
- CHECK_ARGUMENT(!(src & SLJIT_MEM) || (src & REG_MASK) != SLJIT_SP);
- CHECK_ARGUMENT(!(dst & SLJIT_MEM) || (dst & REG_MASK) != SLJIT_SP);
- if ((src & REG_MASK) != SLJIT_UNUSED) {
- CHECK_ARGUMENT((src & REG_MASK) != (dst & REG_MASK) && (src & REG_MASK) != OFFS_REG(dst));
- CHECK_ARGUMENT((src & OFFS_REG_MASK) == SLJIT_UNUSED || srcw == 0);
- }
- if ((dst & REG_MASK) != SLJIT_UNUSED) {
- CHECK_ARGUMENT((dst & REG_MASK) != OFFS_REG(src));
- CHECK_ARGUMENT((dst & OFFS_REG_MASK) == SLJIT_UNUSED || dstw == 0);
- }
- compiler->last_flags = 0;
}
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
if (SLJIT_UNLIKELY(!!compiler->verbose)) {
- if (GET_OPCODE(op) <= SLJIT_MOVU_P)
+ if (GET_OPCODE(op) <= SLJIT_MOV_P)
{
- fprintf(compiler->verbose, " mov%s%s%s ", (GET_OPCODE(op) >= SLJIT_MOVU) ? "u" : "",
- !(op & SLJIT_I32_OP) ? "" : "32", (op != SLJIT_MOV32 && op != SLJIT_MOVU32) ? op1_names[GET_OPCODE(op) - SLJIT_OP1_BASE] : "");
+ fprintf(compiler->verbose, " mov%s%s ", !(op & SLJIT_I32_OP) ? "" : "32",
+ (op != SLJIT_MOV32) ? op1_names[GET_OPCODE(op) - SLJIT_OP1_BASE] : "");
}
else
{
@@ -1417,9 +1514,8 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_jump(struct sljit_compile
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_REWRITABLE_JUMP | SLJIT_I32_OP)));
CHECK_ARGUMENT((type & 0xff) != GET_FLAG_TYPE(SLJIT_SET_CARRY) && (type & 0xff) != (GET_FLAG_TYPE(SLJIT_SET_CARRY) + 1));
- CHECK_ARGUMENT((type & 0xff) >= SLJIT_EQUAL && (type & 0xff) <= SLJIT_CALL3);
+ CHECK_ARGUMENT((type & 0xff) >= SLJIT_EQUAL && (type & 0xff) <= SLJIT_FAST_CALL);
CHECK_ARGUMENT((type & 0xff) < SLJIT_JUMP || !(type & SLJIT_I32_OP));
- CHECK_ARGUMENT((type & 0xff) <= SLJIT_CALL0 || ((type & 0xff) - SLJIT_CALL0) <= compiler->scratches);
if ((type & 0xff) < SLJIT_JUMP) {
if ((type & 0xff) <= SLJIT_NOT_ZERO)
@@ -1439,6 +1535,63 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_jump(struct sljit_compile
CHECK_RETURN_OK;
}
+static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 arg_types)
+{
+#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ sljit_s32 i, types, curr_type, scratches, fscratches;
+
+ CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_REWRITABLE_JUMP)));
+ CHECK_ARGUMENT((type & 0xff) == SLJIT_CALL || (type & 0xff) == SLJIT_CALL_CDECL);
+
+ types = arg_types;
+ scratches = 0;
+ fscratches = 0;
+ for (i = 0; i < 5; i++) {
+ curr_type = (types & SLJIT_DEF_MASK);
+ CHECK_ARGUMENT(curr_type <= SLJIT_ARG_TYPE_F64);
+ if (i > 0) {
+ if (curr_type == 0) {
+ break;
+ }
+ if (curr_type >= SLJIT_ARG_TYPE_F32)
+ fscratches++;
+ else
+ scratches++;
+ } else {
+ if (curr_type >= SLJIT_ARG_TYPE_F32) {
+ CHECK_ARGUMENT(compiler->fscratches > 0);
+ } else if (curr_type >= SLJIT_ARG_TYPE_SW) {
+ CHECK_ARGUMENT(compiler->scratches > 0);
+ }
+ }
+ types >>= SLJIT_DEF_SHIFT;
+ }
+ CHECK_ARGUMENT(compiler->scratches >= scratches);
+ CHECK_ARGUMENT(compiler->fscratches >= fscratches);
+ CHECK_ARGUMENT(types == 0);
+#endif
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
+ if (SLJIT_UNLIKELY(!!compiler->verbose)) {
+ fprintf(compiler->verbose, " %s%s ret[%s", jump_names[type & 0xff],
+ !(type & SLJIT_REWRITABLE_JUMP) ? "" : ".r", call_arg_names[arg_types & SLJIT_DEF_MASK]);
+
+ arg_types >>= SLJIT_DEF_SHIFT;
+ if (arg_types) {
+ fprintf(compiler->verbose, "], args[");
+ do {
+ fprintf(compiler->verbose, "%s", call_arg_names[arg_types & SLJIT_DEF_MASK]);
+ arg_types >>= SLJIT_DEF_SHIFT;
+ if (arg_types)
+ fprintf(compiler->verbose, ",");
+ } while (arg_types);
+ }
+ fprintf(compiler->verbose, "]\n");
+ }
+#endif
+ CHECK_RETURN_OK;
+}
+
static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_cmp(struct sljit_compiler *compiler, sljit_s32 type,
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w)
@@ -1488,20 +1641,16 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fcmp(struct sljit_compile
CHECK_RETURN_OK;
}
-static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw)
+static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 src, sljit_sw srcw)
{
-#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->last_flags = 0;
-#endif
-
if (SLJIT_UNLIKELY(compiler->skip_checks)) {
compiler->skip_checks = 0;
CHECK_RETURN_OK;
}
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- CHECK_ARGUMENT(type >= SLJIT_JUMP && type <= SLJIT_CALL3);
- CHECK_ARGUMENT(type <= SLJIT_CALL0 || (type - SLJIT_CALL0) <= compiler->scratches);
+ CHECK_ARGUMENT(type >= SLJIT_JUMP && type <= SLJIT_FAST_CALL);
FUNCTION_CHECK_SRC(src, srcw);
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
@@ -1514,6 +1663,66 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_ijump(struct sljit_compil
CHECK_RETURN_OK;
}
+static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 arg_types,
+ sljit_s32 src, sljit_sw srcw)
+{
+#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ sljit_s32 i, types, curr_type, scratches, fscratches;
+
+ CHECK_ARGUMENT(type == SLJIT_CALL || type == SLJIT_CALL_CDECL);
+ FUNCTION_CHECK_SRC(src, srcw);
+
+ types = arg_types;
+ scratches = 0;
+ fscratches = 0;
+ for (i = 0; i < 5; i++) {
+ curr_type = (types & SLJIT_DEF_MASK);
+ CHECK_ARGUMENT(curr_type <= SLJIT_ARG_TYPE_F64);
+ if (i > 0) {
+ if (curr_type == 0) {
+ break;
+ }
+ if (curr_type >= SLJIT_ARG_TYPE_F32)
+ fscratches++;
+ else
+ scratches++;
+ } else {
+ if (curr_type >= SLJIT_ARG_TYPE_F32) {
+ CHECK_ARGUMENT(compiler->fscratches > 0);
+ } else if (curr_type >= SLJIT_ARG_TYPE_SW) {
+ CHECK_ARGUMENT(compiler->scratches > 0);
+ }
+ }
+ types >>= SLJIT_DEF_SHIFT;
+ }
+ CHECK_ARGUMENT(compiler->scratches >= scratches);
+ CHECK_ARGUMENT(compiler->fscratches >= fscratches);
+ CHECK_ARGUMENT(types == 0);
+#endif
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
+ if (SLJIT_UNLIKELY(!!compiler->verbose)) {
+ fprintf(compiler->verbose, " i%s%s ret[%s", jump_names[type & 0xff],
+ !(type & SLJIT_REWRITABLE_JUMP) ? "" : ".r", call_arg_names[arg_types & SLJIT_DEF_MASK]);
+
+ arg_types >>= SLJIT_DEF_SHIFT;
+ if (arg_types) {
+ fprintf(compiler->verbose, "], args[");
+ do {
+ fprintf(compiler->verbose, "%s", call_arg_names[arg_types & SLJIT_DEF_MASK]);
+ arg_types >>= SLJIT_DEF_SHIFT;
+ if (arg_types)
+ fprintf(compiler->verbose, ",");
+ } while (arg_types);
+ }
+ fprintf(compiler->verbose, "], ");
+ sljit_verbose_param(compiler, src, srcw);
+ fprintf(compiler->verbose, "\n");
+ }
+#endif
+ CHECK_RETURN_OK;
+}
+
static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 type)
@@ -1558,6 +1767,8 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_cmov(struct sljit_compile
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_I32_OP)));
CHECK_ARGUMENT((type & 0xff) >= SLJIT_EQUAL && (type & 0xff) <= SLJIT_ORDERED_F64);
+
+ CHECK_ARGUMENT(compiler->scratches != -1 && compiler->saveds != -1);
CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(dst_reg & ~SLJIT_I32_OP));
if (src != SLJIT_IMM) {
CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(src));
@@ -1574,7 +1785,7 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_cmov(struct sljit_compile
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
if (SLJIT_UNLIKELY(!!compiler->verbose)) {
fprintf(compiler->verbose, " cmov%s %s%s, ",
- !(dst_reg & SLJIT_I32_OP) ? "" : ".i",
+ !(dst_reg & SLJIT_I32_OP) ? "" : "32",
jump_names[type & 0xff], JUMP_POSTFIX(type));
sljit_verbose_reg(compiler, dst_reg & ~SLJIT_I32_OP);
fprintf(compiler->verbose, ", ");
@@ -1585,6 +1796,72 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_cmov(struct sljit_compile
CHECK_RETURN_OK;
}
+static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 reg,
+ sljit_s32 mem, sljit_sw memw)
+{
+#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ CHECK_ARGUMENT((type & 0xff) >= SLJIT_MOV && (type & 0xff) <= SLJIT_MOV_P);
+ CHECK_ARGUMENT(!(type & SLJIT_I32_OP) || ((type & 0xff) != SLJIT_MOV && (type & 0xff) != SLJIT_MOV_U32 && (type & 0xff) != SLJIT_MOV_P));
+ CHECK_ARGUMENT((type & SLJIT_MEM_PRE) || (type & SLJIT_MEM_POST));
+ CHECK_ARGUMENT((type & (SLJIT_MEM_PRE | SLJIT_MEM_POST)) != (SLJIT_MEM_PRE | SLJIT_MEM_POST));
+ CHECK_ARGUMENT((type & ~(0xff | SLJIT_I32_OP | SLJIT_MEM_STORE | SLJIT_MEM_SUPP | SLJIT_MEM_PRE | SLJIT_MEM_POST)) == 0);
+
+ FUNCTION_CHECK_SRC_MEM(mem, memw);
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(reg));
+
+ CHECK_ARGUMENT((mem & REG_MASK) != SLJIT_UNUSED && (mem & REG_MASK) != reg);
+#endif
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
+ if (!(type & SLJIT_MEM_SUPP) && SLJIT_UNLIKELY(!!compiler->verbose)) {
+ if (sljit_emit_mem(compiler, type | SLJIT_MEM_SUPP, reg, mem, memw) == SLJIT_ERR_UNSUPPORTED)
+ fprintf(compiler->verbose, " //");
+
+ fprintf(compiler->verbose, " mem%s.%s%s%s ",
+ !(type & SLJIT_I32_OP) ? "" : "32",
+ (type & SLJIT_MEM_STORE) ? "st" : "ld",
+ op1_names[(type & 0xff) - SLJIT_OP1_BASE],
+ (type & SLJIT_MEM_PRE) ? ".pre" : ".post");
+ sljit_verbose_reg(compiler, reg);
+ fprintf(compiler->verbose, ", ");
+ sljit_verbose_param(compiler, mem, memw);
+ fprintf(compiler->verbose, "\n");
+ }
+#endif
+ CHECK_RETURN_OK;
+}
+
+static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fmem(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 mem, sljit_sw memw)
+{
+#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ CHECK_ARGUMENT((type & 0xff) == SLJIT_MOV_F64);
+ CHECK_ARGUMENT((type & SLJIT_MEM_PRE) || (type & SLJIT_MEM_POST));
+ CHECK_ARGUMENT((type & (SLJIT_MEM_PRE | SLJIT_MEM_POST)) != (SLJIT_MEM_PRE | SLJIT_MEM_POST));
+ CHECK_ARGUMENT((type & ~(0xff | SLJIT_I32_OP | SLJIT_MEM_STORE | SLJIT_MEM_SUPP | SLJIT_MEM_PRE | SLJIT_MEM_POST)) == 0);
+
+ FUNCTION_CHECK_SRC_MEM(mem, memw);
+ CHECK_ARGUMENT(FUNCTION_CHECK_IS_FREG(freg));
+#endif
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
+ if (!(type & SLJIT_MEM_SUPP) && SLJIT_UNLIKELY(!!compiler->verbose)) {
+ if (sljit_emit_fmem(compiler, type | SLJIT_MEM_SUPP, freg, mem, memw) == SLJIT_ERR_UNSUPPORTED)
+ fprintf(compiler->verbose, " //");
+
+ fprintf(compiler->verbose, " fmem.%s%s%s ",
+ (type & SLJIT_MEM_STORE) ? "st" : "ld",
+ !(type & SLJIT_I32_OP) ? ".f64" : ".f32",
+ (type & SLJIT_MEM_PRE) ? ".pre" : ".post");
+ sljit_verbose_freg(compiler, freg);
+ fprintf(compiler->verbose, ", ");
+ sljit_verbose_param(compiler, mem, memw);
+ fprintf(compiler->verbose, "\n");
+ }
+#endif
+ CHECK_RETURN_OK;
+}
+
static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_get_local_base(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw offset)
{
/* Any offset is allowed. */
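A sketch of the pre-indexed form those checks validate, with illustrative operands: load a word into R0 from [R1 + 8] and write the updated address back into R1. Backends without the addressing mode report SLJIT_ERR_UNSUPPORTED, so a caller keeps an equivalent plain-load fallback:

if (sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_PRE, SLJIT_R0,
    SLJIT_MEM1(SLJIT_R1), 8) == SLJIT_ERR_UNSUPPORTED)
  {
  /* fallback: ordinary load, then advance the base register explicitly */
  sljit_emit_op1(compiler, SLJIT_MOV, SLJIT_R0, 0, SLJIT_MEM1(SLJIT_R1), 8);
  sljit_emit_op2(compiler, SLJIT_ADD, SLJIT_R1, 0, SLJIT_R1, 0, SLJIT_IMM, 8);
  }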
@@ -1858,6 +2135,49 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_fcmp(struct sljit_compile
return sljit_emit_jump(compiler, type);
}
+#if !(defined SLJIT_CONFIG_ARM_32 && SLJIT_CONFIG_ARM_32) \
+ && !(defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) \
+ && !(defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC)
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 reg,
+ sljit_s32 mem, sljit_sw memw)
+{
+ SLJIT_UNUSED_ARG(compiler);
+ SLJIT_UNUSED_ARG(type);
+ SLJIT_UNUSED_ARG(reg);
+ SLJIT_UNUSED_ARG(mem);
+ SLJIT_UNUSED_ARG(memw);
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw));
+
+ return SLJIT_ERR_UNSUPPORTED;
+}
+
+#endif
+
+#if !(defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) \
+ && !(defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC)
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 mem, sljit_sw memw)
+{
+ SLJIT_UNUSED_ARG(compiler);
+ SLJIT_UNUSED_ARG(type);
+ SLJIT_UNUSED_ARG(freg);
+ SLJIT_UNUSED_ARG(mem);
+ SLJIT_UNUSED_ARG(memw);
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fmem(compiler, type, freg, mem, memw));
+
+ return SLJIT_ERR_UNSUPPORTED;
+}
+
+#endif
+
#if !(defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86)
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_local_base(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw offset)
@@ -1943,12 +2263,12 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_free_code(void* code)
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler,
- sljit_s32 options, sljit_s32 args, sljit_s32 scratches, sljit_s32 saveds,
+ sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
SLJIT_UNUSED_ARG(compiler);
SLJIT_UNUSED_ARG(options);
- SLJIT_UNUSED_ARG(args);
+ SLJIT_UNUSED_ARG(arg_types);
SLJIT_UNUSED_ARG(scratches);
SLJIT_UNUSED_ARG(saveds);
SLJIT_UNUSED_ARG(fscratches);
@@ -1959,12 +2279,12 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler,
- sljit_s32 options, sljit_s32 args, sljit_s32 scratches, sljit_s32 saveds,
+ sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
SLJIT_UNUSED_ARG(compiler);
SLJIT_UNUSED_ARG(options);
- SLJIT_UNUSED_ARG(args);
+ SLJIT_UNUSED_ARG(arg_types);
SLJIT_UNUSED_ARG(scratches);
SLJIT_UNUSED_ARG(saveds);
SLJIT_UNUSED_ARG(fscratches);
@@ -2109,6 +2429,16 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile
return NULL;
}
+SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 arg_types)
+{
+ SLJIT_UNUSED_ARG(compiler);
+ SLJIT_UNUSED_ARG(type);
+ SLJIT_UNUSED_ARG(arg_types);
+ SLJIT_UNREACHABLE();
+ return NULL;
+}
+
SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler *compiler, sljit_s32 type,
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w)
@@ -2161,6 +2491,19 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi
return SLJIT_ERR_UNSUPPORTED;
}
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 arg_types,
+ sljit_s32 src, sljit_sw srcw)
+{
+ SLJIT_UNUSED_ARG(compiler);
+ SLJIT_UNUSED_ARG(type);
+ SLJIT_UNUSED_ARG(arg_types);
+ SLJIT_UNUSED_ARG(src);
+ SLJIT_UNUSED_ARG(srcw);
+ SLJIT_UNREACHABLE();
+ return SLJIT_ERR_UNSUPPORTED;
+}
+
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 type)
@@ -2187,6 +2530,28 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compil
return SLJIT_ERR_UNSUPPORTED;
}
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 reg, sljit_s32 mem, sljit_sw memw)
+{
+ SLJIT_UNUSED_ARG(compiler);
+ SLJIT_UNUSED_ARG(type);
+ SLJIT_UNUSED_ARG(reg);
+ SLJIT_UNUSED_ARG(mem);
+ SLJIT_UNUSED_ARG(memw);
+ SLJIT_UNREACHABLE();
+ return SLJIT_ERR_UNSUPPORTED;
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 freg, sljit_s32 mem, sljit_sw memw)
+{
+ SLJIT_UNUSED_ARG(compiler);
+ SLJIT_UNUSED_ARG(type);
+ SLJIT_UNUSED_ARG(freg);
+ SLJIT_UNUSED_ARG(mem);
+ SLJIT_UNUSED_ARG(memw);
+ SLJIT_UNREACHABLE();
+ return SLJIT_ERR_UNSUPPORTED;
+}
+
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_local_base(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw offset)
{
SLJIT_UNUSED_ARG(compiler);
diff --git a/src/3rdparty/pcre2/src/sljit/sljitLir.h b/src/3rdparty/pcre2/src/sljit/sljitLir.h
index 470c84f592..920f6d4f78 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitLir.h
+++ b/src/3rdparty/pcre2/src/sljit/sljitLir.h
@@ -153,8 +153,8 @@ of sljitConfigInternal.h */
is not available at all.
*/
-/* When SLJIT_UNUSED is specified as the destination of sljit_emit_op1 and
- and sljit_emit_op2 operations the result is discarded. If no status
+/* When SLJIT_UNUSED is specified as the destination of sljit_emit_op1
+ or sljit_emit_op2 operations the result is discarded. If no status
flags are set, no instructions are emitted for these operations. Data
prefetch is a special exception, see SLJIT_MOV operation. Other SLJIT
operations do not support SLJIT_UNUSED as a destination operand. */
@@ -213,14 +213,6 @@ of sljitConfigInternal.h */
#define SLJIT_RETURN_REG SLJIT_R0
-/* x86 prefers specific registers for special purposes. In case of shift
- by register it supports only SLJIT_R2 for shift argument
- (which is the src2 argument of sljit_emit_op2). If another register is
- used, sljit must exchange data between registers which cause a minor
- slowdown. Other architectures has no such limitation. */
-
-#define SLJIT_PREF_SHIFT_REG SLJIT_R2
-
/* --------------------------------------------------------------------- */
/* Floating point registers */
/* --------------------------------------------------------------------- */
@@ -258,6 +250,79 @@ of sljitConfigInternal.h */
#define SLJIT_FIRST_SAVED_FLOAT_REG (SLJIT_FS0 - SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS + 1)
/* --------------------------------------------------------------------- */
+/* Argument type definitions */
+/* --------------------------------------------------------------------- */
+
+/* Argument type definitions.
+ Used by the SLJIT_[DEF_]ARGx and SLJIT_[DEF_]RET macros. */
+
+#define SLJIT_ARG_TYPE_VOID 0
+#define SLJIT_ARG_TYPE_SW 1
+#define SLJIT_ARG_TYPE_UW 2
+#define SLJIT_ARG_TYPE_S32 3
+#define SLJIT_ARG_TYPE_U32 4
+#define SLJIT_ARG_TYPE_F32 5
+#define SLJIT_ARG_TYPE_F64 6
+
+/* The following argument type definitions are used by sljit_emit_enter,
+ sljit_set_context, sljit_emit_call, and sljit_emit_icall functions.
+ The following return type definitions are used by sljit_emit_call
+ and sljit_emit_icall functions.
+
+ When a function is called, the first integer argument must be placed
+ in SLJIT_R0, the second in SLJIT_R1, and so on. Similarly the first
+ floating point argument must be placed in SLJIT_FR0, the second in
+ SLJIT_FR1, and so on.
+
+ Example function definition:
+ sljit_f32 SLJIT_FUNC example_c_callback(sljit_sw arg_a,
+ sljit_f64 arg_b, sljit_u32 arg_c, sljit_f32 arg_d);
+
+ Argument type definition:
+ SLJIT_DEF_RET(SLJIT_ARG_TYPE_F32)
+ | SLJIT_DEF_ARG1(SLJIT_ARG_TYPE_SW) | SLJIT_DEF_ARG2(SLJIT_ARG_TYPE_F64)
+ | SLJIT_DEF_ARG3(SLJIT_ARG_TYPE_U32) | SLJIT_DEF_ARG4(SLJIT_ARG_TYPE_F32)
+
+ Short form of argument type definition:
+ SLJIT_RET(F32) | SLJIT_ARG1(SW) | SLJIT_ARG2(F64)
+ | SLJIT_ARG3(U32) | SLJIT_ARG4(F32)
+
+ Argument passing:
+ arg_a must be placed in SLJIT_R0
+ arg_c must be placed in SLJIT_R1
+ arg_b must be placed in SLJIT_FR0
+ arg_d must be placed in SLJIT_FR1
+
+Note:
+ The SLJIT_ARG_TYPE_VOID type is only supported by
+ SLJIT_DEF_RET, and SLJIT_ARG_TYPE_VOID is also the
+ default value when SLJIT_DEF_RET is not specified. */
+#define SLJIT_DEF_SHIFT 4
+#define SLJIT_DEF_RET(type) (type)
+#define SLJIT_DEF_ARG1(type) ((type) << SLJIT_DEF_SHIFT)
+#define SLJIT_DEF_ARG2(type) ((type) << (2 * SLJIT_DEF_SHIFT))
+#define SLJIT_DEF_ARG3(type) ((type) << (3 * SLJIT_DEF_SHIFT))
+#define SLJIT_DEF_ARG4(type) ((type) << (4 * SLJIT_DEF_SHIFT))
+
+/* Short form of the macros above.
+
+ For example the following definition:
+ SLJIT_DEF_RET(SLJIT_ARG_TYPE_SW) | SLJIT_DEF_ARG1(SLJIT_ARG_TYPE_F32)
+
+ can be shortened to:
+ SLJIT_RET(SW) | SLJIT_ARG1(F32)
+
+Note:
+ The VOID type is only supported by SLJIT_RET, and
+ VOID is also the default value when SLJIT_RET is
+ not specified. */
+#define SLJIT_RET(type) SLJIT_DEF_RET(SLJIT_ARG_TYPE_ ## type)
+#define SLJIT_ARG1(type) SLJIT_DEF_ARG1(SLJIT_ARG_TYPE_ ## type)
+#define SLJIT_ARG2(type) SLJIT_DEF_ARG2(SLJIT_ARG_TYPE_ ## type)
+#define SLJIT_ARG3(type) SLJIT_DEF_ARG3(SLJIT_ARG_TYPE_ ## type)
+#define SLJIT_ARG4(type) SLJIT_DEF_ARG4(SLJIT_ARG_TYPE_ ## type)
+
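For orientation, a minimal sketch of how these macros compose for the example callback above (assuming only the definitions in this header; the numeric value follows from SLJIT_DEF_SHIFT being 4):

    /* sljit_f32 f(sljit_sw, sljit_f64, sljit_u32, sljit_f32): the return type
       occupies bits 0-3, argument n occupies bits 4n..4n+3; equals 0x54615 */
    sljit_s32 arg_types = SLJIT_RET(F32)
        | SLJIT_ARG1(SW) | SLJIT_ARG2(F64)
        | SLJIT_ARG3(U32) | SLJIT_ARG4(F32);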
+/* --------------------------------------------------------------------- */
/* Main structures and functions */
/* --------------------------------------------------------------------- */
@@ -331,6 +396,7 @@ struct sljit_compiler {
sljit_s32 args;
sljit_s32 locals_offset;
sljit_s32 saveds_offset;
+ sljit_s32 stack_tmp_size;
#endif
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
@@ -356,15 +422,8 @@ struct sljit_compiler {
sljit_uw shift_imm;
#endif
-#if (defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64)
- sljit_s32 cache_arg;
- sljit_sw cache_argw;
-#endif
-
#if (defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC)
sljit_sw imm;
- sljit_s32 cache_arg;
- sljit_sw cache_argw;
#endif
#if (defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS)
@@ -499,14 +558,10 @@ static SLJIT_INLINE sljit_uw sljit_get_generated_code_size(struct sljit_compiler
#define SLJIT_HAS_FPU 0
/* [Limitation] Some registers are virtual registers. */
#define SLJIT_HAS_VIRTUAL_REGISTERS 1
-/* [Emulated] Some forms of move with pre update is supported. */
-#define SLJIT_HAS_PRE_UPDATE 2
/* [Emulated] Count leading zero is supported. */
-#define SLJIT_HAS_CLZ 3
+#define SLJIT_HAS_CLZ 2
/* [Emulated] Conditional move is supported. */
-#define SLJIT_HAS_CMOV 4
-/* [Limitation] [Emulated] Shifting with register is limited to SLJIT_PREF_SHIFT_REG. */
-#define SLJIT_HAS_PREF_SHIFT_REG 5
+#define SLJIT_HAS_CMOV 3
#if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86)
/* [Not emulated] SSE2 support is available on x86. */
@@ -519,27 +574,28 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
error, they return with SLJIT_SUCCESS. */
/*
- The executable code is a function call from the viewpoint of the C
+ The executable code is a function from the viewpoint of the C
language. The function calls must obey the ABI (Application
Binary Interface) of the platform, which specifies the purpose of
- all machine registers and stack handling among other things. The
+ machine registers and stack handling among other things. The
sljit_emit_enter function emits the necessary instructions for
setting up a new context for the executable code and moves function
arguments to the saved registers. Furthermore the options argument
can be used to pass configuration options to the compiler. The
available options are listed before sljit_emit_enter.
- The number of sljit_sw arguments passed to the generated function
- are specified in the "args" parameter. The number of arguments must
- be less than or equal to 3. The first argument goes to SLJIT_S0,
- the second goes to SLJIT_S1 and so on. The register set used by
- the function must be declared as well. The number of scratch and
- saved registers used by the function must be passed to sljit_emit_enter.
- Only R registers between R0 and "scratches" argument can be used
- later. E.g. if "scratches" is set to 2, the register set will be
- limited to R0 and R1. The S registers and the floating point
+ The function argument list is the combination of SLJIT_ARGx
+ (SLJIT_DEF_ARGx) macros. Currently at most 3 SW / UW
+ (SLJIT_ARG_TYPE_SW / SLJIT_ARG_TYPE_UW) arguments are supported.
+ The first argument goes to SLJIT_S0, the second goes to SLJIT_S1
+ and so on. The register set used by the function must be declared
+ as well. The number of scratch and saved registers used by the
+ function must be passed to sljit_emit_enter. Only R registers
+ between R0 and "scratches" argument can be used later. E.g. if
+ "scratches" is set to 2, the scratch register set will be limited
+ to SLJIT_R0 and SLJIT_R1. The S registers and the floating point
registers ("fscratches" and "fsaveds") are specified in a similar
- way. The sljit_emit_enter is also capable of allocating a stack
+ manner. The sljit_emit_enter is also capable of allocating a stack
space for local variables. The "local_size" argument contains the
size in bytes of this local area and its starting address is stored
in SLJIT_SP. The memory area between SLJIT_SP (inclusive) and
@@ -566,7 +622,7 @@ offset 0 is aligned to sljit_f64. Otherwise it is aligned to sljit_sw. */
#define SLJIT_MAX_LOCAL_SIZE 65536
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler,
- sljit_s32 options, sljit_s32 args, sljit_s32 scratches, sljit_s32 saveds,
+ sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size);
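A minimal sketch of the entry point under the renamed arg_types parameter (the register counts and local size are arbitrary illustration values, and compiler is assumed to be a valid struct sljit_compiler pointer):

    /* two machine-word arguments arrive in SLJIT_S0 and SLJIT_S1; three scratch
       and two saved registers are used, with 256 bytes of local space */
    sljit_emit_enter(compiler, 0, SLJIT_ARG1(SW) | SLJIT_ARG2(SW), 3, 2, 0, 0, 256);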
/* The machine code has a context (which contains the local stack space size,
@@ -580,7 +636,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
the previous context. */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler,
- sljit_s32 options, sljit_s32 args, sljit_s32 scratches, sljit_s32 saveds,
+ sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size);
/* Return from machine code. The op argument can be SLJIT_UNUSED which means the
@@ -592,26 +648,31 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *comp
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 src, sljit_sw srcw);
-/* Fast calling mechanism for utility functions (see SLJIT_FAST_CALL). All registers and
- even the stack frame is passed to the callee. The return address is preserved in
- dst/dstw by sljit_emit_fast_enter (the type of the value stored by this function
- is sljit_p), and sljit_emit_fast_return can use this as a return value later. */
+/* Generating entry and exit points for fast call functions (see SLJIT_FAST_CALL).
+ Both sljit_emit_fast_enter and sljit_emit_fast_return functions preserve the
+ values of all registers and the stack frame. The return address is stored in the
+ dst argument of sljit_emit_fast_enter, and this return address can be passed
+ to sljit_emit_fast_return to continue the execution after the fast call.
-/* Note: only for sljit specific, non ABI compilant calls. Fast, since only a few machine
- instructions are needed. Excellent for small uility functions, where saving registers
- and setting up a new stack frame would cost too much performance. However, it is still
- possible to return to the address of the caller (or anywhere else). */
+ Fast calls are cheap operations (usually only a single call instruction is
+ emitted), but the calling mechanism itself does not save or restore any
+ registers. However the callee function can freely use / update any registers
+ and stack values, which can be efficiently exploited by various optimizations.
+ Registers can be saved manually by the callee function if needed.
-/* Note: may destroy flags. */
+ Although returning to a different address by sljit_emit_fast_return is possible,
+ this address usually cannot be predicted by the return address predictor of
+ modern CPUs which may reduce performance. Furthermore using sljit_emit_ijump
+ to return is also inefficient since return address prediction is usually
+ triggered by a specific form of ijump.
-/* Note: although sljit_emit_fast_return could be replaced by an ijump, it is not suggested,
- since many architectures do clever branch prediction on call / return instruction pairs. */
+ Flags: - (does not modify flags). */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw);
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_s32 src, sljit_sw srcw);
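A compact sketch of the pairing described above (illustrative only; callee_label is assumed to have been created earlier with sljit_emit_label):

    /* callee: keep the return address in SLJIT_R1 and return through it */
    sljit_emit_fast_enter(compiler, SLJIT_R1, 0);
    /* ... small utility body; SLJIT_R1 must stay intact ... */
    sljit_emit_fast_return(compiler, SLJIT_R1, 0);

    /* caller: a single fast call, no register save/restore is emitted */
    struct sljit_jump *fast = sljit_emit_jump(compiler, SLJIT_FAST_CALL);
    sljit_set_label(fast, callee_label);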
/*
- Source and destination values for arithmetical instructions
+ Source and destination operands for arithmetical instructions
imm - a simple immediate value (cannot be used as a destination)
reg - any of the registers (immediate argument must be 0)
[imm] - absolute immediate memory address
@@ -652,6 +713,9 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_return(struct sljit_compiler
arm-t2: [reg+imm], -255 <= imm <= 4095
[reg+(reg<<imm)] is supported
Write back is supported only for [reg+imm], where -255 <= imm <= 255
+ arm64: [reg+imm], -256 <= imm <= 255, 0 <= aligned imm <= 4095 * alignment
+ [reg+(reg<<imm)] is supported
+ Write back is supported only for [reg+imm], where -256 <= imm <= 255
ppc: [reg+imm], -65536 <= imm <= 65535. 64 bit loads/stores and 32 bit
signed load on 64 bit requires immediates divisible by 4.
[reg+imm] is not supported for signed 8 bit values.
@@ -663,8 +727,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_return(struct sljit_compiler
[reg+reg] is supported
*/
-/* Register output: simply the name of the register.
- For destination, you can use SLJIT_UNUSED as well. */
+/* Macros for specifying operand types. */
#define SLJIT_MEM 0x80
#define SLJIT_MEM0() (SLJIT_MEM)
#define SLJIT_MEM1(r1) (SLJIT_MEM | (r1))
@@ -833,43 +896,14 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
S32 - signed int (32 bit) data transfer
P - pointer (sljit_p) data transfer
- U = move with update (pre form). If source or destination defined as
- SLJIT_MEM1(r1) or SLJIT_MEM2(r1, r2), r1 is increased by the
- offset part of the address.
-
- Register arguments and base registers can only be used once for move
- with update instructions. The shift value of SLJIT_MEM2 addressing
- mode must also be 0. Reason: SLJIT_MOVU instructions are expected to
- be in high-performance loops where complex instruction emulation
- would be too costly.
-
- Examples for invalid move with update instructions:
-
- sljit_emit_op1(..., SLJIT_MOVU_U8,
- SLJIT_R0, 0, SLJIT_MEM1(SLJIT_R0), 8);
- sljit_emit_op1(..., SLJIT_MOVU_U8,
- SLJIT_MEM2(SLJIT_R1, SLJIT_R0), 0, SLJIT_R0, 0);
- sljit_emit_op1(..., SLJIT_MOVU_U8,
- SLJIT_MEM2(SLJIT_R0, SLJIT_R1), 0, SLJIT_MEM1(SLJIT_R0), 8);
- sljit_emit_op1(..., SLJIT_MOVU_U8,
- SLJIT_MEM2(SLJIT_R0, SLJIT_R1), 0, SLJIT_MEM2(SLJIT_R1, SLJIT_R0), 0);
- sljit_emit_op1(..., SLJIT_MOVU_U8,
- SLJIT_R2, 0, SLJIT_MEM2(SLJIT_R0, SLJIT_R1), 1);
-
- The following example is valid, since only the offset register is
- used multiple times:
-
- sljit_emit_op1(..., SLJIT_MOVU_U8,
- SLJIT_MEM2(SLJIT_R0, SLJIT_R2), 0, SLJIT_MEM2(SLJIT_R1, SLJIT_R2), 0);
-
- If the destination of a MOV without update instruction is SLJIT_UNUSED
- and the source operand is a memory address the compiler emits a prefetch
- instruction if this instruction is supported by the current CPU.
- Higher data sizes bring the data closer to the core: a MOV with word
- size loads the data into a higher level cache than a byte size. Otherwise
- the type does not affect the prefetch instruction. Furthermore a prefetch
- instruction never fails, so it can be used to prefetch a data from an
- address and check whether that address is NULL afterwards.
+ If the destination of a MOV instruction is SLJIT_UNUSED and the source
+ operand is a memory address the compiler emits a prefetch instruction
+ if this instruction is supported by the current CPU. Higher data sizes
+ bring the data closer to the core: a MOV with word size loads the data
+ into a higher level cache than a byte size. Otherwise the type does not
+ affect the prefetch instruction. Furthermore a prefetch instruction
+ never fails, so it can be used to prefetch data from an address and
+ check whether that address is NULL afterwards.
*/
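A one-line sketch of the prefetch form just described (assuming a pointer is already in SLJIT_R0):

    /* prefetch the data at [R0 + 64]; nothing is written since dst is unused */
    sljit_emit_op1(compiler, SLJIT_MOV, SLJIT_UNUSED, 0, SLJIT_MEM1(SLJIT_R0), 64);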
/* Flags: - (does not modify flags) */
@@ -894,41 +928,23 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
#define SLJIT_MOV_S32 (SLJIT_OP1_BASE + 6)
/* Flags: - (does not modify flags) */
#define SLJIT_MOV32 (SLJIT_MOV_S32 | SLJIT_I32_OP)
-/* Flags: - (does not modify flags) */
+/* Flags: - (does not modify flags)
+ Note: loads pointer sized data, useful on x32 (a 32 bit mode on x86-64
+ where all x64 features are available, e.g. 16 registers) or similar
+ compiling modes */
#define SLJIT_MOV_P (SLJIT_OP1_BASE + 7)
-/* Flags: - (may destroy flags) */
-#define SLJIT_MOVU (SLJIT_OP1_BASE + 8)
-/* Flags: - (may destroy flags) */
-#define SLJIT_MOVU_U8 (SLJIT_OP1_BASE + 9)
-#define SLJIT_MOVU32_U8 (SLJIT_MOVU_U8 | SLJIT_I32_OP)
-/* Flags: - (may destroy flags) */
-#define SLJIT_MOVU_S8 (SLJIT_OP1_BASE + 10)
-#define SLJIT_MOVU32_S8 (SLJIT_MOVU_S8 | SLJIT_I32_OP)
-/* Flags: - (may destroy flags) */
-#define SLJIT_MOVU_U16 (SLJIT_OP1_BASE + 11)
-#define SLJIT_MOVU32_U16 (SLJIT_MOVU_U16 | SLJIT_I32_OP)
-/* Flags: - (may destroy flags) */
-#define SLJIT_MOVU_S16 (SLJIT_OP1_BASE + 12)
-#define SLJIT_MOVU32_S16 (SLJIT_MOVU_S16 | SLJIT_I32_OP)
-/* Flags: - (may destroy flags)
- Note: no SLJIT_MOVU32_U32 form, since it is the same as SLJIT_MOVU32 */
-#define SLJIT_MOVU_U32 (SLJIT_OP1_BASE + 13)
-/* Flags: - (may destroy flags)
- Note: no SLJIT_MOVU32_S32 form, since it is the same as SLJIT_MOVU32 */
-#define SLJIT_MOVU_S32 (SLJIT_OP1_BASE + 14)
-/* Flags: - (may destroy flags) */
-#define SLJIT_MOVU32 (SLJIT_MOVU_S32 | SLJIT_I32_OP)
-/* Flags: - (may destroy flags) */
-#define SLJIT_MOVU_P (SLJIT_OP1_BASE + 15)
-/* Flags: Z */
-#define SLJIT_NOT (SLJIT_OP1_BASE + 16)
+/* Flags: Z
+ Note: immediate source argument is not supported */
+#define SLJIT_NOT (SLJIT_OP1_BASE + 8)
#define SLJIT_NOT32 (SLJIT_NOT | SLJIT_I32_OP)
-/* Flags: Z | OVERFLOW */
-#define SLJIT_NEG (SLJIT_OP1_BASE + 17)
+/* Flags: Z | OVERFLOW
+ Note: immediate source argument is not supported */
+#define SLJIT_NEG (SLJIT_OP1_BASE + 9)
#define SLJIT_NEG32 (SLJIT_NEG | SLJIT_I32_OP)
/* Count leading zeroes
- Flags: - (may destroy flags) */
-#define SLJIT_CLZ (SLJIT_OP1_BASE + 18)
+ Flags: - (may destroy flags)
+ Note: immediate source argument is not supported */
+#define SLJIT_CLZ (SLJIT_OP1_BASE + 10)
#define SLJIT_CLZ32 (SLJIT_CLZ | SLJIT_I32_OP)
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op,
@@ -1136,25 +1152,32 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compi
/* Unconditional jump types. */
#define SLJIT_JUMP 24
+ /* Fast calling method. See sljit_emit_fast_enter / sljit_emit_fast_return. */
#define SLJIT_FAST_CALL 25
-#define SLJIT_CALL0 26
-#define SLJIT_CALL1 27
-#define SLJIT_CALL2 28
-#define SLJIT_CALL3 29
-
-/* Fast calling method. See sljit_emit_fast_enter / sljit_emit_fast_return. */
+ /* Called function must be declared with the SLJIT_FUNC attribute. */
+#define SLJIT_CALL 26
+ /* Called function must be declared with the cdecl attribute.
+ This is the default attribute for C functions. */
+#define SLJIT_CALL_CDECL 27
/* The target can be changed during runtime (see: sljit_set_jump_addr). */
#define SLJIT_REWRITABLE_JUMP 0x1000
/* Emit a jump instruction. The destination is not set, only the type of the jump.
- type must be between SLJIT_EQUAL and SLJIT_CALL3
+ type must be between SLJIT_EQUAL and SLJIT_FAST_CALL
type can be combined (or'ed) with SLJIT_REWRITABLE_JUMP
- Flags: does not modify flags for conditional and unconditional
- jumps but destroy all flags for calls. */
+ Flags: does not modify flags. */
SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_s32 type);
+/* Emit a C compiler (ABI) compatible function call.
+ type must be SLJIT_CALL or SLJIT_CALL_CDECL
+ type can be combined (or'ed) with SLJIT_REWRITABLE_JUMP
+ arg_types is the combination of SLJIT_RET / SLJIT_ARGx (SLJIT_DEF_RET / SLJIT_DEF_ARGx) macros
+
+ Flags: destroy all flags. */
+SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 arg_types);
+
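A short usage sketch of this call form (helper is a hypothetical cdecl C function taking and returning one machine word; compiler is assumed valid):

    /* long helper(long);  -- hypothetical callee */
    struct sljit_jump *call;

    sljit_emit_op1(compiler, SLJIT_MOV, SLJIT_R0, 0, SLJIT_S0, 0); /* arg 1 -> R0 */
    call = sljit_emit_call(compiler, SLJIT_CALL_CDECL,
        SLJIT_RET(SW) | SLJIT_ARG1(SW));
    sljit_set_target(call, SLJIT_FUNC_OFFSET(helper));
    /* the result is left in SLJIT_RETURN_REG (SLJIT_R0) */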
/* Basic arithmetic comparison. In most architectures it is implemented as
an SLJIT_SUB operation (with SLJIT_UNUSED destination and setting
appropriate flags) followed by a sljit_emit_jump. However some
@@ -1162,6 +1185,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile
It is suggested to use this comparison form when appropriate.
type must be between SLJIT_EQUAL and SLJIT_I_SIG_LESS_EQUAL
type can be combined (or'ed) with SLJIT_REWRITABLE_JUMP
+
Flags: may destroy flags. */
SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler *compiler, sljit_s32 type,
sljit_s32 src1, sljit_sw src1w,
@@ -1186,15 +1210,23 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_set_label(struct sljit_jump *jump, struct sl
/* Set the destination address of the jump to this label. */
SLJIT_API_FUNC_ATTRIBUTE void sljit_set_target(struct sljit_jump *jump, sljit_uw target);
-/* Call function or jump anywhere. Both direct and indirect form
- type must be between SLJIT_JUMP and SLJIT_CALL3
- Direct form: set src to SLJIT_IMM() and srcw to the address
- Indirect form: any other valid addressing mode
+/* Emit an indirect jump or fast call. Both direct and indirect forms are supported:
+ Direct form: set src to SLJIT_IMM() and srcw to the address
+ Indirect form: any other valid addressing mode
+ type must be between SLJIT_JUMP and SLJIT_FAST_CALL
- Flags: does not modify flags for unconditional jumps but
- destroy all flags for calls. */
+ Flags: does not modify flags. */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw);
+/* Emit a C compiler (ABI) compatible function call.
+ Direct form: set src to SLJIT_IMM() and srcw to the address
+ Indirect form: any other valid addressing mode
+ type must be SLJIT_CALL or SLJIT_CALL_CDECL
+ arg_types is the combination of SLJIT_RET / SLJIT_ARGx (SLJIT_DEF_RET / SLJIT_DEF_ARGx) macros
+
+ Flags: destroy all flags. */
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 arg_types, sljit_s32 src, sljit_sw srcw);
+
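The indirect counterpart, sketched under the same assumptions (the callee address is expected in a register here):

    /* call the function whose address is in SLJIT_R2; one word argument in
       SLJIT_R0, no return value (VOID is the default when SLJIT_RET is omitted) */
    sljit_emit_icall(compiler, SLJIT_CALL_CDECL, SLJIT_ARG1(SW), SLJIT_R2, 0);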
/* Perform the operation using the conditional flags as the second argument.
Type must always be between SLJIT_EQUAL and SLJIT_ORDERED_F64. The value
represented by the type is 1, if the condition represented by the type
@@ -1213,7 +1245,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
/* Emit a conditional mov instruction which moves source to destination,
if the condition is satisfied. Unlike other arithmetic operations this
- instruction does not support memory accesses.
+ instruction does not support memory access.
type must be between SLJIT_EQUAL and SLJIT_ORDERED_F64
dst_reg must be a valid register and it can be combined
@@ -1225,6 +1257,51 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compil
sljit_s32 dst_reg,
sljit_s32 src, sljit_sw srcw);
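For instance (a sketch, assuming the preceding flag-setting operation produced the Z flag):

    /* R0 = R1 only when the last flag-setting operation reported equality */
    sljit_emit_cmov(compiler, SLJIT_EQUAL, SLJIT_R0, SLJIT_R1, 0);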
+/* The following flags are used by sljit_emit_mem() and sljit_emit_fmem(). */
+
+/* When SLJIT_MEM_SUPP is passed, no instructions are emitted.
+ Instead the function returns with SLJIT_SUCCESS if the instruction
+ form is supported and SLJIT_ERR_UNSUPPORTED otherwise. This flag
+ allows runtime checking of available instruction forms. */
+#define SLJIT_MEM_SUPP 0x0200
+/* Memory load operation. This is the default. */
+#define SLJIT_MEM_LOAD 0x0000
+/* Memory store operation. */
+#define SLJIT_MEM_STORE 0x0400
+/* Base register is updated before the memory access. */
+#define SLJIT_MEM_PRE 0x0800
+/* Base register is updated after the memory access. */
+#define SLJIT_MEM_POST 0x1000
+
+/* Emit a single memory load or store with update instruction. When the
+ requested instruction form is not supported by the CPU, it returns
+ with SLJIT_ERR_UNSUPPORTED instead of emulating the instruction. This
+ allows specializing tight loops based on the supported instruction
+ forms (see SLJIT_MEM_SUPP flag).
+
+ type must be between SLJIT_MOV and SLJIT_MOV_P and can be
+ combined with SLJIT_MEM_* flags. Either SLJIT_MEM_PRE
+ or SLJIT_MEM_POST must be specified.
+ reg is the source or destination register, and must be
+ different from the base register of the mem operand
+ mem must be a SLJIT_MEM1() or SLJIT_MEM2() operand
+
+ Flags: - (does not modify flags) */
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 reg,
+ sljit_s32 mem, sljit_sw memw);
+
+/* Same as sljit_emit_mem except the following:
+
+ type must be SLJIT_MOV_F64 or SLJIT_MOV_F32 and can be
+ combined with SLJIT_MEM_* flags. Either SLJIT_MEM_PRE
+ or SLJIT_MEM_POST must be specified.
+ freg is the source or destination floating point register */
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 mem, sljit_sw memw);
+
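A sketch of the intended probe-then-emit pattern (illustrative; SLJIT_R1 is assumed to hold a source pointer advanced by one word per access):

    sljit_s32 status = sljit_emit_mem(compiler,
        SLJIT_MOV | SLJIT_MEM_SUPP | SLJIT_MEM_POST,
        SLJIT_R0, SLJIT_MEM1(SLJIT_R1), sizeof(sljit_sw));

    if (status == SLJIT_SUCCESS)
        /* a single load-with-post-update instruction */
        sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_POST,
            SLJIT_R0, SLJIT_MEM1(SLJIT_R1), sizeof(sljit_sw));
    else {
        /* fallback: plain load followed by an explicit pointer update */
        sljit_emit_op1(compiler, SLJIT_MOV, SLJIT_R0, 0, SLJIT_MEM1(SLJIT_R1), 0);
        sljit_emit_op2(compiler, SLJIT_ADD, SLJIT_R1, 0, SLJIT_R1, 0,
            SLJIT_IMM, sizeof(sljit_sw));
    }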
/* Copies the base address of SLJIT_SP + offset to dst. The offset can be
anything to negate the effect of relative addressing. For example if an
array of sljit_sw values is stored on the stack from offset 0x40, and R0
@@ -1270,58 +1347,58 @@ SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void);
#if (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK)
/* This global lock is useful to compile common functions. */
-SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void);
-SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void);
+SLJIT_API_FUNC_ATTRIBUTE void SLJIT_FUNC sljit_grab_lock(void);
+SLJIT_API_FUNC_ATTRIBUTE void SLJIT_FUNC sljit_release_lock(void);
#endif
#if (defined SLJIT_UTIL_STACK && SLJIT_UTIL_STACK)
-/* The sljit_stack is a utility extension of sljit, which provides
- a top-down stack. The stack starts at base and goes down to
- max_limit, so the memory region for this stack is between
- max_limit (inclusive) and base (exclusive). However the
- application can only use the region between limit (inclusive)
- and base (exclusive). The sljit_stack_resize can be used to
- extend this region up to max_limit.
+/* The sljit_stack structure and its manipulation functions provide
+ an implementation for a top-down stack. The stack top is stored
+ in the end field of the sljit_stack structure and the stack goes
+ down to the min_start field, so the memory region reserved for
+ this stack is between min_start (inclusive) and end (exclusive)
+ fields. However the application can only use the region between
+ start (inclusive) and end (exclusive) fields. The sljit_stack_resize
+ function can be used to extend this region up to min_start.
This feature uses the "address space reserve" feature of modern
- operating systems, so instead of allocating a huge memory block
- applications can allocate a small region and extend it later
- without moving the memory area. Hence pointers can be stored
- in this area. */
-
-/* Note: base and max_limit fields are aligned to PAGE_SIZE bytes
- (usually 4 Kbyte or more).
- Note: stack should grow in larger steps, e.g. 4Kbyte, 16Kbyte or more.
- Note: this structure may not be supported by all operating systems.
- Some kind of fallback mechanism is suggested when SLJIT_UTIL_STACK
- is not defined. */
+ operating systems. Instead of allocating a large memory block
+ applications can allocate a small memory region and extend it
+ later without moving the content of the memory area. Therefore
+ after a successful resize by sljit_stack_resize all pointers into
+ this region are still valid.
+
+ Note:
+ this structure may not be supported by all operating systems.
+ end and min_start fields are aligned to PAGE_SIZE bytes (usually
+ 4 Kbyte or more).
+ stack should grow in larger steps, e.g. 4Kbyte, 16Kbyte or more. */
struct sljit_stack {
/* User data, anything can be stored here.
- Starting with the same value as base. */
+ Initialized to the same value as the end field. */
sljit_u8 *top;
- /* These members are read only. */
- sljit_u8 *base;
- sljit_u8 *limit;
- sljit_u8 *max_limit;
+/* These members are read only. */
+ /* End address of the stack */
+ sljit_u8 *end;
+ /* Current start address of the stack. */
+ sljit_u8 *start;
+ /* Lowest start address of the stack. */
+ sljit_u8 *min_start;
};
-/* Returns NULL if unsuccessful.
- Note: max_limit contains the maximum stack size in bytes.
- Note: limit contains the starting stack size in bytes.
- Note: the top field is initialized to base.
+/* Allocates a new stack. Returns NULL if unsuccessful.
Note: see sljit_create_compiler for the explanation of allocator_data. */
-SLJIT_API_FUNC_ATTRIBUTE struct sljit_stack* SLJIT_CALL sljit_allocate_stack(sljit_uw limit, sljit_uw max_limit, void *allocator_data);
-SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_free_stack(struct sljit_stack *stack, void *allocator_data);
-
-/* Can be used to increase (allocate) or decrease (free) the memory area.
- Returns with a non-zero value if unsuccessful. If new_limit is greater than
- max_limit, it will fail. It is very easy to implement a stack data structure,
- since the growth ratio can be added to the current limit, and sljit_stack_resize
- will do all the necessary checks. The fields of the stack are not changed if
- sljit_stack_resize fails. */
-SLJIT_API_FUNC_ATTRIBUTE sljit_sw SLJIT_CALL sljit_stack_resize(struct sljit_stack *stack, sljit_u8 *new_limit);
+SLJIT_API_FUNC_ATTRIBUTE struct sljit_stack* SLJIT_FUNC sljit_allocate_stack(sljit_uw start_size, sljit_uw max_size, void *allocator_data);
+SLJIT_API_FUNC_ATTRIBUTE void SLJIT_FUNC sljit_free_stack(struct sljit_stack *stack, void *allocator_data);
+
+/* Can be used to increase (extend) or decrease (shrink) the stack
+ memory area. Returns with new_start if successful and NULL otherwise.
+ It always fails if new_start is less than min_start or greater than or
+ equal to the end field. The fields of the stack are not changed if the returned
+ value is NULL (the current memory content is never lost). */
+SLJIT_API_FUNC_ATTRIBUTE sljit_u8 *SLJIT_FUNC sljit_stack_resize(struct sljit_stack *stack, sljit_u8 *new_start);
#endif /* (defined SLJIT_UTIL_STACK && SLJIT_UTIL_STACK) */
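A sketch of the renamed stack interface (illustrative; the sizes are arbitrary and assumed to be multiples of the page size, as the note above recommends):

    struct sljit_stack *stack = sljit_allocate_stack(8192, 65536, NULL);
    if (stack != NULL) {
        /* grow the usable region by one page: move start down towards min_start */
        sljit_u8 *new_start = sljit_stack_resize(stack, stack->start - 4096);
        if (new_start == NULL) {
            /* resize failed; start, end and min_start are unchanged */
        }
        sljit_free_stack(stack, NULL);
    }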
diff --git a/src/3rdparty/pcre2/src/sljit/sljitNativeARM_32.c b/src/3rdparty/pcre2/src/sljit/sljitNativeARM_32.c
index 745da99f61..6d61eed9a7 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitNativeARM_32.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitNativeARM_32.c
@@ -24,12 +24,18 @@
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#ifdef __SOFTFP__
+#define ARM_ABI_INFO " ABI:softfp"
+#else
+#define ARM_ABI_INFO " ABI:hardfp"
+#endif
+
SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void)
{
#if (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)
- return "ARMv7" SLJIT_CPUINFO;
+ return "ARMv7" SLJIT_CPUINFO ARM_ABI_INFO;
#elif (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
- return "ARMv5" SLJIT_CPUINFO;
+ return "ARMv5" SLJIT_CPUINFO ARM_ABI_INFO;
#else
#error "Internal error: Unknown ARM architecture"
#endif
@@ -40,8 +46,8 @@ SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void)
#define TMP_REG2 (SLJIT_NUMBER_OF_REGISTERS + 3)
#define TMP_PC (SLJIT_NUMBER_OF_REGISTERS + 4)
-#define TMP_FREG1 (0)
-#define TMP_FREG2 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1)
+#define TMP_FREG1 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1)
+#define TMP_FREG2 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 2)
/* In ARM instruction words.
Cache lines are usually 32 byte aligned. */
@@ -55,7 +61,11 @@ SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void)
/* See sljit_emit_enter and sljit_emit_op0 if you want to change them. */
static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 5] = {
- 0, 0, 1, 2, 3, 11, 10, 9, 8, 7, 6, 5, 4, 13, 14, 12, 15
+ 0, 0, 1, 2, 3, 11, 10, 9, 8, 7, 6, 5, 4, 13, 12, 14, 15
+};
+
+static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
+ 0, 0, 1, 2, 3, 4, 5, 6, 7
};
#define RM(rm) (reg_map[rm])
@@ -72,32 +82,31 @@ static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 5] = {
#define CONDITIONAL 0xe0000000
#define PUSH_POOL 0xff000000
-/* DP - Data Processing instruction (use with EMIT_DATA_PROCESS_INS). */
-#define ADC_DP 0x5
-#define ADD_DP 0x4
-#define AND_DP 0x0
+#define ADC 0xe0a00000
+#define ADD 0xe0800000
+#define AND 0xe0000000
#define B 0xea000000
-#define BIC_DP 0xe
+#define BIC 0xe1c00000
#define BL 0xeb000000
#define BLX 0xe12fff30
#define BX 0xe12fff10
#define CLZ 0xe16f0f10
-#define CMN_DP 0xb
-#define CMP_DP 0xa
+#define CMN 0xe1600000
+#define CMP 0xe1400000
#define BKPT 0xe1200070
-#define EOR_DP 0x1
-#define MOV_DP 0xd
+#define EOR 0xe0200000
+#define MOV 0xe1a00000
#define MUL 0xe0000090
-#define MVN_DP 0xf
+#define MVN 0xe1e00000
#define NOP 0xe1a00000
-#define ORR_DP 0xc
+#define ORR 0xe1800000
#define PUSH 0xe92d0000
#define POP 0xe8bd0000
-#define RSB_DP 0x3
-#define RSC_DP 0x7
-#define SBC_DP 0x6
+#define RSB 0xe0600000
+#define RSC 0xe0e00000
+#define SBC 0xe0c00000
#define SMULL 0xe0c00090
-#define SUB_DP 0x2
+#define SUB 0xe0400000
#define UMULL 0xe0800090
#define VABS_F32 0xeeb00ac0
#define VADD_F32 0xee300a00
@@ -108,6 +117,7 @@ static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 5] = {
#define VDIV_F32 0xee800a00
#define VMOV_F32 0xeeb00a40
#define VMOV 0xee000a10
+#define VMOV2 0xec400a10
#define VMRS 0xeef1fa10
#define VMUL_F32 0xee200a00
#define VNEG_F32 0xeeb10a40
@@ -260,7 +270,9 @@ static SLJIT_INLINE sljit_s32 emit_blx(struct sljit_compiler *compiler)
{
/* Must follow tightly the previous instruction (to be able to convert it to bl instruction). */
SLJIT_ASSERT(compiler->cpool_diff == CONST_POOL_EMPTY || compiler->size - compiler->cpool_diff < MAX_DIFFERENCE(4092));
- return push_inst(compiler, BLX | RM(TMP_REG2));
+ SLJIT_ASSERT(reg_map[TMP_REG1] != 14);
+
+ return push_inst(compiler, BLX | RM(TMP_REG1));
}
static sljit_uw patch_pc_relative_loads(sljit_uw *last_pc_patch, sljit_uw *code_ptr, sljit_uw* const_pool, sljit_uw cpool_size)
@@ -825,7 +837,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
return 1;
#endif
- case SLJIT_HAS_PRE_UPDATE:
case SLJIT_HAS_CLZ:
case SLJIT_HAS_CMOV:
return 1;
@@ -840,18 +851,16 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
/* --------------------------------------------------------------------- */
/* Creates an index in data_transfer_insts array. */
-#define WORD_DATA 0x00
-#define BYTE_DATA 0x01
-#define HALF_DATA 0x02
-#define PRELOAD_DATA 0x03
-#define SIGNED_DATA 0x04
+#define WORD_SIZE 0x00
+#define BYTE_SIZE 0x01
+#define HALF_SIZE 0x02
+#define PRELOAD 0x03
+#define SIGNED 0x04
#define LOAD_DATA 0x08
-/* emit_op inp_flags.
- WRITE_BACK must be the first, since it is a flag. */
-#define WRITE_BACK 0x10
-#define ALLOW_IMM 0x20
-#define ALLOW_INV_IMM 0x40
+/* Flag bits for emit_op. */
+#define ALLOW_IMM 0x10
+#define ALLOW_INV_IMM 0x20
#define ALLOW_ANY_IMM (ALLOW_IMM | ALLOW_INV_IMM)
/* s/l - store/load (1 bit)
@@ -872,15 +881,15 @@ static const sljit_uw data_transfer_insts[16] = {
/* l u w */ 0xe5100000 /* ldr */,
/* l u b */ 0xe5500000 /* ldrb */,
/* l u h */ 0xe11000b0 /* ldrh */,
-/* l u p */ 0xf5500000 /* preload data */,
+/* l u p */ 0xf5500000 /* preload */,
/* l s w */ 0xe5100000 /* ldr */,
/* l s b */ 0xe11000d0 /* ldrsb */,
/* l s h */ 0xe11000f0 /* ldrsh */,
/* l s N */ 0x00000000 /* not allowed */,
};
-#define EMIT_DATA_TRANSFER(type, add, wb, target_reg, base_reg, arg) \
- (data_transfer_insts[(type) & 0xf] | ((add) << 23) | ((wb) << (21 - 4)) | RD(target_reg) | RN(base_reg) | (arg))
+#define EMIT_DATA_TRANSFER(type, add, target_reg, base_reg, arg) \
+ (data_transfer_insts[(type) & 0xf] | ((add) << 23) | RD(target_reg) | RN(base_reg) | (arg))
/* Normal ldr/str instruction.
Type2: ldrsb, ldrh, ldrsh */
@@ -889,25 +898,21 @@ static const sljit_uw data_transfer_insts[16] = {
#define TYPE2_TRANSFER_IMM(imm) \
(((imm) & 0xf) | (((imm) & 0xf0) << 4) | (1 << 22))
-/* Condition: AL. */
-#define EMIT_DATA_PROCESS_INS(opcode, set_flags, dst, src1, src2) \
- (0xe0000000 | ((opcode) << 21) | (set_flags) | RD(dst) | RN(src1) | (src2))
-
static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 inp_flags,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w);
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler,
- sljit_s32 options, sljit_s32 args, sljit_s32 scratches, sljit_s32 saveds,
+ sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
- sljit_s32 size, i, tmp;
+ sljit_s32 args, size, i, tmp;
sljit_uw push;
CHECK_ERROR();
- CHECK(check_sljit_emit_enter(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size));
- set_emit_enter(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size);
+ CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
+ set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
/* Push saved registers, temporary registers
stmdb sp!, {..., lr} */
@@ -929,25 +934,27 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
if (local_size > 0)
FAIL_IF(emit_op(compiler, SLJIT_SUB, ALLOW_IMM, SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, local_size));
+ args = get_arg_count(arg_types);
+
if (args >= 1)
- FAIL_IF(push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, 0, SLJIT_S0, SLJIT_UNUSED, RM(SLJIT_R0))));
+ FAIL_IF(push_inst(compiler, MOV | RD(SLJIT_S0) | RM(SLJIT_R0)));
if (args >= 2)
- FAIL_IF(push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, 0, SLJIT_S1, SLJIT_UNUSED, RM(SLJIT_R1))));
+ FAIL_IF(push_inst(compiler, MOV | RD(SLJIT_S1) | RM(SLJIT_R1)));
if (args >= 3)
- FAIL_IF(push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, 0, SLJIT_S2, SLJIT_UNUSED, RM(SLJIT_R2))));
+ FAIL_IF(push_inst(compiler, MOV | RD(SLJIT_S2) | RM(SLJIT_R2)));
return SLJIT_SUCCESS;
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler,
- sljit_s32 options, sljit_s32 args, sljit_s32 scratches, sljit_s32 saveds,
+ sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
sljit_s32 size;
CHECK_ERROR();
- CHECK(check_sljit_set_context(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size));
- set_set_context(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size);
+ CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
+ set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1);
compiler->local_size = ((size + local_size + 7) & ~7) - size;
@@ -1009,12 +1016,12 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *comp
SLJIT_ASSERT(!(flags & ARGS_SWAPPED)); \
\
if (compiler->shift_imm != 0) \
- return push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, flags & SET_FLAGS, \
- dst, SLJIT_UNUSED, (compiler->shift_imm << 7) | (opcode << 5) | RM(src2))); \
- return push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, flags & SET_FLAGS, dst, SLJIT_UNUSED, RM(src2))); \
+ return push_inst(compiler, MOV | (flags & SET_FLAGS) | \
+ RD(dst) | (compiler->shift_imm << 7) | (opcode << 5) | RM(src2)); \
+ return push_inst(compiler, MOV | (flags & SET_FLAGS) | RD(dst) | RM(src2)); \
} \
- return push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, flags & SET_FLAGS, \
- dst, SLJIT_UNUSED, (reg_map[(flags & ARGS_SWAPPED) ? src1 : src2] << 8) | (opcode << 5) | 0x10 | RM((flags & ARGS_SWAPPED) ? src2 : src1)));
+ return push_inst(compiler, MOV | (flags & SET_FLAGS) | RD(dst) | \
+ (reg_map[(flags & ARGS_SWAPPED) ? src1 : src2] << 8) | (opcode << 5) | 0x10 | RM((flags & ARGS_SWAPPED) ? src2 : src1));
static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags,
sljit_s32 dst, sljit_s32 src1, sljit_s32 src2)
@@ -1024,10 +1031,9 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & ARGS_SWAPPED));
if (dst != src2) {
if (src2 & SRC2_IMM) {
- return push_inst(compiler, EMIT_DATA_PROCESS_INS((flags & INV_IMM) ? MVN_DP : MOV_DP, 0,
- dst, SLJIT_UNUSED, src2));
+ return push_inst(compiler, ((flags & INV_IMM) ? MVN : MOV) | RD(dst) | src2);
}
- return push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, 0, dst, SLJIT_UNUSED, RM(src2)));
+ return push_inst(compiler, MOV | RD(dst) | RM(src2));
}
return SLJIT_SUCCESS;
@@ -1037,17 +1043,16 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
if (flags & MOVE_REG_CONV) {
#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
if (op == SLJIT_MOV_U8)
- return push_inst(compiler, EMIT_DATA_PROCESS_INS(AND_DP, 0, dst, src2, SRC2_IMM | 0xff));
- FAIL_IF(push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, 0, dst, SLJIT_UNUSED, (24 << 7) | RM(src2))));
- return push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, 0, dst, SLJIT_UNUSED, (24 << 7) | (op == SLJIT_MOV_U8 ? 0x20 : 0x40) | RM(dst)));
+ return push_inst(compiler, AND | RD(dst) | RN(src2) | SRC2_IMM | 0xff);
+ FAIL_IF(push_inst(compiler, MOV | RD(dst) | (24 << 7) | RM(src2)));
+ return push_inst(compiler, MOV | RD(dst) | (24 << 7) | (op == SLJIT_MOV_U8 ? 0x20 : 0x40) | RM(dst));
#else
return push_inst(compiler, (op == SLJIT_MOV_U8 ? UXTB : SXTB) | RD(dst) | RM(src2));
#endif
}
else if (dst != src2) {
SLJIT_ASSERT(src2 & SRC2_IMM);
- return push_inst(compiler, EMIT_DATA_PROCESS_INS((flags & INV_IMM) ? MVN_DP : MOV_DP, 0,
- dst, SLJIT_UNUSED, src2));
+ return push_inst(compiler, ((flags & INV_IMM) ? MVN : MOV) | RD(dst) | src2);
}
return SLJIT_SUCCESS;
@@ -1056,25 +1061,23 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & ARGS_SWAPPED));
if (flags & MOVE_REG_CONV) {
#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
- FAIL_IF(push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, 0, dst, SLJIT_UNUSED, (16 << 7) | RM(src2))));
- return push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, 0, dst, SLJIT_UNUSED, (16 << 7) | (op == SLJIT_MOV_U16 ? 0x20 : 0x40) | RM(dst)));
+ FAIL_IF(push_inst(compiler, MOV | RD(dst) | (16 << 7) | RM(src2)));
+ return push_inst(compiler, MOV | RD(dst) | (16 << 7) | (op == SLJIT_MOV_U16 ? 0x20 : 0x40) | RM(dst));
#else
return push_inst(compiler, (op == SLJIT_MOV_U16 ? UXTH : SXTH) | RD(dst) | RM(src2));
#endif
}
else if (dst != src2) {
SLJIT_ASSERT(src2 & SRC2_IMM);
- return push_inst(compiler, EMIT_DATA_PROCESS_INS((flags & INV_IMM) ? MVN_DP : MOV_DP, 0,
- dst, SLJIT_UNUSED, src2));
+ return push_inst(compiler, ((flags & INV_IMM) ? MVN : MOV) | RD(dst) | src2);
}
return SLJIT_SUCCESS;
case SLJIT_NOT:
if (src2 & SRC2_IMM) {
- return push_inst(compiler, EMIT_DATA_PROCESS_INS((flags & INV_IMM) ? MOV_DP : MVN_DP, flags & SET_FLAGS,
- dst, SLJIT_UNUSED, src2));
+ return push_inst(compiler, ((flags & INV_IMM) ? MOV : MVN) | (flags & SET_FLAGS) | RD(dst) | src2);
}
- return push_inst(compiler, EMIT_DATA_PROCESS_INS(MVN_DP, flags & SET_FLAGS, dst, SLJIT_UNUSED, RM(src2)));
+ return push_inst(compiler, MVN | (flags & SET_FLAGS) | RD(dst) | RM(src2));
case SLJIT_CLZ:
SLJIT_ASSERT(!(flags & INV_IMM));
@@ -1085,28 +1088,24 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
case SLJIT_ADD:
SLJIT_ASSERT(!(flags & INV_IMM));
if ((flags & (UNUSED_RETURN | SET_FLAGS)) == (UNUSED_RETURN | SET_FLAGS) && !(flags & ARGS_SWAPPED))
- return push_inst(compiler, EMIT_DATA_PROCESS_INS(CMN_DP, SET_FLAGS,
- SLJIT_UNUSED, src1, (src2 & SRC2_IMM) ? src2 : RM(src2)));
- return push_inst(compiler, EMIT_DATA_PROCESS_INS(ADD_DP, flags & SET_FLAGS,
- dst, src1, (src2 & SRC2_IMM) ? src2 : RM(src2)));
+ return push_inst(compiler, CMN | SET_FLAGS | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2)));
+ return push_inst(compiler, ADD | (flags & SET_FLAGS) | RD(dst) | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2)));
case SLJIT_ADDC:
SLJIT_ASSERT(!(flags & INV_IMM));
- return push_inst(compiler, EMIT_DATA_PROCESS_INS(ADC_DP, flags & SET_FLAGS,
- dst, src1, (src2 & SRC2_IMM) ? src2 : RM(src2)));
+ return push_inst(compiler, ADC | (flags & SET_FLAGS) | RD(dst) | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2)));
case SLJIT_SUB:
SLJIT_ASSERT(!(flags & INV_IMM));
if ((flags & (UNUSED_RETURN | SET_FLAGS)) == (UNUSED_RETURN | SET_FLAGS) && !(flags & ARGS_SWAPPED))
- return push_inst(compiler, EMIT_DATA_PROCESS_INS(CMP_DP, SET_FLAGS,
- SLJIT_UNUSED, src1, (src2 & SRC2_IMM) ? src2 : RM(src2)));
- return push_inst(compiler, EMIT_DATA_PROCESS_INS(!(flags & ARGS_SWAPPED) ? SUB_DP : RSB_DP, flags & SET_FLAGS,
- dst, src1, (src2 & SRC2_IMM) ? src2 : RM(src2)));
+ return push_inst(compiler, CMP | SET_FLAGS | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2)));
+ return push_inst(compiler, (!(flags & ARGS_SWAPPED) ? SUB : RSB) | (flags & SET_FLAGS)
+ | RD(dst) | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2)));
case SLJIT_SUBC:
SLJIT_ASSERT(!(flags & INV_IMM));
- return push_inst(compiler, EMIT_DATA_PROCESS_INS(!(flags & ARGS_SWAPPED) ? SBC_DP : RSC_DP, flags & SET_FLAGS,
- dst, src1, (src2 & SRC2_IMM) ? src2 : RM(src2)));
+ return push_inst(compiler, (!(flags & ARGS_SWAPPED) ? SBC : RSC) | (flags & SET_FLAGS)
+ | RD(dst) | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2)));
case SLJIT_MUL:
SLJIT_ASSERT(!(flags & INV_IMM));
@@ -1118,19 +1117,19 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
FAIL_IF(push_inst(compiler, SMULL | (reg_map[TMP_REG1] << 16) | (reg_map[dst] << 12) | (reg_map[src2] << 8) | reg_map[src1]));
/* cmp TMP_REG1, dst asr #31. */
- return push_inst(compiler, EMIT_DATA_PROCESS_INS(CMP_DP, SET_FLAGS, SLJIT_UNUSED, TMP_REG1, RM(dst) | 0xfc0));
+ return push_inst(compiler, CMP | SET_FLAGS | RN(TMP_REG1) | RM(dst) | 0xfc0);
case SLJIT_AND:
- return push_inst(compiler, EMIT_DATA_PROCESS_INS(!(flags & INV_IMM) ? AND_DP : BIC_DP, flags & SET_FLAGS,
- dst, src1, (src2 & SRC2_IMM) ? src2 : RM(src2)));
+ return push_inst(compiler, (!(flags & INV_IMM) ? AND : BIC) | (flags & SET_FLAGS)
+ | RD(dst) | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2)));
case SLJIT_OR:
SLJIT_ASSERT(!(flags & INV_IMM));
- return push_inst(compiler, EMIT_DATA_PROCESS_INS(ORR_DP, flags & SET_FLAGS, dst, src1, (src2 & SRC2_IMM) ? src2 : RM(src2)));
+ return push_inst(compiler, ORR | (flags & SET_FLAGS) | RD(dst) | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2)));
case SLJIT_XOR:
SLJIT_ASSERT(!(flags & INV_IMM));
- return push_inst(compiler, EMIT_DATA_PROCESS_INS(EOR_DP, flags & SET_FLAGS, dst, src1, (src2 & SRC2_IMM) ? src2 : RM(src2)));
+ return push_inst(compiler, EOR | (flags & SET_FLAGS) | RD(dst) | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2)));
case SLJIT_SHL:
EMIT_SHIFT_INS_AND_RETURN(0);
@@ -1293,8 +1292,8 @@ static sljit_s32 generate_int(struct sljit_compiler *compiler, sljit_s32 reg, sl
return 0;
}
- FAIL_IF(push_inst(compiler, EMIT_DATA_PROCESS_INS(positive ? MOV_DP : MVN_DP, 0, reg, SLJIT_UNUSED, imm1)));
- FAIL_IF(push_inst(compiler, EMIT_DATA_PROCESS_INS(positive ? ORR_DP : BIC_DP, 0, reg, reg, imm2)));
+ FAIL_IF(push_inst(compiler, (positive ? MOV : MVN) | RD(reg) | imm1));
+ FAIL_IF(push_inst(compiler, (positive ? ORR : BIC) | RD(reg) | RN(reg) | imm2));
return 1;
}
#endif
@@ -1311,11 +1310,11 @@ static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 reg,
/* Create imm by 1 inst. */
tmp = get_imm(imm);
if (tmp)
- return push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, 0, reg, SLJIT_UNUSED, tmp));
+ return push_inst(compiler, MOV | RD(reg) | tmp);
tmp = get_imm(~imm);
if (tmp)
- return push_inst(compiler, EMIT_DATA_PROCESS_INS(MVN_DP, 0, reg, SLJIT_UNUSED, tmp));
+ return push_inst(compiler, MVN | RD(reg) | tmp);
#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
/* Create imm by 2 inst. */
@@ -1323,7 +1322,7 @@ static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 reg,
FAIL_IF(generate_int(compiler, reg, ~imm, 0));
/* Load integer. */
- return push_inst_with_literal(compiler, EMIT_DATA_TRANSFER(WORD_DATA | LOAD_DATA, 1, 0, reg, TMP_PC, 0), imm);
+ return push_inst_with_literal(compiler, EMIT_DATA_TRANSFER(WORD_SIZE | LOAD_DATA, 1, reg, TMP_PC, 0), imm);
#else
FAIL_IF(push_inst(compiler, MOVW | RD(reg) | ((imm << 4) & 0xf0000) | (imm & 0xfff)));
if (imm <= 0xffff)
@@ -1335,16 +1334,13 @@ static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 reg,
static SLJIT_INLINE sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg,
sljit_s32 arg, sljit_sw argw, sljit_s32 tmp_reg)
{
- sljit_uw offset_reg, imm;
+ sljit_uw imm, offset_reg;
sljit_uw is_type1_transfer = IS_TYPE1_TRANSFER(flags);
SLJIT_ASSERT (arg & SLJIT_MEM);
SLJIT_ASSERT((arg & REG_MASK) != tmp_reg);
- SLJIT_COMPILE_ASSERT(WRITE_BACK == 0x10, optimized_for_emit_data_transfer);
-
if ((arg & REG_MASK) == SLJIT_UNUSED) {
- /* Write back is not used. */
if (is_type1_transfer) {
FAIL_IF(load_immediate(compiler, tmp_reg, argw & ~0xfff));
argw &= 0xfff;
@@ -1354,7 +1350,8 @@ static SLJIT_INLINE sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit
argw &= 0xff;
}
- return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, 0, reg, tmp_reg, is_type1_transfer ? argw : TYPE2_TRANSFER_IMM(argw)));
+ return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, reg, tmp_reg,
+ is_type1_transfer ? argw : TYPE2_TRANSFER_IMM(argw)));
}
if (arg & OFFS_REG_MASK) {
@@ -1363,14 +1360,12 @@ static SLJIT_INLINE sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit
argw &= 0x3;
if (argw != 0 && !is_type1_transfer) {
- SLJIT_ASSERT(!(flags & WRITE_BACK));
-
- FAIL_IF(push_inst(compiler, EMIT_DATA_PROCESS_INS(ADD_DP, 0, tmp_reg, arg, RM(offset_reg) | (argw << 7))));
- return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, 0, reg, tmp_reg, TYPE2_TRANSFER_IMM(0)));
+ FAIL_IF(push_inst(compiler, ADD | RD(tmp_reg) | RN(arg) | RM(offset_reg) | (argw << 7)));
+ return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, reg, tmp_reg, TYPE2_TRANSFER_IMM(0)));
}
/* Bit 25: RM is offset. */
- return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, flags & WRITE_BACK, reg, arg,
+ return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, reg, arg,
RM(offset_reg) | (is_type1_transfer ? (1 << 25) : 0) | (argw << 7)));
}
@@ -1380,60 +1375,55 @@ static SLJIT_INLINE sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit
if (argw > 0xfff) {
imm = get_imm(argw & ~0xfff);
if (imm) {
- offset_reg = (flags & WRITE_BACK) ? arg : tmp_reg;
- FAIL_IF(push_inst(compiler, EMIT_DATA_PROCESS_INS(ADD_DP, 0, offset_reg, arg, imm)));
+ FAIL_IF(push_inst(compiler, ADD | RD(tmp_reg) | RN(arg) | imm));
argw = argw & 0xfff;
- arg = offset_reg;
+ arg = tmp_reg;
}
}
else if (argw < -0xfff) {
imm = get_imm(-argw & ~0xfff);
if (imm) {
- offset_reg = (flags & WRITE_BACK) ? arg : tmp_reg;
- FAIL_IF(push_inst(compiler, EMIT_DATA_PROCESS_INS(SUB_DP, 0, offset_reg, arg, imm)));
+ FAIL_IF(push_inst(compiler, SUB | RD(tmp_reg) | RN(arg) | imm));
argw = -(-argw & 0xfff);
- arg = offset_reg;
+ arg = tmp_reg;
}
}
- if (argw >= 0 && argw <= 0xfff) {
- return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, flags & WRITE_BACK, reg, arg & REG_MASK, argw));
- }
- if (argw < 0 && argw >= -0xfff) {
- return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 0, flags & WRITE_BACK, reg, arg & REG_MASK, -argw));
- }
+ if (argw >= 0 && argw <= 0xfff)
+ return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, reg, arg, argw));
+
+ if (argw < 0 && argw >= -0xfff)
+ return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 0, reg, arg, -argw));
}
else {
if (argw > 0xff) {
imm = get_imm(argw & ~0xff);
if (imm) {
- offset_reg = (flags & WRITE_BACK) ? arg : tmp_reg;
- FAIL_IF(push_inst(compiler, EMIT_DATA_PROCESS_INS(ADD_DP, 0, offset_reg, arg, imm)));
+ FAIL_IF(push_inst(compiler, ADD | RD(tmp_reg) | RN(arg) | imm));
argw = argw & 0xff;
- arg = offset_reg;
+ arg = tmp_reg;
}
}
else if (argw < -0xff) {
imm = get_imm(-argw & ~0xff);
if (imm) {
- offset_reg = (flags & WRITE_BACK) ? arg : tmp_reg;
- FAIL_IF(push_inst(compiler, EMIT_DATA_PROCESS_INS(SUB_DP, 0, offset_reg, arg, imm)));
+ FAIL_IF(push_inst(compiler, SUB | RD(tmp_reg) | RN(arg) | imm));
argw = -(-argw & 0xff);
- arg = offset_reg;
+ arg = tmp_reg;
}
}
- if (argw >= 0 && argw <= 0xff) {
- return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, flags & WRITE_BACK, reg, arg, TYPE2_TRANSFER_IMM(argw)));
- }
+ if (argw >= 0 && argw <= 0xff)
+ return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, reg, arg, TYPE2_TRANSFER_IMM(argw)));
+
if (argw < 0 && argw >= -0xff) {
argw = -argw;
- return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 0, flags & WRITE_BACK, reg, arg, TYPE2_TRANSFER_IMM(argw)));
+ return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 0, reg, arg, TYPE2_TRANSFER_IMM(argw)));
}
}
FAIL_IF(load_immediate(compiler, tmp_reg, argw));
- return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, flags & WRITE_BACK, reg, arg,
+ return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, reg, arg,
RM(tmp_reg) | (is_type1_transfer ? (1 << 25) : 0)));
}
@@ -1536,10 +1526,10 @@ static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s3
/* Destination. */
dst_reg = SLOW_IS_REG(dst) ? dst : TMP_REG2;
- if (op <= SLJIT_MOVU_P) {
+ if (op <= SLJIT_MOV_P) {
if (dst & SLJIT_MEM) {
- if (inp_flags & BYTE_DATA)
- inp_flags &= ~SIGNED_DATA;
+ if (inp_flags & BYTE_SIZE)
+ inp_flags &= ~SIGNED;
if (FAST_IS_REG(src2))
return emit_op_mem(compiler, inp_flags, src2, dst, dstw, TMP_REG2);
@@ -1551,7 +1541,7 @@ static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s3
/* Source 2. */
if (src2_reg == 0) {
- src2_reg = (op <= SLJIT_MOVU_P) ? dst_reg : TMP_REG2;
+ src2_reg = (op <= SLJIT_MOV_P) ? dst_reg : TMP_REG2;
if (FAST_IS_REG(src2))
src2_reg = src2;
@@ -1672,7 +1662,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
if (dst == SLJIT_UNUSED && !HAS_FLAGS(op)) {
#if (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)
if (op <= SLJIT_MOV_P && (src & SLJIT_MEM))
- return emit_op_mem(compiler, PRELOAD_DATA | LOAD_DATA, TMP_PC, src, srcw, TMP_REG1);
+ return emit_op_mem(compiler, PRELOAD | LOAD_DATA, TMP_PC, src, srcw, TMP_REG1);
#endif
return SLJIT_SUCCESS;
}
@@ -1685,34 +1675,16 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
return emit_op(compiler, SLJIT_MOV, ALLOW_ANY_IMM, dst, dstw, TMP_REG1, 0, src, srcw);
case SLJIT_MOV_U8:
- return emit_op(compiler, SLJIT_MOV_U8, ALLOW_ANY_IMM | BYTE_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u8)srcw : srcw);
+ return emit_op(compiler, SLJIT_MOV_U8, ALLOW_ANY_IMM | BYTE_SIZE, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u8)srcw : srcw);
case SLJIT_MOV_S8:
- return emit_op(compiler, SLJIT_MOV_S8, ALLOW_ANY_IMM | SIGNED_DATA | BYTE_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s8)srcw : srcw);
+ return emit_op(compiler, SLJIT_MOV_S8, ALLOW_ANY_IMM | SIGNED | BYTE_SIZE, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s8)srcw : srcw);
case SLJIT_MOV_U16:
- return emit_op(compiler, SLJIT_MOV_U16, ALLOW_ANY_IMM | HALF_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u16)srcw : srcw);
+ return emit_op(compiler, SLJIT_MOV_U16, ALLOW_ANY_IMM | HALF_SIZE, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u16)srcw : srcw);
case SLJIT_MOV_S16:
- return emit_op(compiler, SLJIT_MOV_S16, ALLOW_ANY_IMM | SIGNED_DATA | HALF_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s16)srcw : srcw);
-
- case SLJIT_MOVU:
- case SLJIT_MOVU_U32:
- case SLJIT_MOVU_S32:
- case SLJIT_MOVU_P:
- return emit_op(compiler, SLJIT_MOV, ALLOW_ANY_IMM | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, srcw);
-
- case SLJIT_MOVU_U8:
- return emit_op(compiler, SLJIT_MOV_U8, ALLOW_ANY_IMM | BYTE_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u8)srcw : srcw);
-
- case SLJIT_MOVU_S8:
- return emit_op(compiler, SLJIT_MOV_S8, ALLOW_ANY_IMM | SIGNED_DATA | BYTE_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s8)srcw : srcw);
-
- case SLJIT_MOVU_U16:
- return emit_op(compiler, SLJIT_MOV_U16, ALLOW_ANY_IMM | HALF_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u16)srcw : srcw);
-
- case SLJIT_MOVU_S16:
- return emit_op(compiler, SLJIT_MOV_S16, ALLOW_ANY_IMM | SIGNED_DATA | HALF_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s16)srcw : srcw);
+ return emit_op(compiler, SLJIT_MOV_S16, ALLOW_ANY_IMM | SIGNED | HALF_SIZE, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s16)srcw : srcw);
case SLJIT_NOT:
return emit_op(compiler, op, ALLOW_ANY_IMM, dst, dstw, TMP_REG1, 0, src, srcw);
@@ -1785,7 +1757,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg)
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg)
{
CHECK_REG_INDEX(check_sljit_get_float_register_index(reg));
- return reg << 1;
+ return (freg_map[reg] << 1);
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler,
@@ -1804,9 +1776,9 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *c
#define FPU_LOAD (1 << 20)
#define EMIT_FPU_DATA_TRANSFER(inst, add, base, freg, offs) \
- ((inst) | ((add) << 23) | (reg_map[base] << 16) | (freg << 12) | (offs))
+ ((inst) | ((add) << 23) | (reg_map[base] << 16) | (freg_map[freg] << 12) | (offs))
#define EMIT_FPU_OPERATION(opcode, mode, dst, src1, src2) \
- ((opcode) | (mode) | ((dst) << 12) | (src1) | ((src2) << 16))
+ ((opcode) | (mode) | (freg_map[dst] << 12) | freg_map[src1] | (freg_map[src2] << 16))
static sljit_s32 emit_fop_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw)
{
@@ -1817,7 +1789,7 @@ static sljit_s32 emit_fop_mem(struct sljit_compiler *compiler, sljit_s32 flags,
arg &= ~SLJIT_MEM;
if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) {
- FAIL_IF(push_inst(compiler, EMIT_DATA_PROCESS_INS(ADD_DP, 0, TMP_REG2, arg & REG_MASK, RM(OFFS_REG(arg)) | ((argw & 0x3) << 7))));
+ FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG2) | RN(arg & REG_MASK) | RM(OFFS_REG(arg)) | ((argw & 0x3) << 7)));
arg = TMP_REG2;
argw = 0;
}
@@ -1831,20 +1803,20 @@ static sljit_s32 emit_fop_mem(struct sljit_compiler *compiler, sljit_s32 flags,
imm = get_imm(argw & ~0x3fc);
if (imm) {
- FAIL_IF(push_inst(compiler, EMIT_DATA_PROCESS_INS(ADD_DP, 0, TMP_REG2, arg & REG_MASK, imm)));
+ FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG2) | RN(arg & REG_MASK) | imm));
return push_inst(compiler, EMIT_FPU_DATA_TRANSFER(inst, 1, TMP_REG2, reg, (argw & 0x3fc) >> 2));
}
imm = get_imm(-argw & ~0x3fc);
if (imm) {
argw = -argw;
- FAIL_IF(push_inst(compiler, EMIT_DATA_PROCESS_INS(SUB_DP, 0, TMP_REG2, arg & REG_MASK, imm)));
+ FAIL_IF(push_inst(compiler, SUB | RD(TMP_REG2) | RN(arg & REG_MASK) | imm));
return push_inst(compiler, EMIT_FPU_DATA_TRANSFER(inst, 0, TMP_REG2, reg, (argw & 0x3fc) >> 2));
}
}
if (arg) {
FAIL_IF(load_immediate(compiler, TMP_REG2, argw));
- FAIL_IF(push_inst(compiler, EMIT_DATA_PROCESS_INS(ADD_DP, 0, TMP_REG2, arg & REG_MASK, RM(TMP_REG2))));
+ FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG2) | RN(arg & REG_MASK) | RM(TMP_REG2)));
}
else
FAIL_IF(load_immediate(compiler, TMP_REG2, argw));
@@ -1866,7 +1838,7 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_comp
FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCVT_S32_F32, op & SLJIT_F32_OP, TMP_FREG1, src, 0)));
if (FAST_IS_REG(dst))
- return push_inst(compiler, VMOV | (1 << 20) | RD(dst) | (TMP_FREG1 << 16));
+ return push_inst(compiler, VMOV | (1 << 20) | RD(dst) | (freg_map[TMP_FREG1] << 16));
/* Store the integer value from a VFP register. */
return emit_fop_mem(compiler, 0, TMP_FREG1, dst, dstw);
@@ -1881,14 +1853,14 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_comp
op ^= SLJIT_F32_OP;
if (FAST_IS_REG(src))
- FAIL_IF(push_inst(compiler, VMOV | RD(src) | (TMP_FREG1 << 16)));
+ FAIL_IF(push_inst(compiler, VMOV | RD(src) | (freg_map[TMP_FREG1] << 16)));
else if (src & SLJIT_MEM) {
/* Load the integer value into a VFP register. */
FAIL_IF(emit_fop_mem(compiler, FPU_LOAD, TMP_FREG1, src, srcw));
}
else {
FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
- FAIL_IF(push_inst(compiler, VMOV | RD(TMP_REG1) | (TMP_FREG1 << 16)));
+ FAIL_IF(push_inst(compiler, VMOV | RD(TMP_REG1) | (freg_map[TMP_FREG1] << 16)));
}
FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCVT_F32_S32, op & SLJIT_F32_OP, dst_r, TMP_FREG1, 0)));
@@ -2018,7 +1990,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compil
#undef FPU_LOAD
#undef EMIT_FPU_DATA_TRANSFER
-#undef EMIT_FPU_OPERATION
/* --------------------------------------------------------------------- */
/* Other instructions */
@@ -2030,13 +2001,13 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *
CHECK(check_sljit_emit_fast_enter(compiler, dst, dstw));
ADJUST_LOCAL_OFFSET(dst, dstw);
- SLJIT_ASSERT(reg_map[TMP_REG1] == 14);
+ SLJIT_ASSERT(reg_map[TMP_REG2] == 14);
if (FAST_IS_REG(dst))
- return push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, 0, dst, SLJIT_UNUSED, RM(TMP_REG1)));
+ return push_inst(compiler, MOV | RD(dst) | RM(TMP_REG2));
/* Memory. */
- return emit_op_mem(compiler, WORD_DATA, TMP_REG1, dst, dstw, TMP_REG2);
+ return emit_op_mem(compiler, WORD_SIZE, TMP_REG2, dst, dstw, TMP_REG1);
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_s32 src, sljit_sw srcw)
@@ -2045,16 +2016,14 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_return(struct sljit_compiler
CHECK(check_sljit_emit_fast_return(compiler, src, srcw));
ADJUST_LOCAL_OFFSET(src, srcw);
- SLJIT_ASSERT(reg_map[TMP_REG1] == 14);
+ SLJIT_ASSERT(reg_map[TMP_REG2] == 14);
if (FAST_IS_REG(src))
- FAIL_IF(push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, 0, TMP_REG1, 0, RM(src))));
- else if (src & SLJIT_MEM)
- FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_REG1, src, srcw, TMP_REG2));
- else if (src & SLJIT_IMM)
- FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
+ FAIL_IF(push_inst(compiler, MOV | RD(TMP_REG2) | RM(src)));
+ else
+ FAIL_IF(emit_op_mem(compiler, WORD_SIZE | LOAD_DATA, TMP_REG2, src, srcw, TMP_REG1));
- return push_inst(compiler, BX | RM(TMP_REG1));
+ return push_inst(compiler, BX | RM(TMP_REG2));
}
/* --------------------------------------------------------------------- */
@@ -2111,7 +2080,7 @@ static sljit_uw get_cc(sljit_s32 type)
return 0x70000000;
default:
- SLJIT_ASSERT(type >= SLJIT_JUMP && type <= SLJIT_CALL3);
+ SLJIT_ASSERT(type >= SLJIT_JUMP && type <= SLJIT_CALL_CDECL);
return 0xe0000000;
}
}
@@ -2144,12 +2113,13 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile
set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP);
type &= 0xff;
- /* In ARM, we don't need to touch the arguments. */
+ SLJIT_ASSERT(reg_map[TMP_REG1] != 14);
+
#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
if (type >= SLJIT_FAST_CALL)
PTR_FAIL_IF(prepare_blx(compiler));
- PTR_FAIL_IF(push_inst_with_unique_literal(compiler, ((EMIT_DATA_TRANSFER(WORD_DATA | LOAD_DATA, 1, 0,
- type <= SLJIT_JUMP ? TMP_PC : TMP_REG2, TMP_PC, 0)) & ~COND_MASK) | get_cc(type), 0));
+ PTR_FAIL_IF(push_inst_with_unique_literal(compiler, ((EMIT_DATA_TRANSFER(WORD_SIZE | LOAD_DATA, 1,
+ type <= SLJIT_JUMP ? TMP_PC : TMP_REG1, TMP_PC, 0)) & ~COND_MASK) | get_cc(type), 0));
if (jump->flags & SLJIT_REWRITABLE_JUMP) {
jump->addr = compiler->size;
@@ -2166,13 +2136,248 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile
#else
if (type >= SLJIT_FAST_CALL)
jump->flags |= IS_BL;
- PTR_FAIL_IF(emit_imm(compiler, TMP_REG2, 0));
- PTR_FAIL_IF(push_inst(compiler, (((type <= SLJIT_JUMP ? BX : BLX) | RM(TMP_REG2)) & ~COND_MASK) | get_cc(type)));
+ PTR_FAIL_IF(emit_imm(compiler, TMP_REG1, 0));
+ PTR_FAIL_IF(push_inst(compiler, (((type <= SLJIT_JUMP ? BX : BLX) | RM(TMP_REG1)) & ~COND_MASK) | get_cc(type)));
jump->addr = compiler->size;
#endif
return jump;
}
+#ifdef __SOFTFP__
+
+static sljit_s32 softfloat_call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *src)
+{
+ sljit_s32 stack_offset = 0;
+ sljit_s32 arg_count = 0;
+ sljit_s32 word_arg_offset = 0;
+ sljit_s32 float_arg_count = 0;
+ sljit_s32 types = 0;
+ sljit_s32 src_offset = 4 * sizeof(sljit_sw);
+ sljit_u8 offsets[4];
+
+ if (src && FAST_IS_REG(*src))
+ src_offset = reg_map[*src] * sizeof(sljit_sw);
+
+ arg_types >>= SLJIT_DEF_SHIFT;
+
+ while (arg_types) {
+ types = (types << SLJIT_DEF_SHIFT) | (arg_types & SLJIT_DEF_MASK);
+
+ switch (arg_types & SLJIT_DEF_MASK) {
+ case SLJIT_ARG_TYPE_F32:
+ offsets[arg_count] = (sljit_u8)stack_offset;
+ stack_offset += sizeof(sljit_f32);
+ arg_count++;
+ float_arg_count++;
+ break;
+ case SLJIT_ARG_TYPE_F64:
+ if (stack_offset & 0x7)
+ stack_offset += sizeof(sljit_sw);
+ offsets[arg_count] = (sljit_u8)stack_offset;
+ stack_offset += sizeof(sljit_f64);
+ arg_count++;
+ float_arg_count++;
+ break;
+ default:
+ offsets[arg_count] = (sljit_u8)stack_offset;
+ stack_offset += sizeof(sljit_sw);
+ arg_count++;
+ word_arg_offset += sizeof(sljit_sw);
+ break;
+ }
+
+ arg_types >>= SLJIT_DEF_SHIFT;
+ }
+
+ if (stack_offset > 16)
+ FAIL_IF(push_inst(compiler, SUB | RD(SLJIT_SP) | RN(SLJIT_SP) | SRC2_IMM | (((stack_offset - 16) + 0x7) & ~0x7)));
+
+ 	/* Process arguments in reverse order. */
+ while (types) {
+ switch (types & SLJIT_DEF_MASK) {
+ case SLJIT_ARG_TYPE_F32:
+ arg_count--;
+ float_arg_count--;
+ stack_offset = offsets[arg_count];
+
+ if (stack_offset < 16) {
+ if (src_offset == stack_offset) {
+ FAIL_IF(push_inst(compiler, MOV | RD(TMP_REG1) | (src_offset >> 2)));
+ *src = TMP_REG1;
+ }
+ FAIL_IF(push_inst(compiler, VMOV | 0x100000 | (float_arg_count << 16) | (stack_offset << 10)));
+ } else
+ FAIL_IF(push_inst(compiler, VSTR_F32 | 0x800000 | RN(SLJIT_SP) | (float_arg_count << 12) | ((stack_offset - 16) >> 2)));
+ break;
+ case SLJIT_ARG_TYPE_F64:
+ arg_count--;
+ float_arg_count--;
+ stack_offset = offsets[arg_count];
+
+ SLJIT_ASSERT((stack_offset & 0x7) == 0);
+
+ if (stack_offset < 16) {
+ if (src_offset == stack_offset || src_offset == stack_offset + sizeof(sljit_sw)) {
+ FAIL_IF(push_inst(compiler, MOV | RD(TMP_REG1) | (src_offset >> 2)));
+ *src = TMP_REG1;
+ }
+ FAIL_IF(push_inst(compiler, VMOV2 | 0x100000 | (stack_offset << 10) | ((stack_offset + sizeof(sljit_sw)) << 14) | float_arg_count));
+ } else
+ FAIL_IF(push_inst(compiler, VSTR_F32 | 0x800100 | RN(SLJIT_SP) | (float_arg_count << 12) | ((stack_offset - 16) >> 2)));
+ break;
+ default:
+ arg_count--;
+ word_arg_offset -= sizeof(sljit_sw);
+ stack_offset = offsets[arg_count];
+
+ SLJIT_ASSERT(stack_offset >= word_arg_offset);
+
+ if (stack_offset != word_arg_offset) {
+ if (stack_offset < 16) {
+ if (src_offset == stack_offset) {
+ FAIL_IF(push_inst(compiler, MOV | RD(TMP_REG1) | (src_offset >> 2)));
+ *src = TMP_REG1;
+ }
+ else if (src_offset == word_arg_offset) {
+ *src = 1 + (stack_offset >> 2);
+ src_offset = stack_offset;
+ }
+ FAIL_IF(push_inst(compiler, MOV | (stack_offset << 10) | (word_arg_offset >> 2)));
+ } else
+ FAIL_IF(push_inst(compiler, data_transfer_insts[WORD_SIZE] | 0x800000 | RN(SLJIT_SP) | (word_arg_offset << 10) | (stack_offset - 16)));
+ }
+ break;
+ }
+
+ types >>= SLJIT_DEF_SHIFT;
+ }
+
+ return SLJIT_SUCCESS;
+}
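softfloat_call_with_args above lays arguments out for the soft-float calling convention: the first 16 bytes travel in r0-r3 and the remainder goes on the stack, with doubles padded to 8-byte alignment. A minimal sketch of that offset assignment, using stand-in names rather than the sljit types:

#include <stdio.h>

enum kind { WORD, F32, F64 };

int main(void)
{
    enum kind args[] = { WORD, F64, WORD };   /* e.g. (int, double, int) */
    unsigned offsets[3];
    unsigned stack_offset = 0;

    for (unsigned i = 0; i < 3; i++) {
        if (args[i] == F64 && (stack_offset & 0x7))
            stack_offset += 4;                /* pad doubles to 8-byte alignment */
        offsets[i] = stack_offset;
        stack_offset += (args[i] == F64) ? 8 : 4;
    }

    for (unsigned i = 0; i < 3; i++)
        printf("arg %u at byte offset %u -> %s\n", i, offsets[i],
               offsets[i] < 16 ? "r0-r3" : "stack");
    return 0;
}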
+
+static sljit_s32 softfloat_post_call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types)
+{
+ sljit_s32 stack_size = 0;
+
+ if ((arg_types & SLJIT_DEF_MASK) == SLJIT_ARG_TYPE_F32)
+ FAIL_IF(push_inst(compiler, VMOV | (0 << 16) | (0 << 12)));
+ if ((arg_types & SLJIT_DEF_MASK) == SLJIT_ARG_TYPE_F64)
+ FAIL_IF(push_inst(compiler, VMOV2 | (1 << 16) | (0 << 12) | 0));
+
+ arg_types >>= SLJIT_DEF_SHIFT;
+
+ while (arg_types) {
+ switch (arg_types & SLJIT_DEF_MASK) {
+ case SLJIT_ARG_TYPE_F32:
+ stack_size += sizeof(sljit_f32);
+ break;
+ case SLJIT_ARG_TYPE_F64:
+ if (stack_size & 0x7)
+ stack_size += sizeof(sljit_sw);
+ stack_size += sizeof(sljit_f64);
+ break;
+ default:
+ stack_size += sizeof(sljit_sw);
+ break;
+ }
+
+ arg_types >>= SLJIT_DEF_SHIFT;
+ }
+
+ if (stack_size <= 16)
+ return SLJIT_SUCCESS;
+
+ return push_inst(compiler, ADD | RD(SLJIT_SP) | RN(SLJIT_SP) | SRC2_IMM | (((stack_size - 16) + 0x7) & ~0x7));
+}
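After the call, the helper above appears to move a float return value from the integer registers back into the VFP file (the VMOV/VMOV2 at the top) and then pops whatever argument space exceeded the 16 bytes passed in registers, rounded up to a multiple of 8 so SP stays aligned. A tiny demonstration of that rounding expression, illustrative only:

#include <stdio.h>

int main(void)
{
    int stack_size;

    for (stack_size = 17; stack_size <= 24; stack_size++)
        printf("stack_size=%d -> pop %d bytes\n",
               stack_size, ((stack_size - 16) + 0x7) & ~0x7);
    return 0;
}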
+
+#else /* !__SOFTFP__ */
+
+static sljit_s32 hardfloat_call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types)
+{
+ sljit_u32 remap = 0;
+ sljit_u32 offset = 0;
+ sljit_u32 new_offset, mask;
+
+ /* Remove return value. */
+ arg_types >>= SLJIT_DEF_SHIFT;
+
+ while (arg_types) {
+ if ((arg_types & SLJIT_DEF_MASK) == SLJIT_ARG_TYPE_F32) {
+ new_offset = 0;
+ mask = 1;
+
+ while (remap & mask) {
+ new_offset++;
+ mask <<= 1;
+ }
+ remap |= mask;
+
+ if (offset != new_offset)
+ FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VMOV_F32,
+ 0, (new_offset >> 1) + 1, (offset >> 1) + 1, 0) | ((new_offset & 0x1) ? 0x400000 : 0)));
+
+ offset += 2;
+ }
+ else if ((arg_types & SLJIT_DEF_MASK) == SLJIT_ARG_TYPE_F64) {
+ new_offset = 0;
+ mask = 3;
+
+ while (remap & mask) {
+ new_offset += 2;
+ mask <<= 2;
+ }
+ remap |= mask;
+
+ if (offset != new_offset)
+ FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VMOV_F32, SLJIT_F32_OP, (new_offset >> 1) + 1, (offset >> 1) + 1, 0)));
+
+ offset += 2;
+ }
+ arg_types >>= SLJIT_DEF_SHIFT;
+ }
+
+ return SLJIT_SUCCESS;
+}
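For the hard-float convention, hardfloat_call_with_args tracks occupied single-precision slots in the remap bitmask: an F32 argument takes the first free slot, an F64 the first free even-aligned pair, mirroring how the AAPCS VFP variant back-fills the argument registers. A small model of that allocator (stand-in names, not sljit API):

#include <stdio.h>

enum kind { F32, F64 };

int main(void)
{
    enum kind args[] = { F32, F64, F32 };   /* e.g. (float, double, float)  */
    unsigned remap = 0;                     /* bit n set => slot s<n> taken */

    for (unsigned i = 0; i < 3; i++) {
        unsigned slot = 0;
        unsigned mask = (args[i] == F64) ? 3u : 1u;   /* pair or single     */
        unsigned step = (args[i] == F64) ? 2u : 1u;   /* doubles stay even  */

        while (remap & mask) {
            slot += step;
            mask <<= step;
        }
        remap |= mask;

        if (args[i] == F64)
            printf("arg %u -> d%u (s%u:s%u)\n", i, slot / 2, slot, slot + 1);
        else
            printf("arg %u -> s%u\n", i, slot);
    }
    return 0;
}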
+
+#endif /* __SOFTFP__ */
+
+#undef EMIT_FPU_OPERATION
+
+SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 arg_types)
+{
+#ifdef __SOFTFP__
+ struct sljit_jump *jump;
+#endif
+
+ CHECK_ERROR_PTR();
+ CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types));
+
+#ifdef __SOFTFP__
+ PTR_FAIL_IF(softfloat_call_with_args(compiler, arg_types, NULL));
+
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
+ || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ compiler->skip_checks = 1;
+#endif
+
+ jump = sljit_emit_jump(compiler, type);
+ PTR_FAIL_IF(jump == NULL);
+
+ PTR_FAIL_IF(softfloat_post_call_with_args(compiler, arg_types));
+ return jump;
+#else /* !__SOFTFP__ */
+ PTR_FAIL_IF(hardfloat_call_with_args(compiler, arg_types));
+
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
+ || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ compiler->skip_checks = 1;
+#endif
+
+ return sljit_emit_jump(compiler, type);
+#endif /* __SOFTFP__ */
+}
+
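Both sljit_emit_call and sljit_emit_icall consume the same packed arg_types word: the lowest SLJIT_DEF_MASK-wide field is the return type and each following field describes one argument, which is why the helpers shift it right once before walking the arguments. A sketch of that decoding with made-up constants in place of the real SLJIT_ARG_TYPE_* values:

#include <stdio.h>

#define DEF_SHIFT 4      /* stand-in for SLJIT_DEF_SHIFT */
#define DEF_MASK  0xfu   /* stand-in for SLJIT_DEF_MASK  */
#define TYPE_WORD 1u     /* invented tags, not the real SLJIT_ARG_TYPE_* values */
#define TYPE_F64  3u

int main(void)
{
    /* "returns a word, takes (word, f64)" */
    unsigned arg_types = TYPE_WORD
        | (TYPE_WORD << DEF_SHIFT)
        | (TYPE_F64 << (2 * DEF_SHIFT));

    printf("return type tag %u\n", arg_types & DEF_MASK);
    arg_types >>= DEF_SHIFT;                    /* drop the return type, as above */

    for (unsigned i = 1; arg_types != 0; i++, arg_types >>= DEF_SHIFT)
        printf("argument %u tag %u\n", i, arg_types & DEF_MASK);
    return 0;
}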
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw)
{
struct sljit_jump *jump;
@@ -2181,16 +2386,20 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi
CHECK(check_sljit_emit_ijump(compiler, type, src, srcw));
ADJUST_LOCAL_OFFSET(src, srcw);
- /* In ARM, we don't need to touch the arguments. */
+ SLJIT_ASSERT(reg_map[TMP_REG1] != 14);
+
if (!(src & SLJIT_IMM)) {
- if (FAST_IS_REG(src))
+ if (FAST_IS_REG(src)) {
+ SLJIT_ASSERT(reg_map[src] != 14);
return push_inst(compiler, (type <= SLJIT_JUMP ? BX : BLX) | RM(src));
+ }
SLJIT_ASSERT(src & SLJIT_MEM);
- FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_REG2, src, srcw, TMP_REG2));
- return push_inst(compiler, (type <= SLJIT_JUMP ? BX : BLX) | RM(TMP_REG2));
+ FAIL_IF(emit_op_mem(compiler, WORD_SIZE | LOAD_DATA, TMP_REG1, src, srcw, TMP_REG1));
+ return push_inst(compiler, (type <= SLJIT_JUMP ? BX : BLX) | RM(TMP_REG1));
}
+ /* These jumps are converted to jump/call instructions when possible. */
jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
FAIL_IF(!jump);
set_jump(jump, compiler, JUMP_ADDR | ((type >= SLJIT_FAST_CALL) ? IS_BL : 0));
@@ -2199,22 +2408,57 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi
#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
if (type >= SLJIT_FAST_CALL)
FAIL_IF(prepare_blx(compiler));
- FAIL_IF(push_inst_with_unique_literal(compiler, EMIT_DATA_TRANSFER(WORD_DATA | LOAD_DATA, 1, 0, type <= SLJIT_JUMP ? TMP_PC : TMP_REG2, TMP_PC, 0), 0));
+ FAIL_IF(push_inst_with_unique_literal(compiler, EMIT_DATA_TRANSFER(WORD_SIZE | LOAD_DATA, 1, type <= SLJIT_JUMP ? TMP_PC : TMP_REG1, TMP_PC, 0), 0));
if (type >= SLJIT_FAST_CALL)
FAIL_IF(emit_blx(compiler));
#else
- FAIL_IF(emit_imm(compiler, TMP_REG2, 0));
- FAIL_IF(push_inst(compiler, (type <= SLJIT_JUMP ? BX : BLX) | RM(TMP_REG2)));
+ FAIL_IF(emit_imm(compiler, TMP_REG1, 0));
+ FAIL_IF(push_inst(compiler, (type <= SLJIT_JUMP ? BX : BLX) | RM(TMP_REG1)));
#endif
jump->addr = compiler->size;
return SLJIT_SUCCESS;
}
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 arg_types,
+ sljit_s32 src, sljit_sw srcw)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw));
+
+#ifdef __SOFTFP__
+ if (src & SLJIT_MEM) {
+ FAIL_IF(emit_op_mem(compiler, WORD_SIZE | LOAD_DATA, TMP_REG1, src, srcw, TMP_REG1));
+ src = TMP_REG1;
+ }
+
+ FAIL_IF(softfloat_call_with_args(compiler, arg_types, &src));
+
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
+ || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ compiler->skip_checks = 1;
+#endif
+
+ FAIL_IF(sljit_emit_ijump(compiler, type, src, srcw));
+
+ return softfloat_post_call_with_args(compiler, arg_types);
+#else /* !__SOFTFP__ */
+ FAIL_IF(hardfloat_call_with_args(compiler, arg_types));
+
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
+ || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ compiler->skip_checks = 1;
+#endif
+
+ return sljit_emit_ijump(compiler, type, src, srcw);
+#endif /* __SOFTFP__ */
+}
+
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 type)
{
- sljit_s32 dst_r, flags = GET_ALL_FLAGS(op);
+ sljit_s32 dst_reg, flags = GET_ALL_FLAGS(op);
sljit_uw cc, ins;
CHECK_ERROR();
@@ -2223,31 +2467,31 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
op = GET_OPCODE(op);
cc = get_cc(type & 0xff);
- dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
+ dst_reg = FAST_IS_REG(dst) ? dst : TMP_REG1;
if (op < SLJIT_ADD) {
- FAIL_IF(push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, 0, dst_r, SLJIT_UNUSED, SRC2_IMM | 0)));
- FAIL_IF(push_inst(compiler, (EMIT_DATA_PROCESS_INS(MOV_DP, 0, dst_r, SLJIT_UNUSED, SRC2_IMM | 1) & ~COND_MASK) | cc));
+ FAIL_IF(push_inst(compiler, MOV | RD(dst_reg) | SRC2_IMM | 0));
+ FAIL_IF(push_inst(compiler, ((MOV | RD(dst_reg) | SRC2_IMM | 1) & ~COND_MASK) | cc));
if (dst & SLJIT_MEM)
- return emit_op_mem(compiler, WORD_DATA, TMP_REG1, dst, dstw, TMP_REG2);
+ return emit_op_mem(compiler, WORD_SIZE, TMP_REG1, dst, dstw, TMP_REG2);
return SLJIT_SUCCESS;
}
- ins = (op == SLJIT_AND ? AND_DP : (op == SLJIT_OR ? ORR_DP : EOR_DP));
+ ins = (op == SLJIT_AND ? AND : (op == SLJIT_OR ? ORR : EOR));
if (dst & SLJIT_MEM)
- FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_REG1, dst, dstw, TMP_REG2));
+ FAIL_IF(emit_op_mem(compiler, WORD_SIZE | LOAD_DATA, TMP_REG1, dst, dstw, TMP_REG2));
- FAIL_IF(push_inst(compiler, (EMIT_DATA_PROCESS_INS(ins, 0, dst_r, dst_r, SRC2_IMM | 1) & ~COND_MASK) | cc));
+ FAIL_IF(push_inst(compiler, ((ins | RD(dst_reg) | RN(dst_reg) | SRC2_IMM | 1) & ~COND_MASK) | cc));
if (op == SLJIT_AND)
- FAIL_IF(push_inst(compiler, (EMIT_DATA_PROCESS_INS(ins, 0, dst_r, dst_r, SRC2_IMM | 0) & ~COND_MASK) | (cc ^ 0x10000000)));
+ FAIL_IF(push_inst(compiler, ((ins | RD(dst_reg) | RN(dst_reg) | SRC2_IMM | 0) & ~COND_MASK) | (cc ^ 0x10000000)));
if (dst & SLJIT_MEM)
- FAIL_IF(emit_op_mem(compiler, WORD_DATA, TMP_REG1, dst, dstw, TMP_REG2));
+ FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG1, dst, dstw, TMP_REG2));
if (flags & SLJIT_SET_Z)
- return push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, SET_FLAGS, TMP_REG2, SLJIT_UNUSED, RM(dst_r)));
+ return push_inst(compiler, MOV | SET_FLAGS | RD(TMP_REG2) | RM(dst_reg));
return SLJIT_SUCCESS;
}
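For op < SLJIT_ADD the rewritten sljit_emit_op_flags emits an unconditional MOV of 0 followed by a condition-qualified MOV of 1, so the destination ends up holding the tested flag as 0/1. Plain-C equivalent of that two-instruction idiom, for illustration only:

#include <stdio.h>

/* MOV dst, #0 ; MOVcc dst, #1 */
static int flag_to_register(int condition_holds)
{
    int dst = 0;            /* unconditional MOV dst, #0          */
    if (condition_holds)
        dst = 1;            /* executed only when cc is satisfied */
    return dst;
}

int main(void)
{
    printf("%d %d\n", flag_to_register(0), flag_to_register(1));
    return 0;
}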
@@ -2267,11 +2511,11 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compil
if (SLJIT_UNLIKELY(src & SLJIT_IMM)) {
tmp = get_imm(srcw);
if (tmp)
- return push_inst(compiler, (EMIT_DATA_PROCESS_INS(MOV_DP, 0, dst_reg, SLJIT_UNUSED, tmp) & ~COND_MASK) | cc);
+ return push_inst(compiler, ((MOV | RD(dst_reg) | tmp) & ~COND_MASK) | cc);
tmp = get_imm(~srcw);
if (tmp)
- return push_inst(compiler, (EMIT_DATA_PROCESS_INS(MVN_DP, 0, dst_reg, SLJIT_UNUSED, tmp) & ~COND_MASK) | cc);
+ return push_inst(compiler, ((MVN | RD(dst_reg) | tmp) & ~COND_MASK) | cc);
#if (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)
tmp = (sljit_uw) srcw;
@@ -2285,7 +2529,111 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compil
#endif
}
- return push_inst(compiler, (EMIT_DATA_PROCESS_INS(MOV_DP, 0, dst_reg, SLJIT_UNUSED, RM(src)) & ~COND_MASK) | cc);
+ return push_inst(compiler, ((MOV | RD(dst_reg) | RM(src)) & ~COND_MASK) | cc);
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 reg,
+ sljit_s32 mem, sljit_sw memw)
+{
+ sljit_s32 flags;
+ sljit_uw is_type1_transfer, inst;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw));
+
+ is_type1_transfer = 1;
+
+ switch (type & 0xff) {
+ case SLJIT_MOV:
+ case SLJIT_MOV_U32:
+ case SLJIT_MOV_S32:
+ case SLJIT_MOV_P:
+ flags = WORD_SIZE;
+ break;
+ case SLJIT_MOV_U8:
+ flags = BYTE_SIZE;
+ break;
+ case SLJIT_MOV_S8:
+ if (!(type & SLJIT_MEM_STORE))
+ is_type1_transfer = 0;
+ flags = BYTE_SIZE | SIGNED;
+ break;
+ case SLJIT_MOV_U16:
+ is_type1_transfer = 0;
+ flags = HALF_SIZE;
+ break;
+ case SLJIT_MOV_S16:
+ is_type1_transfer = 0;
+ flags = HALF_SIZE | SIGNED;
+ break;
+ default:
+ SLJIT_UNREACHABLE();
+ flags = WORD_SIZE;
+ break;
+ }
+
+ if (!(type & SLJIT_MEM_STORE))
+ flags |= LOAD_DATA;
+
+ SLJIT_ASSERT(is_type1_transfer == !!IS_TYPE1_TRANSFER(flags));
+
+ if (SLJIT_UNLIKELY(mem & OFFS_REG_MASK)) {
+ if (!is_type1_transfer && memw != 0)
+ return SLJIT_ERR_UNSUPPORTED;
+ }
+ else {
+ if (is_type1_transfer) {
+ 		if (memw > 4095 || memw < -4095)
+ return SLJIT_ERR_UNSUPPORTED;
+ }
+ else {
+ 		if (memw > 255 || memw < -255)
+ return SLJIT_ERR_UNSUPPORTED;
+ }
+ }
+
+ if (type & SLJIT_MEM_SUPP)
+ return SLJIT_SUCCESS;
+
+ if (SLJIT_UNLIKELY(mem & OFFS_REG_MASK)) {
+ memw &= 0x3;
+
+ inst = EMIT_DATA_TRANSFER(flags, 1, reg, mem & REG_MASK, RM(OFFS_REG(mem)) | (memw << 7));
+
+ if (is_type1_transfer)
+ inst |= (1 << 25);
+
+ if (type & SLJIT_MEM_PRE)
+ inst |= (1 << 21);
+ else
+ inst ^= (1 << 24);
+
+ return push_inst(compiler, inst);
+ }
+
+ inst = EMIT_DATA_TRANSFER(flags, 0, reg, mem & REG_MASK, 0);
+
+ if (type & SLJIT_MEM_PRE)
+ inst |= (1 << 21);
+ else
+ inst ^= (1 << 24);
+
+ if (is_type1_transfer) {
+ if (memw >= 0)
+ inst |= (1 << 23);
+ else
+ memw = -memw;
+
+ return push_inst(compiler, inst | memw);
+ }
+
+ if (memw >= 0)
+ inst |= (1 << 23);
+ else
+ memw = -memw;
+
+ return push_inst(compiler, inst | TYPE2_TRANSFER_IMM(memw));
}
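sljit_emit_mem toggles the standard ARM single-data-transfer bits: the transfer template appears to arrive with P (bit 24) set, so SLJIT_MEM_PRE only needs to add W (bit 21) for write-back, while the post-indexed case flips P with the XOR; U (bit 23) selects adding versus subtracting the offset, and bit 25 marks a register offset in the type-1 encoding. A small sketch of that bit handling (values illustrative):

#include <stdio.h>

static unsigned transfer_bits(int pre_index, int negative_offset)
{
    unsigned inst = 1u << 24;       /* template with P (pre-index) already set */

    if (pre_index)
        inst |= 1u << 21;           /* W: write the updated address back       */
    else
        inst ^= 1u << 24;           /* clear P: post-indexed form              */

    if (!negative_offset)
        inst |= 1u << 23;           /* U: add rather than subtract the offset  */

    return inst;
}

int main(void)
{
    unsigned pre  = transfer_bits(1, 0);
    unsigned post = transfer_bits(0, 1);

    printf("pre:  P=%u W=%u U=%u\n", (pre  >> 24) & 1, (pre  >> 21) & 1, (pre  >> 23) & 1);
    printf("post: P=%u W=%u U=%u\n", (post >> 24) & 1, (post >> 21) & 1, (post >> 23) & 1);
    return 0;
}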
SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw init_value)
@@ -2303,7 +2651,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compi
reg = SLOW_IS_REG(dst) ? dst : TMP_REG2;
#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
- PTR_FAIL_IF(push_inst_with_unique_literal(compiler, EMIT_DATA_TRANSFER(WORD_DATA | LOAD_DATA, 1, 0, reg, TMP_PC, 0), init_value));
+ PTR_FAIL_IF(push_inst_with_unique_literal(compiler, EMIT_DATA_TRANSFER(WORD_SIZE | LOAD_DATA, 1, reg, TMP_PC, 0), init_value));
compiler->patches++;
#else
PTR_FAIL_IF(emit_imm(compiler, reg, init_value));
@@ -2311,7 +2659,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compi
set_const(const_, compiler);
if (dst & SLJIT_MEM)
- PTR_FAIL_IF(emit_op_mem(compiler, WORD_DATA, TMP_REG2, dst, dstw, TMP_REG1));
+ PTR_FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG2, dst, dstw, TMP_REG1));
return const_;
}
diff --git a/src/3rdparty/pcre2/src/sljit/sljitNativeARM_64.c b/src/3rdparty/pcre2/src/sljit/sljitNativeARM_64.c
index fd67f50253..8a437bd6a0 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitNativeARM_64.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitNativeARM_64.c
@@ -36,15 +36,19 @@ typedef sljit_u32 sljit_ins;
#define TMP_REG1 (SLJIT_NUMBER_OF_REGISTERS + 2)
#define TMP_REG2 (SLJIT_NUMBER_OF_REGISTERS + 3)
-#define TMP_REG3 (SLJIT_NUMBER_OF_REGISTERS + 4)
-#define TMP_LR (SLJIT_NUMBER_OF_REGISTERS + 5)
-#define TMP_SP (SLJIT_NUMBER_OF_REGISTERS + 6)
+#define TMP_LR (SLJIT_NUMBER_OF_REGISTERS + 4)
+#define TMP_SP (SLJIT_NUMBER_OF_REGISTERS + 5)
-#define TMP_FREG1 (0)
-#define TMP_FREG2 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1)
+#define TMP_FREG1 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1)
+#define TMP_FREG2 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 2)
+/* r18 - platform register, currently not used */
static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 8] = {
- 31, 0, 1, 2, 3, 4, 5, 6, 7, 12, 13, 14, 15, 16, 17, 8, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 29, 9, 10, 11, 30, 31
+ 31, 0, 1, 2, 3, 4, 5, 6, 7, 11, 12, 13, 14, 15, 16, 17, 8, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 29, 9, 10, 30, 31
+};
+
+static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
+ 0, 0, 1, 2, 3, 4, 5, 6, 7
};
#define W_OP (1 << 31)
@@ -53,10 +57,10 @@ static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 8] = {
#define RN(rn) (reg_map[rn] << 5)
#define RT2(rt2) (reg_map[rt2] << 10)
#define RM(rm) (reg_map[rm] << 16)
-#define VD(vd) (vd)
-#define VT(vt) (vt)
-#define VN(vn) ((vn) << 5)
-#define VM(vm) ((vm) << 16)
+#define VD(vd) (freg_map[vd])
+#define VT(vt) (freg_map[vt])
+#define VN(vn) (freg_map[vn] << 5)
+#define VM(vm) (freg_map[vm] << 16)
/* --------------------------------------------------------------------- */
/* Instruction forms */
@@ -112,10 +116,13 @@ static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 8] = {
#define SMULH 0x9b403c00
#define STP 0xa9000000
#define STP_PRE 0xa9800000
+#define STRB 0x38206800
+#define STRBI 0x39000000
#define STRI 0xf9000000
#define STR_FI 0x3d000000
#define STR_FR 0x3c206800
#define STUR_FI 0x3c000000
+#define STURBI 0x38000000
#define SUB 0xcb000000
#define SUBI 0xd1000000
#define SUBS 0xeb000000
@@ -193,6 +200,7 @@ static SLJIT_INLINE sljit_s32 detect_jump_type(struct sljit_jump *jump, sljit_in
code_ptr[-2] = code_ptr[0];
return 2;
}
+
if (target_addr <= 0xffffffffffffl) {
if (jump->flags & IS_COND)
code_ptr[-5] -= (1 << 5);
@@ -335,7 +343,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
return 1;
#endif
- case SLJIT_HAS_PRE_UPDATE:
case SLJIT_HAS_CLZ:
case SLJIT_HAS_CMOV:
return 1;
@@ -394,12 +401,14 @@ static sljit_ins logical_imm(sljit_sw imm, sljit_s32 len)
SLJIT_ASSERT((len == 32 && imm != 0 && imm != -1)
|| (len == 16 && (sljit_s32)imm != 0 && (sljit_s32)imm != -1));
+
uimm = (sljit_uw)imm;
while (1) {
if (len <= 0) {
SLJIT_UNREACHABLE();
return 0;
}
+
mask = ((sljit_uw)1 << len) - 1;
if ((uimm & mask) != ((uimm >> len) & mask))
break;
@@ -448,39 +457,42 @@ static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 dst,
sljit_s32 i, zeros, ones, first;
sljit_ins bitmask;
+ /* Handling simple immediates first. */
if (imm <= 0xffff)
return push_inst(compiler, MOVZ | RD(dst) | (imm << 5));
- if (simm >= -0x10000 && simm < 0)
+ if (simm < 0 && simm >= -0x10000)
return push_inst(compiler, MOVN | RD(dst) | ((~imm & 0xffff) << 5));
if (imm <= 0xffffffffl) {
+ if ((imm & 0xffff) == 0)
+ return push_inst(compiler, MOVZ | RD(dst) | ((imm >> 16) << 5) | (1 << 21));
if ((imm & 0xffff0000l) == 0xffff0000)
return push_inst(compiler, (MOVN ^ W_OP) | RD(dst) | ((~imm & 0xffff) << 5));
if ((imm & 0xffff) == 0xffff)
return push_inst(compiler, (MOVN ^ W_OP) | RD(dst) | ((~imm & 0xffff0000l) >> (16 - 5)) | (1 << 21));
+
bitmask = logical_imm(simm, 16);
if (bitmask != 0)
return push_inst(compiler, (ORRI ^ W_OP) | RD(dst) | RN(TMP_ZERO) | bitmask);
- }
- else {
- bitmask = logical_imm(simm, 32);
- if (bitmask != 0)
- return push_inst(compiler, ORRI | RD(dst) | RN(TMP_ZERO) | bitmask);
- }
- if (imm <= 0xffffffffl) {
FAIL_IF(push_inst(compiler, MOVZ | RD(dst) | ((imm & 0xffff) << 5)));
return push_inst(compiler, MOVK | RD(dst) | ((imm & 0xffff0000l) >> (16 - 5)) | (1 << 21));
}
- if (simm >= -0x100000000l && simm < 0) {
+ bitmask = logical_imm(simm, 32);
+ if (bitmask != 0)
+ return push_inst(compiler, ORRI | RD(dst) | RN(TMP_ZERO) | bitmask);
+
+ if (simm < 0 && simm >= -0x100000000l) {
+ if ((imm & 0xffff) == 0xffff)
+ return push_inst(compiler, MOVN | RD(dst) | ((~imm & 0xffff0000l) >> (16 - 5)) | (1 << 21));
+
FAIL_IF(push_inst(compiler, MOVN | RD(dst) | ((~imm & 0xffff) << 5)));
return push_inst(compiler, MOVK | RD(dst) | ((imm & 0xffff0000l) >> (16 - 5)) | (1 << 21));
}
- /* A large amount of number can be constructed from ORR and MOVx,
- but computing them is costly. We don't */
+ 	/* Many more constants could be constructed from ORR and MOVx, but computing them is costly. */
zeros = 0;
ones = 0;
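The reworked load_immediate prefers single-instruction forms (MOVZ, MOVN, or an ORR bitmask immediate) and otherwise builds the constant 16 bits at a time with MOVZ/MOVK. A miniature version of that chunking strategy; the real encodings and the MOVN/bitmask special cases are omitted:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t imm = 0x0000123400005678ull;
    int first = 1;

    for (int shift = 0; shift < 64; shift += 16) {
        unsigned chunk = (unsigned)((imm >> shift) & 0xffff);
        if (chunk == 0)
            continue;                 /* all-zero chunks need no MOVK */
        printf("%s x0, #0x%x, lsl #%d\n", first ? "movz" : "movk", chunk, shift);
        first = 0;
    }
    if (first)
        printf("movz x0, #0\n");      /* the constant was zero */
    return 0;
}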
@@ -533,9 +545,6 @@ static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 dst,
#define INT_OP 0x0040000
#define SET_FLAGS 0x0080000
#define UNUSED_RETURN 0x0100000
-#define SLOW_DEST 0x0200000
-#define SLOW_SRC1 0x0400000
-#define SLOW_SRC2 0x0800000
#define CHECK_FLAGS(flag_bits) \
if (flags & SET_FLAGS) { \
@@ -693,40 +702,32 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s
switch (op) {
case SLJIT_MOV:
case SLJIT_MOV_P:
- case SLJIT_MOVU:
- case SLJIT_MOVU_P:
SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1);
if (dst == arg2)
return SLJIT_SUCCESS;
return push_inst(compiler, ORR | RD(dst) | RN(TMP_ZERO) | RM(arg2));
case SLJIT_MOV_U8:
- case SLJIT_MOVU_U8:
SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1);
return push_inst(compiler, (UBFM ^ (1 << 31)) | RD(dst) | RN(arg2) | (7 << 10));
case SLJIT_MOV_S8:
- case SLJIT_MOVU_S8:
SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1);
if (!(flags & INT_OP))
inv_bits |= 1 << 22;
return push_inst(compiler, (SBFM ^ inv_bits) | RD(dst) | RN(arg2) | (7 << 10));
case SLJIT_MOV_U16:
- case SLJIT_MOVU_U16:
SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1);
return push_inst(compiler, (UBFM ^ (1 << 31)) | RD(dst) | RN(arg2) | (15 << 10));
case SLJIT_MOV_S16:
- case SLJIT_MOVU_S16:
SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1);
if (!(flags & INT_OP))
inv_bits |= 1 << 22;
return push_inst(compiler, (SBFM ^ inv_bits) | RD(dst) | RN(arg2) | (15 << 10));
case SLJIT_MOV_U32:
- case SLJIT_MOVU_U32:
SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1);
if ((flags & INT_OP) && dst == arg2)
return SLJIT_SUCCESS;
return push_inst(compiler, (ORR ^ (1 << 31)) | RD(dst) | RN(TMP_ZERO) | RM(arg2));
case SLJIT_MOV_S32:
- case SLJIT_MOVU_S32:
SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1);
if ((flags & INT_OP) && dst == arg2)
return SLJIT_SUCCESS;
@@ -795,292 +796,67 @@ set_flags:
return SLJIT_SUCCESS;
}
-#define STORE 0x01
-#define SIGNED 0x02
-
-#define UPDATE 0x04
-#define ARG_TEST 0x08
+#define STORE 0x10
+#define SIGNED 0x20
-#define BYTE_SIZE 0x000
-#define HALF_SIZE 0x100
-#define INT_SIZE 0x200
-#define WORD_SIZE 0x300
+#define BYTE_SIZE 0x0
+#define HALF_SIZE 0x1
+#define INT_SIZE 0x2
+#define WORD_SIZE 0x3
-#define MEM_SIZE_SHIFT(flags) ((flags) >> 8)
-
-static const sljit_ins sljit_mem_imm[4] = {
-/* u l */ 0x39400000 /* ldrb [reg,imm] */,
-/* u s */ 0x39000000 /* strb [reg,imm] */,
-/* s l */ 0x39800000 /* ldrsb [reg,imm] */,
-/* s s */ 0x39000000 /* strb [reg,imm] */,
-};
-
-static const sljit_ins sljit_mem_simm[4] = {
-/* u l */ 0x38400000 /* ldurb [reg,imm] */,
-/* u s */ 0x38000000 /* sturb [reg,imm] */,
-/* s l */ 0x38800000 /* ldursb [reg,imm] */,
-/* s s */ 0x38000000 /* sturb [reg,imm] */,
-};
-
-static const sljit_ins sljit_mem_pre_simm[4] = {
-/* u l */ 0x38400c00 /* ldrb [reg,imm]! */,
-/* u s */ 0x38000c00 /* strb [reg,imm]! */,
-/* s l */ 0x38800c00 /* ldrsb [reg,imm]! */,
-/* s s */ 0x38000c00 /* strb [reg,imm]! */,
-};
-
-static const sljit_ins sljit_mem_reg[4] = {
-/* u l */ 0x38606800 /* ldrb [reg,reg] */,
-/* u s */ 0x38206800 /* strb [reg,reg] */,
-/* s l */ 0x38a06800 /* ldrsb [reg,reg] */,
-/* s s */ 0x38206800 /* strb [reg,reg] */,
-};
-
-/* Helper function. Dst should be reg + value, using at most 1 instruction, flags does not set. */
-static sljit_s32 emit_set_delta(struct sljit_compiler *compiler, sljit_s32 dst, sljit_s32 reg, sljit_sw value)
-{
- if (value >= 0) {
- if (value <= 0xfff)
- return push_inst(compiler, ADDI | RD(dst) | RN(reg) | (value << 10));
- if (value <= 0xffffff && !(value & 0xfff))
- return push_inst(compiler, ADDI | (1 << 22) | RD(dst) | RN(reg) | (value >> 2));
- }
- else {
- value = -value;
- if (value <= 0xfff)
- return push_inst(compiler, SUBI | RD(dst) | RN(reg) | (value << 10));
- if (value <= 0xffffff && !(value & 0xfff))
- return push_inst(compiler, SUBI | (1 << 22) | RD(dst) | RN(reg) | (value >> 2));
- }
- return SLJIT_ERR_UNSUPPORTED;
-}
+#define MEM_SIZE_SHIFT(flags) ((flags) & 0x3)
-/* Can perform an operation using at most 1 instruction. */
-static sljit_s32 getput_arg_fast(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw)
+static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg,
+ sljit_s32 arg, sljit_sw argw, sljit_s32 tmp_reg)
{
sljit_u32 shift = MEM_SIZE_SHIFT(flags);
+ sljit_u32 type = (shift << 30);
- SLJIT_ASSERT(arg & SLJIT_MEM);
-
- if (SLJIT_UNLIKELY(flags & UPDATE)) {
- if ((arg & REG_MASK) && !(arg & OFFS_REG_MASK) && argw <= 255 && argw >= -256) {
- if (SLJIT_UNLIKELY(flags & ARG_TEST))
- return 1;
+ if (!(flags & STORE))
+ type |= (flags & SIGNED) ? 0x00800000 : 0x00400000;
- arg &= REG_MASK;
- argw &= 0x1ff;
- FAIL_IF(push_inst(compiler, sljit_mem_pre_simm[flags & 0x3]
- | (shift << 30) | RT(reg) | RN(arg) | (argw << 12)));
- return -1;
- }
- return 0;
- }
+ SLJIT_ASSERT(arg & SLJIT_MEM);
if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) {
argw &= 0x3;
- if (argw && argw != shift)
- return 0;
- if (SLJIT_UNLIKELY(flags & ARG_TEST))
- return 1;
+ if (argw == 0 || argw == shift)
+ return push_inst(compiler, STRB | type | RT(reg)
+ | RN(arg & REG_MASK) | RM(OFFS_REG(arg)) | (argw ? (1 << 12) : 0));
- FAIL_IF(push_inst(compiler, sljit_mem_reg[flags & 0x3] | (shift << 30) | RT(reg)
- | RN(arg & REG_MASK) | RM(OFFS_REG(arg)) | (argw ? (1 << 12) : 0)));
- return -1;
+ FAIL_IF(push_inst(compiler, ADD | RD(tmp_reg) | RN(arg & REG_MASK) | RM(OFFS_REG(arg)) | (argw << 10)));
+ return push_inst(compiler, STRBI | type | RT(reg) | RN(tmp_reg));
}
arg &= REG_MASK;
- if (arg == SLJIT_UNUSED)
- return 0;
-
- if (argw >= 0 && (argw >> shift) <= 0xfff && (argw & ((1 << shift) - 1)) == 0) {
- if (SLJIT_UNLIKELY(flags & ARG_TEST))
- return 1;
-
- FAIL_IF(push_inst(compiler, sljit_mem_imm[flags & 0x3] | (shift << 30)
- | RT(reg) | RN(arg) | (argw << (10 - shift))));
- return -1;
- }
-
- if (argw > 255 || argw < -256)
- return 0;
-
- if (SLJIT_UNLIKELY(flags & ARG_TEST))
- return 1;
+ if (arg == SLJIT_UNUSED) {
+ FAIL_IF(load_immediate(compiler, tmp_reg, argw & ~(0xfff << shift)));
- FAIL_IF(push_inst(compiler, sljit_mem_simm[flags & 0x3] | (shift << 30)
- | RT(reg) | RN(arg) | ((argw & 0x1ff) << 12)));
- return -1;
-}
-
-/* see getput_arg below.
- Note: can_cache is called only for binary operators. Those
- operators always uses word arguments without write back. */
-static sljit_s32 can_cache(sljit_s32 arg, sljit_sw argw, sljit_s32 next_arg, sljit_sw next_argw)
-{
- sljit_sw diff;
- if ((arg & OFFS_REG_MASK) || !(next_arg & SLJIT_MEM))
- return 0;
+ argw = (argw >> shift) & 0xfff;
- if (!(arg & REG_MASK)) {
- diff = argw - next_argw;
- if (diff <= 0xfff && diff >= -0xfff)
- return 1;
- return 0;
+ return push_inst(compiler, STRBI | type | RT(reg) | RN(tmp_reg) | (argw << 10));
}
- if (argw == next_argw)
- return 1;
-
- diff = argw - next_argw;
- if (arg == next_arg && diff <= 0xfff && diff >= -0xfff)
- return 1;
-
- return 0;
-}
-
-/* Emit the necessary instructions. See can_cache above. */
-static sljit_s32 getput_arg(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg,
- sljit_s32 arg, sljit_sw argw, sljit_s32 next_arg, sljit_sw next_argw)
-{
- sljit_u32 shift = MEM_SIZE_SHIFT(flags);
- sljit_s32 tmp_r, other_r;
- sljit_sw diff;
-
- SLJIT_ASSERT(arg & SLJIT_MEM);
- if (!(next_arg & SLJIT_MEM)) {
- next_arg = 0;
- next_argw = 0;
- }
-
- tmp_r = ((flags & STORE) || (flags == (WORD_SIZE | SIGNED))) ? TMP_REG3 : reg;
-
- if (SLJIT_UNLIKELY((flags & UPDATE) && (arg & REG_MASK))) {
- /* Update only applies if a base register exists. */
- other_r = OFFS_REG(arg);
- if (!other_r) {
- other_r = arg & REG_MASK;
- SLJIT_ASSERT(other_r != reg);
-
- if (argw >= 0 && argw <= 0xffffff) {
- if ((argw & 0xfff) != 0)
- FAIL_IF(push_inst(compiler, ADDI | RD(other_r) | RN(other_r) | ((argw & 0xfff) << 10)));
- if (argw >> 12)
- FAIL_IF(push_inst(compiler, ADDI | (1 << 22) | RD(other_r) | RN(other_r) | ((argw >> 12) << 10)));
- return push_inst(compiler, sljit_mem_imm[flags & 0x3] | (shift << 30) | RT(reg) | RN(other_r));
- }
- else if (argw < 0 && argw >= -0xffffff) {
- argw = -argw;
- if ((argw & 0xfff) != 0)
- FAIL_IF(push_inst(compiler, SUBI | RD(other_r) | RN(other_r) | ((argw & 0xfff) << 10)));
- if (argw >> 12)
- FAIL_IF(push_inst(compiler, SUBI | (1 << 22) | RD(other_r) | RN(other_r) | ((argw >> 12) << 10)));
- return push_inst(compiler, sljit_mem_imm[flags & 0x3] | (shift << 30) | RT(reg) | RN(other_r));
- }
-
- if (compiler->cache_arg == SLJIT_MEM) {
- if (argw == compiler->cache_argw) {
- other_r = TMP_REG3;
- argw = 0;
- }
- else if (emit_set_delta(compiler, TMP_REG3, TMP_REG3, argw - compiler->cache_argw) != SLJIT_ERR_UNSUPPORTED) {
- FAIL_IF(compiler->error);
- compiler->cache_argw = argw;
- other_r = TMP_REG3;
- argw = 0;
- }
- }
-
- if (argw) {
- FAIL_IF(load_immediate(compiler, TMP_REG3, argw));
- compiler->cache_arg = SLJIT_MEM;
- compiler->cache_argw = argw;
- other_r = TMP_REG3;
- argw = 0;
- }
- }
-
- /* No caching here. */
- arg &= REG_MASK;
- FAIL_IF(push_inst(compiler, sljit_mem_reg[flags & 0x3] | (shift << 30) | RT(reg) | RN(arg) | RM(other_r)));
- return push_inst(compiler, ADD | RD(arg) | RN(arg) | RM(other_r));
- }
-
- if (arg & OFFS_REG_MASK) {
- other_r = OFFS_REG(arg);
- arg &= REG_MASK;
- FAIL_IF(push_inst(compiler, ADD | RD(tmp_r) | RN(arg) | RM(other_r) | ((argw & 0x3) << 10)));
- return push_inst(compiler, sljit_mem_imm[flags & 0x3] | (shift << 30) | RT(reg) | RN(tmp_r));
- }
-
- if (compiler->cache_arg == arg) {
- diff = argw - compiler->cache_argw;
- if (diff <= 255 && diff >= -256)
- return push_inst(compiler, sljit_mem_simm[flags & 0x3] | (shift << 30)
- | RT(reg) | RN(TMP_REG3) | ((diff & 0x1ff) << 12));
- if (emit_set_delta(compiler, TMP_REG3, TMP_REG3, diff) != SLJIT_ERR_UNSUPPORTED) {
- FAIL_IF(compiler->error);
- return push_inst(compiler, sljit_mem_imm[flags & 0x3] | (shift << 30) | RT(reg) | RN(arg));
- }
- }
-
- diff = argw - next_argw;
- next_arg = (arg & REG_MASK) && (arg == next_arg) && diff <= 0xfff && diff >= -0xfff && diff != 0;
- arg &= REG_MASK;
-
- if (arg != SLJIT_UNUSED && argw >= 0 && argw <= 0xffffff && (argw & ((1 << shift) - 1)) == 0) {
- FAIL_IF(push_inst(compiler, ADDI | (1 << 22) | RD(tmp_r) | RN(arg) | ((argw >> 12) << 10)));
- return push_inst(compiler, sljit_mem_imm[flags & 0x3] | (shift << 30)
- | RT(reg) | RN(tmp_r) | ((argw & 0xfff) << (10 - shift)));
- }
-
- if (arg && compiler->cache_arg == SLJIT_MEM) {
- if (compiler->cache_argw == argw)
- return push_inst(compiler, sljit_mem_reg[flags & 0x3] | (shift << 30) | RT(reg) | RN(arg) | RM(TMP_REG3));
- if (emit_set_delta(compiler, TMP_REG3, TMP_REG3, argw - compiler->cache_argw) != SLJIT_ERR_UNSUPPORTED) {
- FAIL_IF(compiler->error);
- compiler->cache_argw = argw;
- return push_inst(compiler, sljit_mem_reg[flags & 0x3] | (shift << 30) | RT(reg) | RN(arg) | RM(TMP_REG3));
+ if (argw >= 0 && (argw & ((1 << shift) - 1)) == 0) {
+ if ((argw >> shift) <= 0xfff) {
+ return push_inst(compiler, STRBI | type | RT(reg) | RN(arg) | (argw << (10 - shift)));
}
- }
- compiler->cache_argw = argw;
- if (next_arg && emit_set_delta(compiler, TMP_REG3, arg, argw) != SLJIT_ERR_UNSUPPORTED) {
- FAIL_IF(compiler->error);
- compiler->cache_arg = SLJIT_MEM | arg;
- arg = 0;
- }
- else {
- FAIL_IF(load_immediate(compiler, TMP_REG3, argw));
- compiler->cache_arg = SLJIT_MEM;
+ if (argw <= 0xffffff) {
+ FAIL_IF(push_inst(compiler, ADDI | (1 << 22) | RD(tmp_reg) | RN(arg) | ((argw >> 12) << 10)));
- if (next_arg) {
- FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG3) | RN(TMP_REG3) | RM(arg)));
- compiler->cache_arg = SLJIT_MEM | arg;
- arg = 0;
+ argw = ((argw & 0xfff) >> shift);
+ return push_inst(compiler, STRBI | type | RT(reg) | RN(tmp_reg) | (argw << 10));
}
}
- if (arg)
- return push_inst(compiler, sljit_mem_reg[flags & 0x3] | (shift << 30) | RT(reg) | RN(arg) | RM(TMP_REG3));
- return push_inst(compiler, sljit_mem_imm[flags & 0x3] | (shift << 30) | RT(reg) | RN(TMP_REG3));
-}
+ if (argw <= 255 && argw >= -256)
+ return push_inst(compiler, STURBI | type | RT(reg) | RN(arg) | ((argw & 0x1ff) << 12));
-static SLJIT_INLINE sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw)
-{
- if (getput_arg_fast(compiler, flags, reg, arg, argw))
- return compiler->error;
- compiler->cache_arg = 0;
- compiler->cache_argw = 0;
- return getput_arg(compiler, flags, reg, arg, argw, 0, 0);
-}
+ FAIL_IF(load_immediate(compiler, tmp_reg, argw));
-static SLJIT_INLINE sljit_s32 emit_op_mem2(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg1, sljit_sw arg1w, sljit_s32 arg2, sljit_sw arg2w)
-{
- if (getput_arg_fast(compiler, flags, reg, arg1, arg1w))
- return compiler->error;
- return getput_arg(compiler, flags, reg, arg1, arg1w, arg2, arg2w);
+ return push_inst(compiler, STRB | type | RT(reg) | RN(arg) | RM(tmp_reg));
}
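The flattened emit_op_mem now picks between AArch64's two immediate addressing forms: the scaled unsigned 12-bit offset when the offset is non-negative, size-aligned and small enough, and the unscaled signed 9-bit (STUR/LDUR-style) offset for -256..255; anything else goes through an ADDI split or a temporary register. A sketch of that classification with a hypothetical enum:

#include <stdio.h>

enum form { SCALED_IMM12, UNSCALED_IMM9, NEEDS_TEMP };

static enum form classify(long argw, unsigned shift /* log2 of access size */)
{
    if (argw >= 0 && (argw & ((1L << shift) - 1)) == 0 && (argw >> shift) <= 0xfff)
        return SCALED_IMM12;
    if (argw <= 255 && argw >= -256)
        return UNSCALED_IMM9;
    return NEEDS_TEMP;               /* ADDI split or load into tmp_reg */
}

int main(void)
{
    printf("%d %d %d\n",
           (int)classify(32, 3),     /* 8-byte access at +32: scaled imm12 */
           (int)classify(-8, 3),     /* small negative: unscaled imm9      */
           (int)classify(40000, 3)); /* out of range: temporary register   */
    return 0;
}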
/* --------------------------------------------------------------------- */
@@ -1088,14 +864,14 @@ static SLJIT_INLINE sljit_s32 emit_op_mem2(struct sljit_compiler *compiler, slji
/* --------------------------------------------------------------------- */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler,
- sljit_s32 options, sljit_s32 args, sljit_s32 scratches, sljit_s32 saveds,
+ sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
- sljit_s32 i, tmp, offs, prev, saved_regs_size;
+ sljit_s32 args, i, tmp, offs, prev, saved_regs_size;
CHECK_ERROR();
- CHECK(check_sljit_emit_enter(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size));
- set_emit_enter(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size);
+ CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
+ set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
saved_regs_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 0);
local_size += saved_regs_size + SLJIT_LOCALS_OFFSET;
@@ -1165,6 +941,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RN(TMP_SP) | (0 << 10)));
}
+ args = get_arg_count(arg_types);
+
if (args >= 1)
FAIL_IF(push_inst(compiler, ORR | RD(SLJIT_S0) | RN(TMP_ZERO) | RM(SLJIT_R0)));
if (args >= 2)
@@ -1176,12 +954,12 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler,
- sljit_s32 options, sljit_s32 args, sljit_s32 scratches, sljit_s32 saveds,
+ sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
CHECK_ERROR();
- CHECK(check_sljit_set_context(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size));
- set_set_context(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size);
+ CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
+ set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
local_size += GET_SAVED_REGISTERS_SIZE(scratches, saveds, 0) + SLJIT_LOCALS_OFFSET;
local_size = (local_size + 15) & ~0xf;
@@ -1314,9 +1092,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
ADJUST_LOCAL_OFFSET(dst, dstw);
ADJUST_LOCAL_OFFSET(src, srcw);
- compiler->cache_arg = 0;
- compiler->cache_argw = 0;
-
if (dst == SLJIT_UNUSED && !HAS_FLAGS(op)) {
if (op <= SLJIT_MOV_P && (src & SLJIT_MEM)) {
SLJIT_ASSERT(reg_map[1] == 0 && reg_map[3] == 2 && reg_map[5] == 4);
@@ -1329,7 +1104,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
dst = 1;
/* Signed word sized load is the prefetch instruction. */
- return emit_op_mem(compiler, WORD_SIZE | SIGNED, dst, src, srcw);
+ return emit_op_mem(compiler, WORD_SIZE | SIGNED, dst, src, srcw, TMP_REG1);
}
return SLJIT_SUCCESS;
}
@@ -1337,106 +1112,67 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
dst_r = SLOW_IS_REG(dst) ? dst : TMP_REG1;
op = GET_OPCODE(op);
- if (op >= SLJIT_MOV && op <= SLJIT_MOVU_P) {
+ if (op >= SLJIT_MOV && op <= SLJIT_MOV_P) {
+ /* Both operands are registers. */
+ if (dst_r != TMP_REG1 && FAST_IS_REG(src))
+ return emit_op_imm(compiler, op | ((op_flags & SLJIT_I32_OP) ? INT_OP : 0), dst_r, TMP_REG1, src);
+
switch (op) {
case SLJIT_MOV:
case SLJIT_MOV_P:
- flags = WORD_SIZE;
+ mem_flags = WORD_SIZE;
break;
case SLJIT_MOV_U8:
- flags = BYTE_SIZE;
+ mem_flags = BYTE_SIZE;
if (src & SLJIT_IMM)
srcw = (sljit_u8)srcw;
break;
case SLJIT_MOV_S8:
- flags = BYTE_SIZE | SIGNED;
+ mem_flags = BYTE_SIZE | SIGNED;
if (src & SLJIT_IMM)
srcw = (sljit_s8)srcw;
break;
case SLJIT_MOV_U16:
- flags = HALF_SIZE;
+ mem_flags = HALF_SIZE;
if (src & SLJIT_IMM)
srcw = (sljit_u16)srcw;
break;
case SLJIT_MOV_S16:
- flags = HALF_SIZE | SIGNED;
+ mem_flags = HALF_SIZE | SIGNED;
if (src & SLJIT_IMM)
srcw = (sljit_s16)srcw;
break;
case SLJIT_MOV_U32:
- flags = INT_SIZE;
+ mem_flags = INT_SIZE;
if (src & SLJIT_IMM)
srcw = (sljit_u32)srcw;
break;
case SLJIT_MOV_S32:
- flags = INT_SIZE | SIGNED;
- if (src & SLJIT_IMM)
- srcw = (sljit_s32)srcw;
- break;
- case SLJIT_MOVU:
- case SLJIT_MOVU_P:
- flags = WORD_SIZE | UPDATE;
- break;
- case SLJIT_MOVU_U8:
- flags = BYTE_SIZE | UPDATE;
- if (src & SLJIT_IMM)
- srcw = (sljit_u8)srcw;
- break;
- case SLJIT_MOVU_S8:
- flags = BYTE_SIZE | SIGNED | UPDATE;
- if (src & SLJIT_IMM)
- srcw = (sljit_s8)srcw;
- break;
- case SLJIT_MOVU_U16:
- flags = HALF_SIZE | UPDATE;
- if (src & SLJIT_IMM)
- srcw = (sljit_u16)srcw;
- break;
- case SLJIT_MOVU_S16:
- flags = HALF_SIZE | SIGNED | UPDATE;
- if (src & SLJIT_IMM)
- srcw = (sljit_s16)srcw;
- break;
- case SLJIT_MOVU_U32:
- flags = INT_SIZE | UPDATE;
- if (src & SLJIT_IMM)
- srcw = (sljit_u32)srcw;
- break;
- case SLJIT_MOVU_S32:
- flags = INT_SIZE | SIGNED | UPDATE;
+ mem_flags = INT_SIZE | SIGNED;
if (src & SLJIT_IMM)
srcw = (sljit_s32)srcw;
break;
default:
SLJIT_UNREACHABLE();
- flags = 0;
+ mem_flags = 0;
break;
}
if (src & SLJIT_IMM)
FAIL_IF(emit_op_imm(compiler, SLJIT_MOV | ARG2_IMM, dst_r, TMP_REG1, srcw));
- else if (src & SLJIT_MEM) {
- if (getput_arg_fast(compiler, flags, dst_r, src, srcw))
- FAIL_IF(compiler->error);
- else
- FAIL_IF(getput_arg(compiler, flags, dst_r, src, srcw, dst, dstw));
- } else {
- if (dst_r != TMP_REG1)
- return emit_op_imm(compiler, op | ((op_flags & SLJIT_I32_OP) ? INT_OP : 0), dst_r, TMP_REG1, src);
+ else if (!(src & SLJIT_MEM))
dst_r = src;
- }
+ else
+ FAIL_IF(emit_op_mem(compiler, mem_flags, dst_r, src, srcw, TMP_REG1));
- if (dst & SLJIT_MEM) {
- if (getput_arg_fast(compiler, flags | STORE, dst_r, dst, dstw))
- return compiler->error;
- else
- return getput_arg(compiler, flags | STORE, dst_r, dst, dstw, 0, 0);
- }
+ if (dst & SLJIT_MEM)
+ return emit_op_mem(compiler, mem_flags | STORE, dst_r, dst, dstw, TMP_REG2);
return SLJIT_SUCCESS;
}
flags = HAS_FLAGS(op_flags) ? SET_FLAGS : 0;
mem_flags = WORD_SIZE;
+
if (op_flags & SLJIT_I32_OP) {
flags |= INT_OP;
mem_flags = INT_SIZE;
@@ -1446,28 +1182,14 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
flags |= UNUSED_RETURN;
if (src & SLJIT_MEM) {
- if (getput_arg_fast(compiler, mem_flags, TMP_REG2, src, srcw))
- FAIL_IF(compiler->error);
- else
- FAIL_IF(getput_arg(compiler, mem_flags, TMP_REG2, src, srcw, dst, dstw));
+ FAIL_IF(emit_op_mem(compiler, mem_flags, TMP_REG2, src, srcw, TMP_REG2));
src = TMP_REG2;
}
- if (src & SLJIT_IMM) {
- flags |= ARG2_IMM;
- if (op_flags & SLJIT_I32_OP)
- srcw = (sljit_s32)srcw;
- } else
- srcw = src;
+ emit_op_imm(compiler, flags | op, dst_r, TMP_REG1, src);
- emit_op_imm(compiler, flags | op, dst_r, TMP_REG1, srcw);
-
- if (dst & SLJIT_MEM) {
- if (getput_arg_fast(compiler, mem_flags | STORE, dst_r, dst, dstw))
- return compiler->error;
- else
- return getput_arg(compiler, mem_flags | STORE, dst_r, dst, dstw, 0, 0);
- }
+ if (SLJIT_UNLIKELY(dst & SLJIT_MEM))
+ return emit_op_mem(compiler, mem_flags | STORE, dst_r, dst, dstw, TMP_REG2);
return SLJIT_SUCCESS;
}
@@ -1484,15 +1206,13 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
ADJUST_LOCAL_OFFSET(src1, src1w);
ADJUST_LOCAL_OFFSET(src2, src2w);
- compiler->cache_arg = 0;
- compiler->cache_argw = 0;
-
if (dst == SLJIT_UNUSED && !HAS_FLAGS(op))
return SLJIT_SUCCESS;
dst_r = SLOW_IS_REG(dst) ? dst : TMP_REG1;
flags = HAS_FLAGS(op) ? SET_FLAGS : 0;
mem_flags = WORD_SIZE;
+
if (op & SLJIT_I32_OP) {
flags |= INT_OP;
mem_flags = INT_SIZE;
@@ -1501,46 +1221,21 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
if (dst == SLJIT_UNUSED)
flags |= UNUSED_RETURN;
- if ((dst & SLJIT_MEM) && !getput_arg_fast(compiler, mem_flags | STORE | ARG_TEST, TMP_REG1, dst, dstw))
- flags |= SLOW_DEST;
-
if (src1 & SLJIT_MEM) {
- if (getput_arg_fast(compiler, mem_flags, TMP_REG1, src1, src1w))
- FAIL_IF(compiler->error);
- else
- flags |= SLOW_SRC1;
- }
- if (src2 & SLJIT_MEM) {
- if (getput_arg_fast(compiler, mem_flags, TMP_REG2, src2, src2w))
- FAIL_IF(compiler->error);
- else
- flags |= SLOW_SRC2;
- }
-
- if ((flags & (SLOW_SRC1 | SLOW_SRC2)) == (SLOW_SRC1 | SLOW_SRC2)) {
- if (!can_cache(src1, src1w, src2, src2w) && can_cache(src1, src1w, dst, dstw)) {
- FAIL_IF(getput_arg(compiler, mem_flags, TMP_REG2, src2, src2w, src1, src1w));
- FAIL_IF(getput_arg(compiler, mem_flags, TMP_REG1, src1, src1w, dst, dstw));
- }
- else {
- FAIL_IF(getput_arg(compiler, mem_flags, TMP_REG1, src1, src1w, src2, src2w));
- FAIL_IF(getput_arg(compiler, mem_flags, TMP_REG2, src2, src2w, dst, dstw));
- }
+ FAIL_IF(emit_op_mem(compiler, mem_flags, TMP_REG1, src1, src1w, TMP_REG1));
+ src1 = TMP_REG1;
}
- else if (flags & SLOW_SRC1)
- FAIL_IF(getput_arg(compiler, mem_flags, TMP_REG1, src1, src1w, dst, dstw));
- else if (flags & SLOW_SRC2)
- FAIL_IF(getput_arg(compiler, mem_flags, TMP_REG2, src2, src2w, dst, dstw));
- if (src1 & SLJIT_MEM)
- src1 = TMP_REG1;
- if (src2 & SLJIT_MEM)
+ if (src2 & SLJIT_MEM) {
+ FAIL_IF(emit_op_mem(compiler, mem_flags, TMP_REG2, src2, src2w, TMP_REG2));
src2 = TMP_REG2;
+ }
if (src1 & SLJIT_IMM)
flags |= ARG1_IMM;
else
src1w = src1;
+
if (src2 & SLJIT_IMM)
flags |= ARG2_IMM;
else
@@ -1548,14 +1243,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
emit_op_imm(compiler, flags | GET_OPCODE(op), dst_r, src1w, src2w);
- if (dst & SLJIT_MEM) {
- if (!(flags & SLOW_DEST)) {
- getput_arg_fast(compiler, mem_flags | STORE, dst_r, dst, dstw);
- return compiler->error;
- }
- return getput_arg(compiler, mem_flags | STORE, TMP_REG1, dst, dstw, 0, 0);
- }
-
+ if (dst & SLJIT_MEM)
+ return emit_op_mem(compiler, mem_flags | STORE, dst_r, dst, dstw, TMP_REG2);
return SLJIT_SUCCESS;
}
@@ -1568,7 +1257,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg)
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg)
{
CHECK_REG_INDEX(check_sljit_get_float_register_index(reg));
- return reg;
+ return freg_map[reg];
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler,
@@ -1587,54 +1276,50 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *c
static sljit_s32 emit_fop_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw)
{
sljit_u32 shift = MEM_SIZE_SHIFT(flags);
- sljit_ins ins_bits = (shift << 30);
- sljit_s32 other_r;
- sljit_sw diff;
+ sljit_ins type = (shift << 30);
SLJIT_ASSERT(arg & SLJIT_MEM);
if (!(flags & STORE))
- ins_bits |= 1 << 22;
+ type |= 0x00400000;
if (arg & OFFS_REG_MASK) {
argw &= 3;
- if (!argw || argw == shift)
- return push_inst(compiler, STR_FR | ins_bits | VT(reg)
+ if (argw == 0 || argw == shift)
+ return push_inst(compiler, STR_FR | type | VT(reg)
| RN(arg & REG_MASK) | RM(OFFS_REG(arg)) | (argw ? (1 << 12) : 0));
- other_r = OFFS_REG(arg);
- arg &= REG_MASK;
- FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG1) | RN(arg) | RM(other_r) | (argw << 10)));
- arg = TMP_REG1;
- argw = 0;
+
+ FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG1) | RN(arg & REG_MASK) | RM(OFFS_REG(arg)) | (argw << 10)));
+ return push_inst(compiler, STR_FI | type | VT(reg) | RN(TMP_REG1));
}
arg &= REG_MASK;
- if (arg && argw >= 0 && ((argw >> shift) <= 0xfff) && (argw & ((1 << shift) - 1)) == 0)
- return push_inst(compiler, STR_FI | ins_bits | VT(reg) | RN(arg) | (argw << (10 - shift)));
-
- if (arg && argw <= 255 && argw >= -256)
- return push_inst(compiler, STUR_FI | ins_bits | VT(reg) | RN(arg) | ((argw & 0x1ff) << 12));
-
- /* Slow cases */
- if (compiler->cache_arg == SLJIT_MEM && argw != compiler->cache_argw) {
- diff = argw - compiler->cache_argw;
- if (!arg && diff <= 255 && diff >= -256)
- return push_inst(compiler, STUR_FI | ins_bits | VT(reg) | RN(TMP_REG3) | ((diff & 0x1ff) << 12));
- if (emit_set_delta(compiler, TMP_REG3, TMP_REG3, argw - compiler->cache_argw) != SLJIT_ERR_UNSUPPORTED) {
- FAIL_IF(compiler->error);
- compiler->cache_argw = argw;
- }
+
+ if (arg == SLJIT_UNUSED) {
+ FAIL_IF(load_immediate(compiler, TMP_REG1, argw & ~(0xfff << shift)));
+
+ argw = (argw >> shift) & 0xfff;
+
+ return push_inst(compiler, STR_FI | type | VT(reg) | RN(TMP_REG1) | (argw << 10));
}
- if (compiler->cache_arg != SLJIT_MEM || argw != compiler->cache_argw) {
- compiler->cache_arg = SLJIT_MEM;
- compiler->cache_argw = argw;
- FAIL_IF(load_immediate(compiler, TMP_REG3, argw));
+ if (argw >= 0 && (argw & ((1 << shift) - 1)) == 0) {
+ if ((argw >> shift) <= 0xfff)
+ return push_inst(compiler, STR_FI | type | VT(reg) | RN(arg) | (argw << (10 - shift)));
+
+ if (argw <= 0xffffff) {
+ FAIL_IF(push_inst(compiler, ADDI | (1 << 22) | RD(TMP_REG1) | RN(arg) | ((argw >> 12) << 10)));
+
+ argw = ((argw & 0xfff) >> shift);
+ return push_inst(compiler, STR_FI | type | VT(reg) | RN(TMP_REG1) | (argw << 10));
+ }
}
- if (arg & REG_MASK)
- return push_inst(compiler, STR_FR | ins_bits | VT(reg) | RN(arg) | RM(TMP_REG3));
- return push_inst(compiler, STR_FI | ins_bits | VT(reg) | RN(TMP_REG3));
+ if (argw <= 255 && argw >= -256)
+ return push_inst(compiler, STUR_FI | type | VT(reg) | RN(arg) | ((argw & 0x1ff) << 12));
+
+ FAIL_IF(load_immediate(compiler, TMP_REG1, argw));
+ return push_inst(compiler, STR_FR | type | VT(reg) | RN(arg) | RM(TMP_REG1));
}
static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op,
@@ -1655,7 +1340,7 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_comp
FAIL_IF(push_inst(compiler, (FCVTZS ^ inv_bits) | RD(dst_r) | VN(src)));
if (dst & SLJIT_MEM)
- return emit_op_mem(compiler, ((GET_OPCODE(op) == SLJIT_CONV_S32_FROM_F64) ? INT_SIZE : WORD_SIZE) | STORE, TMP_REG1, dst, dstw);
+ return emit_op_mem(compiler, ((GET_OPCODE(op) == SLJIT_CONV_S32_FROM_F64) ? INT_SIZE : WORD_SIZE) | STORE, TMP_REG1, dst, dstw, TMP_REG2);
return SLJIT_SUCCESS;
}
@@ -1670,7 +1355,7 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_comp
inv_bits |= (1 << 31);
if (src & SLJIT_MEM) {
- emit_op_mem(compiler, ((GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32) ? INT_SIZE : WORD_SIZE), TMP_REG1, src, srcw);
+ emit_op_mem(compiler, ((GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32) ? INT_SIZE : WORD_SIZE), TMP_REG1, src, srcw, TMP_REG1);
src = TMP_REG1;
} else if (src & SLJIT_IMM) {
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
@@ -1716,17 +1401,15 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
sljit_ins inv_bits;
CHECK_ERROR();
- compiler->cache_arg = 0;
- compiler->cache_argw = 0;
- SLJIT_COMPILE_ASSERT((INT_SIZE ^ 0x100) == WORD_SIZE, must_be_one_bit_difference);
+ SLJIT_COMPILE_ASSERT((INT_SIZE ^ 0x1) == WORD_SIZE, must_be_one_bit_difference);
SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw);
inv_bits = (op & SLJIT_F32_OP) ? (1 << 22) : 0;
dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
if (src & SLJIT_MEM) {
- emit_fop_mem(compiler, (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32) ? (mem_flags ^ 0x100) : mem_flags, dst_r, src, srcw);
+ emit_fop_mem(compiler, (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32) ? (mem_flags ^ 0x1) : mem_flags, dst_r, src, srcw);
src = dst_r;
}
@@ -1769,9 +1452,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compil
ADJUST_LOCAL_OFFSET(src1, src1w);
ADJUST_LOCAL_OFFSET(src2, src2w);
- compiler->cache_arg = 0;
- compiler->cache_argw = 0;
-
dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
if (src1 & SLJIT_MEM) {
emit_fop_mem(compiler, mem_flags, TMP_FREG1, src1, src1w);
@@ -1816,7 +1496,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *
return push_inst(compiler, ORR | RD(dst) | RN(TMP_ZERO) | RM(TMP_LR));
/* Memory. */
- return emit_op_mem(compiler, WORD_SIZE | STORE, TMP_LR, dst, dstw);
+ return emit_op_mem(compiler, WORD_SIZE | STORE, TMP_LR, dst, dstw, TMP_REG1);
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_s32 src, sljit_sw srcw)
@@ -1827,10 +1507,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_return(struct sljit_compiler
if (FAST_IS_REG(src))
FAIL_IF(push_inst(compiler, ORR | RD(TMP_LR) | RN(TMP_ZERO) | RM(src)));
- else if (src & SLJIT_MEM)
- FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_LR, src, srcw));
- else if (src & SLJIT_IMM)
- FAIL_IF(load_immediate(compiler, TMP_LR, srcw));
+ else
+ FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_LR, src, srcw, TMP_REG1));
return push_inst(compiler, RET | RN(TMP_LR));
}
@@ -1936,6 +1614,20 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile
return jump;
}
+SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 arg_types)
+{
+ CHECK_ERROR_PTR();
+ CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types));
+
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
+ || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ compiler->skip_checks = 1;
+#endif
+
+ return sljit_emit_jump(compiler, type);
+}
+
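The new sljit_emit_call()/sljit_emit_icall() entry points take the function signature as a packed arg_types word: the return type sits in the lowest field and each argument follows in the next SLJIT_DEF_SHIFT-wide field, which is how get_arg_count() and the per-backend marshalling loops walk it. A self-contained decoding sketch; the field width and type values below are stand-ins, not the real sljitLir.h constants:

#include <stdio.h>

/* Stand-in constants: the real ones live in sljitLir.h. */
#define DEF_SHIFT   4
#define DEF_MASK    ((1 << DEF_SHIFT) - 1)
#define ARG_TYPE_SW  1   /* machine word */
#define ARG_TYPE_F32 5   /* placeholder value */
#define ARG_TYPE_F64 6   /* placeholder value */

/* Mirrors the shape of get_arg_count(): skip the return type field,
   then count the integer/pointer arguments. */
static int count_word_args(int arg_types)
{
	int count = 0;

	arg_types >>= DEF_SHIFT;
	while (arg_types) {
		int t = arg_types & DEF_MASK;
		if (t != ARG_TYPE_F32 && t != ARG_TYPE_F64)
			count++;
		arg_types >>= DEF_SHIFT;
	}
	return count;
}

int main(void)
{
	/* signature: sljit_sw f(sljit_sw, double, sljit_sw) */
	int sig = ARG_TYPE_SW
		| (ARG_TYPE_SW  << DEF_SHIFT)
		| (ARG_TYPE_F64 << (2 * DEF_SHIFT))
		| (ARG_TYPE_SW  << (3 * DEF_SHIFT));

	printf("%d word args\n", count_word_args(sig));   /* prints: 2 word args */
	return 0;
}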
static SLJIT_INLINE struct sljit_jump* emit_cmp_to0(struct sljit_compiler *compiler, sljit_s32 type,
sljit_s32 src, sljit_sw srcw)
{
@@ -1951,13 +1643,14 @@ static SLJIT_INLINE struct sljit_jump* emit_cmp_to0(struct sljit_compiler *compi
jump->flags |= IS_CBZ | IS_COND;
if (src & SLJIT_MEM) {
- PTR_FAIL_IF(emit_op_mem(compiler, inv_bits ? INT_SIZE : WORD_SIZE, TMP_REG1, src, srcw));
+ PTR_FAIL_IF(emit_op_mem(compiler, inv_bits ? INT_SIZE : WORD_SIZE, TMP_REG1, src, srcw, TMP_REG1));
src = TMP_REG1;
}
else if (src & SLJIT_IMM) {
PTR_FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
src = TMP_REG1;
}
+
SLJIT_ASSERT(FAST_IS_REG(src));
if ((type & 0xff) == SLJIT_EQUAL)
@@ -1978,15 +1671,15 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi
CHECK(check_sljit_emit_ijump(compiler, type, src, srcw));
ADJUST_LOCAL_OFFSET(src, srcw);
- /* In ARM, we don't need to touch the arguments. */
if (!(src & SLJIT_IMM)) {
if (src & SLJIT_MEM) {
- FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG1, src, srcw));
+ FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG1, src, srcw, TMP_REG1));
src = TMP_REG1;
}
return push_inst(compiler, ((type >= SLJIT_FAST_CALL) ? BLR : BR) | RN(src));
}
+ /* These jumps are converted to jump/call instructions when possible. */
jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
FAIL_IF(!jump);
set_jump(jump, compiler, JUMP_ADDR | ((type >= SLJIT_FAST_CALL) ? IS_BL : 0));
@@ -1997,6 +1690,21 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi
return push_inst(compiler, ((type >= SLJIT_FAST_CALL) ? BLR : BR) | RN(TMP_REG1));
}
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 arg_types,
+ sljit_s32 src, sljit_sw srcw)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw));
+
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
+ || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ compiler->skip_checks = 1;
+#endif
+
+ return sljit_emit_ijump(compiler, type, src, srcw);
+}
+
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 type)
@@ -2013,15 +1721,18 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
if (GET_OPCODE(op) < SLJIT_ADD) {
FAIL_IF(push_inst(compiler, CSINC | (cc << 12) | RD(dst_r) | RN(TMP_ZERO) | RM(TMP_ZERO)));
- if (dst_r != TMP_REG1)
- return SLJIT_SUCCESS;
- return emit_op_mem(compiler, (GET_OPCODE(op) == SLJIT_MOV ? WORD_SIZE : INT_SIZE) | STORE, TMP_REG1, dst, dstw);
+
+ if (dst_r == TMP_REG1) {
+ mem_flags = (GET_OPCODE(op) == SLJIT_MOV ? WORD_SIZE : INT_SIZE) | STORE;
+ return emit_op_mem(compiler, mem_flags, TMP_REG1, dst, dstw, TMP_REG2);
+ }
+
+ return SLJIT_SUCCESS;
}
- compiler->cache_arg = 0;
- compiler->cache_argw = 0;
flags = HAS_FLAGS(op) ? SET_FLAGS : 0;
mem_flags = WORD_SIZE;
+
if (op & SLJIT_I32_OP) {
flags |= INT_OP;
mem_flags = INT_SIZE;
@@ -2030,7 +1741,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
src_r = dst;
if (dst & SLJIT_MEM) {
- FAIL_IF(emit_op_mem2(compiler, mem_flags, TMP_REG1, dst, dstw, dst, dstw));
+ FAIL_IF(emit_op_mem(compiler, mem_flags, TMP_REG1, dst, dstw, TMP_REG1));
src_r = TMP_REG1;
}
@@ -2038,7 +1749,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
emit_op_imm(compiler, flags | GET_OPCODE(op), dst_r, src_r, TMP_REG2);
if (dst & SLJIT_MEM)
- return emit_op_mem2(compiler, mem_flags | STORE, TMP_REG1, dst, dstw, 0, 0);
+ return emit_op_mem(compiler, mem_flags | STORE, TMP_REG1, dst, dstw, TMP_REG2);
return SLJIT_SUCCESS;
}
@@ -2066,6 +1777,85 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compil
return push_inst(compiler, (CSEL ^ inv_bits) | (cc << 12) | RD(dst_reg) | RN(dst_reg) | RM(src));
}
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 reg,
+ sljit_s32 mem, sljit_sw memw)
+{
+ sljit_u32 sign = 0, inst;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw));
+
+ if ((mem & OFFS_REG_MASK) || (memw > 255 && memw < -256))
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_MEM_SUPP)
+ return SLJIT_SUCCESS;
+
+ switch (type & 0xff) {
+ case SLJIT_MOV:
+ case SLJIT_MOV_P:
+ inst = STURBI | (MEM_SIZE_SHIFT(WORD_SIZE) << 30) | 0x400;
+ break;
+ case SLJIT_MOV_S8:
+ sign = 1;
+ case SLJIT_MOV_U8:
+ inst = STURBI | (MEM_SIZE_SHIFT(BYTE_SIZE) << 30) | 0x400;
+ break;
+ case SLJIT_MOV_S16:
+ sign = 1;
+ case SLJIT_MOV_U16:
+ inst = STURBI | (MEM_SIZE_SHIFT(HALF_SIZE) << 30) | 0x400;
+ break;
+ case SLJIT_MOV_S32:
+ sign = 1;
+ case SLJIT_MOV_U32:
+ inst = STURBI | (MEM_SIZE_SHIFT(INT_SIZE) << 30) | 0x400;
+ break;
+ default:
+ SLJIT_UNREACHABLE();
+ inst = STURBI | (MEM_SIZE_SHIFT(WORD_SIZE) << 30) | 0x400;
+ break;
+ }
+
+ if (!(type & SLJIT_MEM_STORE))
+ inst |= sign ? 0x00800000 : 0x00400000;
+
+ if (type & SLJIT_MEM_PRE)
+ inst |= 0x800;
+
+ return push_inst(compiler, inst | RT(reg) | RN(mem & REG_MASK) | ((memw & 0x1ff) << 12));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 mem, sljit_sw memw)
+{
+ sljit_u32 inst;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fmem(compiler, type, freg, mem, memw));
+
+ if ((mem & OFFS_REG_MASK) || (memw > 255 && memw < -256))
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_MEM_SUPP)
+ return SLJIT_SUCCESS;
+
+ inst = STUR_FI | 0x80000400;
+
+ if (!(type & SLJIT_F32_OP))
+ inst |= 0x40000000;
+
+ if (!(type & SLJIT_MEM_STORE))
+ inst |= 0x00400000;
+
+ if (type & SLJIT_MEM_PRE)
+ inst |= 0x800;
+
+ return push_inst(compiler, inst | VT(freg) | RN(mem & REG_MASK) | ((memw & 0x1ff) << 12));
+}
+
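sljit_emit_mem()/sljit_emit_fmem() return SLJIT_ERR_UNSUPPORTED when the requested pre/post-indexed form cannot be encoded, and SLJIT_MEM_SUPP turns the call into a pure capability probe that emits nothing. A hedged sketch of the intended probe-then-emit pattern, assuming the public sljitLir.h operand macros; the fallback path is illustrative and not taken from this patch:

#include "sljitLir.h"

static sljit_s32 store_word_pre_indexed(struct sljit_compiler *compiler,
	sljit_s32 reg, sljit_s32 base, sljit_sw offset)
{
	sljit_s32 type = SLJIT_MOV | SLJIT_MEM_STORE | SLJIT_MEM_PRE;
	sljit_s32 status;

	/* Probe first: with SLJIT_MEM_SUPP nothing is emitted. */
	if (sljit_emit_mem(compiler, type | SLJIT_MEM_SUPP,
			reg, SLJIT_MEM1(base), offset) == SLJIT_SUCCESS)
		return sljit_emit_mem(compiler, type, reg, SLJIT_MEM1(base), offset);

	/* Fallback: update the base explicitly, then do a plain store. */
	status = sljit_emit_op2(compiler, SLJIT_ADD, base, 0, base, 0, SLJIT_IMM, offset);
	if (status != SLJIT_SUCCESS)
		return status;
	return sljit_emit_op1(compiler, SLJIT_MOV, SLJIT_MEM1(base), 0, reg, 0);
}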
SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw init_value)
{
struct sljit_const *const_;
@@ -2083,7 +1873,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compi
PTR_FAIL_IF(emit_imm64_const(compiler, dst_r, init_value));
if (dst & SLJIT_MEM)
- PTR_FAIL_IF(emit_op_mem(compiler, WORD_SIZE | STORE, dst_r, dst, dstw));
+ PTR_FAIL_IF(emit_op_mem(compiler, WORD_SIZE | STORE, dst_r, dst, dstw, TMP_REG2));
return const_;
}
diff --git a/src/3rdparty/pcre2/src/sljit/sljitNativeARM_T2_32.c b/src/3rdparty/pcre2/src/sljit/sljitNativeARM_T2_32.c
index 29e5566a82..75e7a38b5f 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitNativeARM_T2_32.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitNativeARM_T2_32.c
@@ -26,7 +26,11 @@
SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void)
{
- return "ARM-Thumb2" SLJIT_CPUINFO;
+#ifdef __SOFTFP__
+ return "ARM-Thumb2" SLJIT_CPUINFO " ABI:softfp";
+#else
+ return "ARM-Thumb2" SLJIT_CPUINFO " ABI:hardfp";
+#endif
}
/* Length of an instruction word. */
@@ -37,12 +41,16 @@ typedef sljit_u32 sljit_ins;
#define TMP_REG2 (SLJIT_NUMBER_OF_REGISTERS + 3)
#define TMP_PC (SLJIT_NUMBER_OF_REGISTERS + 4)
-#define TMP_FREG1 (0)
-#define TMP_FREG2 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1)
+#define TMP_FREG1 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1)
+#define TMP_FREG2 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 2)
/* See sljit_emit_enter and sljit_emit_op0 if you want to change them. */
static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 5] = {
- 0, 0, 1, 2, 12, 11, 10, 9, 8, 7, 6, 5, 4, 13, 3, 14, 15
+ 0, 0, 1, 2, 3, 11, 10, 9, 8, 7, 6, 5, 4, 13, 12, 14, 15
+};
+
+static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
+ 0, 0, 1, 2, 3, 4, 5, 6, 7
};
#define COPY_BITS(src, from, to, bits) \
@@ -69,9 +77,9 @@ static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 5] = {
#define RN4(rn) (reg_map[rn] << 16)
#define RM4(rm) (reg_map[rm])
#define RT4(rt) (reg_map[rt] << 12)
-#define DD4(dd) ((dd) << 12)
-#define DN4(dn) ((dn) << 16)
-#define DM4(dm) (dm)
+#define DD4(dd) (freg_map[dd] << 12)
+#define DN4(dn) (freg_map[dn] << 16)
+#define DM4(dm) (freg_map[dm])
#define IMM5(imm) \
(COPY_BITS(imm, 2, 12, 3) | ((imm & 0x3) << 6))
#define IMM12(imm) \
@@ -178,6 +186,7 @@ static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 5] = {
#define VDIV_F32 0xee800a00
#define VMOV_F32 0xeeb00a40
#define VMOV 0xee000a10
+#define VMOV2 0xec400a10
#define VMRS 0xeef1fa10
#define VMUL_F32 0xee200a00
#define VNEG_F32 0xeeb10a40
@@ -208,10 +217,10 @@ static sljit_s32 push_inst32(struct sljit_compiler *compiler, sljit_ins inst)
static SLJIT_INLINE sljit_s32 emit_imm32_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_uw imm)
{
- FAIL_IF(push_inst32(compiler, MOVW | RD4(dst) |
- COPY_BITS(imm, 12, 16, 4) | COPY_BITS(imm, 11, 26, 1) | COPY_BITS(imm, 8, 12, 3) | (imm & 0xff)));
- return push_inst32(compiler, MOVT | RD4(dst) |
- COPY_BITS(imm, 12 + 16, 16, 4) | COPY_BITS(imm, 11 + 16, 26, 1) | COPY_BITS(imm, 8 + 16, 12, 3) | ((imm & 0xff0000) >> 16));
+ FAIL_IF(push_inst32(compiler, MOVW | RD4(dst)
+ | COPY_BITS(imm, 12, 16, 4) | COPY_BITS(imm, 11, 26, 1) | COPY_BITS(imm, 8, 12, 3) | (imm & 0xff)));
+ return push_inst32(compiler, MOVT | RD4(dst)
+ | COPY_BITS(imm, 12 + 16, 16, 4) | COPY_BITS(imm, 11 + 16, 26, 1) | COPY_BITS(imm, 8 + 16, 12, 3) | ((imm & 0xff0000) >> 16));
}
static SLJIT_INLINE void modify_imm32_const(sljit_u16 *inst, sljit_uw new_imm)
@@ -444,7 +453,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
return 1;
#endif
- case SLJIT_HAS_PRE_UPDATE:
case SLJIT_HAS_CLZ:
case SLJIT_HAS_CMOV:
return 1;
@@ -522,13 +530,13 @@ static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 dst,
}
/* set low 16 bits, set hi 16 bits to 0. */
- FAIL_IF(push_inst32(compiler, MOVW | RD4(dst) |
- COPY_BITS(imm, 12, 16, 4) | COPY_BITS(imm, 11, 26, 1) | COPY_BITS(imm, 8, 12, 3) | (imm & 0xff)));
+ FAIL_IF(push_inst32(compiler, MOVW | RD4(dst)
+ | COPY_BITS(imm, 12, 16, 4) | COPY_BITS(imm, 11, 26, 1) | COPY_BITS(imm, 8, 12, 3) | (imm & 0xff)));
/* set hi 16 bit if needed. */
if (imm >= 0x10000)
- return push_inst32(compiler, MOVT | RD4(dst) |
- COPY_BITS(imm, 12 + 16, 16, 4) | COPY_BITS(imm, 11 + 16, 26, 1) | COPY_BITS(imm, 8 + 16, 12, 3) | ((imm & 0xff0000) >> 16));
+ return push_inst32(compiler, MOVT | RD4(dst)
+ | COPY_BITS(imm, 12 + 16, 16, 4) | COPY_BITS(imm, 11 + 16, 26, 1) | COPY_BITS(imm, 8 + 16, 12, 3) | ((imm & 0xff0000) >> 16));
return SLJIT_SUCCESS;
}
@@ -729,34 +737,26 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s
case SLJIT_MOV_U32:
case SLJIT_MOV_S32:
case SLJIT_MOV_P:
- case SLJIT_MOVU:
- case SLJIT_MOVU_U32:
- case SLJIT_MOVU_S32:
- case SLJIT_MOVU_P:
SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG2);
if (dst == arg2)
return SLJIT_SUCCESS;
return push_inst16(compiler, MOV | SET_REGS44(dst, arg2));
case SLJIT_MOV_U8:
- case SLJIT_MOVU_U8:
SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG2);
if (IS_2_LO_REGS(dst, arg2))
return push_inst16(compiler, UXTB | RD3(dst) | RN3(arg2));
return push_inst32(compiler, UXTB_W | RD4(dst) | RM4(arg2));
case SLJIT_MOV_S8:
- case SLJIT_MOVU_S8:
SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG2);
if (IS_2_LO_REGS(dst, arg2))
return push_inst16(compiler, SXTB | RD3(dst) | RN3(arg2));
return push_inst32(compiler, SXTB_W | RD4(dst) | RM4(arg2));
case SLJIT_MOV_U16:
- case SLJIT_MOVU_U16:
SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG2);
if (IS_2_LO_REGS(dst, arg2))
return push_inst16(compiler, UXTH | RD3(dst) | RN3(arg2));
return push_inst32(compiler, UXTH_W | RD4(dst) | RM4(arg2));
case SLJIT_MOV_S16:
- case SLJIT_MOVU_S16:
SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG2);
if (IS_2_LO_REGS(dst, arg2))
return push_inst16(compiler, SXTH | RD3(dst) | RN3(arg2));
@@ -840,8 +840,6 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s
#define HALF_SIZE 0x08
#define PRELOAD 0x0c
-#define UPDATE 0x10
-
#define IS_WORD_SIZE(flags) (!(flags & (BYTE_SIZE | HALF_SIZE)))
#define OFFSET_CHECK(imm, shift) (!(argw & ~(imm << shift)))
@@ -940,12 +938,10 @@ static SLJIT_INLINE sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit
sljit_s32 arg, sljit_sw argw, sljit_s32 tmp_reg)
{
sljit_s32 other_r;
- sljit_s32 update = flags & UPDATE;
sljit_uw tmp;
SLJIT_ASSERT(arg & SLJIT_MEM);
SLJIT_ASSERT((arg & REG_MASK) != tmp_reg);
- flags &= ~UPDATE;
arg &= ~SLJIT_MEM;
if (SLJIT_UNLIKELY(!(arg & REG_MASK))) {
@@ -961,63 +957,6 @@ static SLJIT_INLINE sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit
return push_inst32(compiler, sljit_mem32[flags] | MEM_IMM12 | RT4(reg) | RN4(tmp_reg));
}
- if (SLJIT_UNLIKELY(update)) {
- SLJIT_ASSERT(reg != arg);
-
- if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) {
- other_r = OFFS_REG(arg);
- arg &= 0xf;
-
- if (IS_3_LO_REGS(reg, arg, other_r))
- FAIL_IF(push_inst16(compiler, sljit_mem16[flags] | RD3(reg) | RN3(arg) | RM3(other_r)));
- else
- FAIL_IF(push_inst32(compiler, sljit_mem32[flags] | RT4(reg) | RN4(arg) | RM4(other_r)));
- return push_inst16(compiler, ADD | SET_REGS44(arg, other_r));
- }
-
- if (argw > 0xff) {
- tmp = get_imm(argw & ~0xff);
- if (tmp != INVALID_IMM) {
- push_inst32(compiler, ADD_WI | RD4(arg) | RN4(arg) | tmp);
- argw = argw & 0xff;
- }
- }
- else if (argw < -0xff) {
- tmp = get_imm(-argw & ~0xff);
- if (tmp != INVALID_IMM) {
- push_inst32(compiler, SUB_WI | RD4(arg) | RN4(arg) | tmp);
- argw = -(-argw & 0xff);
- }
- }
-
- if (argw == 0) {
- if (IS_2_LO_REGS(reg, arg) && sljit_mem16_imm5[flags])
- return push_inst16(compiler, sljit_mem16_imm5[flags] | RD3(reg) | RN3(arg));
- return push_inst32(compiler, sljit_mem32[flags] | MEM_IMM12 | RT4(reg) | RN4(arg));
- }
-
- if (argw <= 0xff && argw >= -0xff) {
- if (argw >= 0)
- argw |= 0x200;
- else {
- argw = -argw;
- }
-
- SLJIT_ASSERT(argw >= 0 && (argw & 0xff) <= 0xff);
- return push_inst32(compiler, sljit_mem32[flags] | MEM_IMM8 | RT4(reg) | RN4(arg) | 0x100 | argw);
- }
-
- FAIL_IF(load_immediate(compiler, tmp_reg, argw));
-
- SLJIT_ASSERT(reg != tmp_reg);
-
- if (IS_3_LO_REGS(reg, arg, tmp_reg))
- FAIL_IF(push_inst16(compiler, sljit_mem16[flags] | RD3(reg) | RN3(arg) | RM3(tmp_reg)));
- else
- FAIL_IF(push_inst32(compiler, sljit_mem32[flags] | RT4(reg) | RN4(arg) | RM4(tmp_reg)));
- return push_inst16(compiler, ADD | SET_REGS44(arg, tmp_reg));
- }
-
if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) {
argw &= 0x3;
other_r = OFFS_REG(arg);
@@ -1088,15 +1027,15 @@ static SLJIT_INLINE sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit
/* --------------------------------------------------------------------- */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler,
- sljit_s32 options, sljit_s32 args, sljit_s32 scratches, sljit_s32 saveds,
+ sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
- sljit_s32 size, i, tmp;
+ sljit_s32 args, size, i, tmp;
sljit_ins push = 0;
CHECK_ERROR();
- CHECK(check_sljit_emit_enter(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size));
- set_emit_enter(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size);
+ CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
+ set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
tmp = saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - saveds) : SLJIT_FIRST_SAVED_REG;
for (i = SLJIT_S0; i >= tmp; i--)
@@ -1120,6 +1059,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
FAIL_IF(emit_op_imm(compiler, SLJIT_SUB | ARG2_IMM, SLJIT_SP, SLJIT_SP, local_size));
}
+ args = get_arg_count(arg_types);
+
if (args >= 1)
FAIL_IF(push_inst16(compiler, MOV | SET_REGS44(SLJIT_S0, SLJIT_R0)));
if (args >= 2)
@@ -1131,14 +1072,14 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler,
- sljit_s32 options, sljit_s32 args, sljit_s32 scratches, sljit_s32 saveds,
+ sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
sljit_s32 size;
CHECK_ERROR();
- CHECK(check_sljit_set_context(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size));
- set_set_context(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size);
+ CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
+ set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1);
compiler->local_size = ((size + local_size + 7) & ~7) - size;
@@ -1219,11 +1160,11 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
case SLJIT_DIV_UW:
case SLJIT_DIV_SW:
SLJIT_COMPILE_ASSERT((SLJIT_DIVMOD_UW & 0x2) == 0 && SLJIT_DIV_UW - 0x2 == SLJIT_DIVMOD_UW, bad_div_opcode_assignments);
- SLJIT_ASSERT(reg_map[2] == 1 && reg_map[3] == 2 && reg_map[4] == 12);
+ SLJIT_ASSERT(reg_map[2] == 1 && reg_map[3] == 2 && reg_map[4] == 3);
saved_reg_count = 0;
if (compiler->scratches >= 4)
- saved_reg_list[saved_reg_count++] = 12;
+ saved_reg_list[saved_reg_count++] = 3;
if (compiler->scratches >= 3)
saved_reg_list[saved_reg_count++] = 2;
if (op >= SLJIT_DIV_UW)
@@ -1289,7 +1230,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
dst_r = SLOW_IS_REG(dst) ? dst : TMP_REG1;
op = GET_OPCODE(op);
- if (op >= SLJIT_MOV && op <= SLJIT_MOVU_P) {
+ if (op >= SLJIT_MOV && op <= SLJIT_MOV_P) {
switch (op) {
case SLJIT_MOV:
case SLJIT_MOV_U32:
@@ -1317,32 +1258,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
if (src & SLJIT_IMM)
srcw = (sljit_s16)srcw;
break;
- case SLJIT_MOVU:
- case SLJIT_MOVU_U32:
- case SLJIT_MOVU_S32:
- case SLJIT_MOVU_P:
- flags = WORD_SIZE | UPDATE;
- break;
- case SLJIT_MOVU_U8:
- flags = BYTE_SIZE | UPDATE;
- if (src & SLJIT_IMM)
- srcw = (sljit_u8)srcw;
- break;
- case SLJIT_MOVU_S8:
- flags = BYTE_SIZE | SIGNED | UPDATE;
- if (src & SLJIT_IMM)
- srcw = (sljit_s8)srcw;
- break;
- case SLJIT_MOVU_U16:
- flags = HALF_SIZE | UPDATE;
- if (src & SLJIT_IMM)
- srcw = (sljit_u16)srcw;
- break;
- case SLJIT_MOVU_S16:
- flags = HALF_SIZE | SIGNED | UPDATE;
- if (src & SLJIT_IMM)
- srcw = (sljit_s16)srcw;
- break;
default:
SLJIT_UNREACHABLE();
flags = 0;
@@ -1352,7 +1267,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
if (src & SLJIT_IMM)
FAIL_IF(emit_op_imm(compiler, SLJIT_MOV | ARG2_IMM, dst_r, TMP_REG2, srcw));
else if (src & SLJIT_MEM) {
- FAIL_IF(emit_op_mem(compiler, flags, dst_r, src, srcw, ((flags & UPDATE) && dst_r == TMP_REG1) ? TMP_REG2 : TMP_REG1));
+ FAIL_IF(emit_op_mem(compiler, flags, dst_r, src, srcw, TMP_REG1));
} else {
if (dst_r != TMP_REG1)
return emit_op_imm(compiler, op, dst_r, TMP_REG2, src);
@@ -1362,7 +1277,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
if (!(dst & SLJIT_MEM))
return SLJIT_SUCCESS;
- return emit_op_mem(compiler, flags | STORE, dst_r, dst, dstw, (dst_r == TMP_REG1) ? TMP_REG2 : TMP_REG1);
+ return emit_op_mem(compiler, flags | STORE, dst_r, dst, dstw, TMP_REG2);
}
if (op == SLJIT_NEG) {
@@ -1375,20 +1290,16 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
flags = HAS_FLAGS(op_flags) ? SET_FLAGS : 0;
- if (src & SLJIT_IMM)
- flags |= ARG2_IMM;
- else if (src & SLJIT_MEM) {
+ if (src & SLJIT_MEM) {
FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG1, src, srcw, TMP_REG1));
- srcw = TMP_REG1;
+ src = TMP_REG1;
}
- else
- srcw = src;
- emit_op_imm(compiler, flags | op, dst_r, TMP_REG2, srcw);
+ emit_op_imm(compiler, flags | op, dst_r, TMP_REG2, src);
- if (!(dst & SLJIT_MEM))
- return SLJIT_SUCCESS;
- return emit_op_mem(compiler, flags | STORE, dst_r, dst, dstw, TMP_REG2);
+ if (SLJIT_UNLIKELY(dst & SLJIT_MEM))
+ return emit_op_mem(compiler, flags | STORE, dst_r, dst, dstw, TMP_REG2);
+ return SLJIT_SUCCESS;
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compiler, sljit_s32 op,
@@ -1448,7 +1359,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg)
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg)
{
CHECK_REG_INDEX(check_sljit_get_float_register_index(reg));
- return reg << 1;
+ return (freg_map[reg] << 1);
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler,
@@ -1702,11 +1613,9 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_return(struct sljit_compiler
if (FAST_IS_REG(src))
FAIL_IF(push_inst16(compiler, MOV | SET_REGS44(TMP_REG2, src)));
- else if (src & SLJIT_MEM) {
+ else
FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG2, src, srcw, TMP_REG2));
- }
- else if (src & SLJIT_IMM)
- FAIL_IF(load_immediate(compiler, TMP_REG2, srcw));
+
return push_inst16(compiler, BX | RN3(TMP_REG2));
}
@@ -1798,7 +1707,6 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile
set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP);
type &= 0xff;
- /* In ARM, we don't need to touch the arguments. */
PTR_FAIL_IF(emit_imm32_const(compiler, TMP_REG1, 0));
if (type < SLJIT_JUMP) {
jump->flags |= IS_COND;
@@ -1818,6 +1726,241 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile
return jump;
}
+#ifdef __SOFTFP__
+
+static sljit_s32 softfloat_call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *src)
+{
+ sljit_s32 stack_offset = 0;
+ sljit_s32 arg_count = 0;
+ sljit_s32 word_arg_offset = 0;
+ sljit_s32 float_arg_count = 0;
+ sljit_s32 types = 0;
+ sljit_s32 src_offset = 4 * sizeof(sljit_sw);
+ sljit_u8 offsets[4];
+
+ if (src && FAST_IS_REG(*src))
+ src_offset = reg_map[*src] * sizeof(sljit_sw);
+
+ arg_types >>= SLJIT_DEF_SHIFT;
+
+ while (arg_types) {
+ types = (types << SLJIT_DEF_SHIFT) | (arg_types & SLJIT_DEF_MASK);
+
+ switch (arg_types & SLJIT_DEF_MASK) {
+ case SLJIT_ARG_TYPE_F32:
+ offsets[arg_count] = (sljit_u8)stack_offset;
+ stack_offset += sizeof(sljit_f32);
+ arg_count++;
+ float_arg_count++;
+ break;
+ case SLJIT_ARG_TYPE_F64:
+ if (stack_offset & 0x7)
+ stack_offset += sizeof(sljit_sw);
+ offsets[arg_count] = (sljit_u8)stack_offset;
+ stack_offset += sizeof(sljit_f64);
+ arg_count++;
+ float_arg_count++;
+ break;
+ default:
+ offsets[arg_count] = (sljit_u8)stack_offset;
+ stack_offset += sizeof(sljit_sw);
+ arg_count++;
+ word_arg_offset += sizeof(sljit_sw);
+ break;
+ }
+
+ arg_types >>= SLJIT_DEF_SHIFT;
+ }
+
+ if (stack_offset > 16)
+ FAIL_IF(push_inst16(compiler, SUB_SP | (((stack_offset - 16) + 0x7) & ~0x7) >> 2));
+
+ SLJIT_ASSERT(reg_map[TMP_REG1] == 12);
+
+ /* Process arguments in reversed direction. */
+ while (types) {
+ switch (types & SLJIT_DEF_MASK) {
+ case SLJIT_ARG_TYPE_F32:
+ arg_count--;
+ float_arg_count--;
+ stack_offset = offsets[arg_count];
+
+ if (stack_offset < 16) {
+ if (src_offset == stack_offset) {
+ FAIL_IF(push_inst16(compiler, MOV | (src_offset << 1) | 4 | (1 << 7)));
+ *src = TMP_REG1;
+ }
+ FAIL_IF(push_inst32(compiler, VMOV | 0x100000 | (float_arg_count << 16) | (stack_offset << 10)));
+ } else
+ FAIL_IF(push_inst32(compiler, VSTR_F32 | 0x800000 | RN4(SLJIT_SP) | (float_arg_count << 12) | ((stack_offset - 16) >> 2)));
+ break;
+ case SLJIT_ARG_TYPE_F64:
+ arg_count--;
+ float_arg_count--;
+ stack_offset = offsets[arg_count];
+
+ SLJIT_ASSERT((stack_offset & 0x7) == 0);
+
+ if (stack_offset < 16) {
+ if (src_offset == stack_offset || src_offset == stack_offset + sizeof(sljit_sw)) {
+ FAIL_IF(push_inst16(compiler, MOV | (src_offset << 1) | 4 | (1 << 7)));
+ *src = TMP_REG1;
+ }
+ FAIL_IF(push_inst32(compiler, VMOV2 | 0x100000 | (stack_offset << 10) | ((stack_offset + sizeof(sljit_sw)) << 14) | float_arg_count));
+ } else
+ FAIL_IF(push_inst32(compiler, VSTR_F32 | 0x800100 | RN4(SLJIT_SP) | (float_arg_count << 12) | ((stack_offset - 16) >> 2)));
+ break;
+ default:
+ arg_count--;
+ word_arg_offset -= sizeof(sljit_sw);
+ stack_offset = offsets[arg_count];
+
+ SLJIT_ASSERT(stack_offset >= word_arg_offset);
+
+ if (stack_offset != word_arg_offset) {
+ if (stack_offset < 16) {
+ if (src_offset == stack_offset) {
+ FAIL_IF(push_inst16(compiler, MOV | (src_offset << 1) | 4 | (1 << 7)));
+ *src = TMP_REG1;
+ }
+ else if (src_offset == word_arg_offset) {
+ *src = 1 + (stack_offset >> 2);
+ src_offset = stack_offset;
+ }
+ FAIL_IF(push_inst16(compiler, MOV | (stack_offset >> 2) | (word_arg_offset << 1)));
+ } else
+ FAIL_IF(push_inst16(compiler, STR_SP | (word_arg_offset << 6) | ((stack_offset - 16) >> 2)));
+ }
+ break;
+ }
+
+ types >>= SLJIT_DEF_SHIFT;
+ }
+
+ return SLJIT_SUCCESS;
+}
+
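softfloat_call_with_args() lays every argument out in a flat byte area whose first 16 bytes correspond to r0-r3 and whose remainder spills to the stack, with doubles kept 8-byte aligned; the second loop then moves the values into place in reverse order. A standalone sketch of just the layout step (argument kinds are encoded ad hoc here as 'w'/'f'/'d'):

#include <stdio.h>
#include <stddef.h>

/* 'w' = machine word, 'f' = f32 (both 4 bytes on this ABI), 'd' = f64. */
static void layout(const char *sig)
{
	size_t offset = 0;

	for (; *sig; sig++) {
		size_t size = (*sig == 'd') ? 8 : 4;

		if (size == 8 && (offset & 7))
			offset += 4;            /* keep doubles 8-byte aligned */

		if (offset < 16)
			printf("%c -> core registers, byte %zu (r%zu)\n", *sig, offset, offset / 4);
		else
			printf("%c -> stack, byte %zu\n", *sig, offset - 16);

		offset += size;
	}
}

int main(void)
{
	layout("wdw");   /* word in r0, double in r2:r3, last word on the stack */
	return 0;
}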
+static sljit_s32 softfloat_post_call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types)
+{
+ sljit_s32 stack_size = 0;
+
+ if ((arg_types & SLJIT_DEF_MASK) == SLJIT_ARG_TYPE_F32)
+ FAIL_IF(push_inst32(compiler, VMOV | (0 << 16) | (0 << 12)));
+ if ((arg_types & SLJIT_DEF_MASK) == SLJIT_ARG_TYPE_F64)
+ FAIL_IF(push_inst32(compiler, VMOV2 | (1 << 16) | (0 << 12) | 0));
+
+ arg_types >>= SLJIT_DEF_SHIFT;
+
+ while (arg_types) {
+ switch (arg_types & SLJIT_DEF_MASK) {
+ case SLJIT_ARG_TYPE_F32:
+ stack_size += sizeof(sljit_f32);
+ break;
+ case SLJIT_ARG_TYPE_F64:
+ if (stack_size & 0x7)
+ stack_size += sizeof(sljit_sw);
+ stack_size += sizeof(sljit_f64);
+ break;
+ default:
+ stack_size += sizeof(sljit_sw);
+ break;
+ }
+
+ arg_types >>= SLJIT_DEF_SHIFT;
+ }
+
+ if (stack_size <= 16)
+ return SLJIT_SUCCESS;
+
+ return push_inst16(compiler, ADD_SP | ((((stack_size - 16) + 0x7) & ~0x7) >> 2));
+}
+
+#else
+
+static sljit_s32 hardfloat_call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types)
+{
+ sljit_u32 remap = 0;
+ sljit_u32 offset = 0;
+ sljit_u32 new_offset, mask;
+
+ /* Remove return value. */
+ arg_types >>= SLJIT_DEF_SHIFT;
+
+ while (arg_types) {
+ if ((arg_types & SLJIT_DEF_MASK) == SLJIT_ARG_TYPE_F32) {
+ new_offset = 0;
+ mask = 1;
+
+ while (remap & mask) {
+ new_offset++;
+ mask <<= 1;
+ }
+ remap |= mask;
+
+ if (offset != new_offset)
+ FAIL_IF(push_inst32(compiler, VMOV_F32 | DD4((new_offset >> 1) + 1)
+ | ((new_offset & 0x1) ? 0x400000 : 0) | DM4((offset >> 1) + 1)));
+
+ offset += 2;
+ }
+ else if ((arg_types & SLJIT_DEF_MASK) == SLJIT_ARG_TYPE_F64) {
+ new_offset = 0;
+ mask = 3;
+
+ while (remap & mask) {
+ new_offset += 2;
+ mask <<= 2;
+ }
+ remap |= mask;
+
+ if (offset != new_offset)
+ FAIL_IF(push_inst32(compiler, VMOV_F32 | SLJIT_F32_OP | DD4((new_offset >> 1) + 1) | DM4((offset >> 1) + 1)));
+
+ offset += 2;
+ }
+ arg_types >>= SLJIT_DEF_SHIFT;
+ }
+
+ return SLJIT_SUCCESS;
+}
+
+#endif
+
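hardfloat_call_with_args() reshuffles the float arguments into the VFP argument registers using a bitmask: a single takes the first free s-slot and a double the first free even pair, so later singles can back-fill gaps left by doubles, in the AAPCS-VFP style. A toy allocator showing the same bookkeeping (register names are illustrative only):

#include <stdio.h>

static void assign(const char *sig)   /* 'f' = f32, 'd' = f64 */
{
	unsigned int used = 0;

	for (; *sig; sig++) {
		unsigned int mask = (*sig == 'd') ? 3u : 1u;
		int step = (*sig == 'd') ? 2 : 1;
		int slot = 0;

		while (used & (mask << slot))
			slot += step;           /* first free slot / even pair */
		used |= mask << slot;

		if (*sig == 'd')
			printf("f64 -> d%d\n", slot / 2);
		else
			printf("f32 -> s%d\n", slot);
	}
}

int main(void)
{
	assign("fdf");   /* s0, d1 (s2:s3), then s1 back-fills the gap */
	return 0;
}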
+SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 arg_types)
+{
+#ifdef __SOFTFP__
+ struct sljit_jump *jump;
+#endif
+
+ CHECK_ERROR_PTR();
+ CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types));
+
+#ifdef __SOFTFP__
+ PTR_FAIL_IF(softfloat_call_with_args(compiler, arg_types, NULL));
+
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
+ || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ compiler->skip_checks = 1;
+#endif
+
+ jump = sljit_emit_jump(compiler, type);
+ PTR_FAIL_IF(jump == NULL);
+
+ PTR_FAIL_IF(softfloat_post_call_with_args(compiler, arg_types));
+ return jump;
+#else
+ PTR_FAIL_IF(hardfloat_call_with_args(compiler, arg_types));
+
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
+ || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ compiler->skip_checks = 1;
+#endif
+
+ return sljit_emit_jump(compiler, type);
+#endif
+}
+
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw)
{
struct sljit_jump *jump;
@@ -1826,16 +1969,20 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi
CHECK(check_sljit_emit_ijump(compiler, type, src, srcw));
ADJUST_LOCAL_OFFSET(src, srcw);
- /* In ARM, we don't need to touch the arguments. */
+ SLJIT_ASSERT(reg_map[TMP_REG1] != 14);
+
if (!(src & SLJIT_IMM)) {
- if (FAST_IS_REG(src))
+ if (FAST_IS_REG(src)) {
+ SLJIT_ASSERT(reg_map[src] != 14);
return push_inst16(compiler, (type <= SLJIT_JUMP ? BX : BLX) | RN3(src));
+ }
FAIL_IF(emit_op_mem(compiler, WORD_SIZE, type <= SLJIT_JUMP ? TMP_PC : TMP_REG1, src, srcw, TMP_REG1));
if (type >= SLJIT_FAST_CALL)
return push_inst16(compiler, BLX | RN3(TMP_REG1));
}
+ /* These jumps are converted to jump/call instructions when possible. */
jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
FAIL_IF(!jump);
set_jump(jump, compiler, JUMP_ADDR | ((type >= SLJIT_FAST_CALL) ? IS_BL : 0));
@@ -1846,6 +1993,41 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi
return push_inst16(compiler, (type <= SLJIT_JUMP ? BX : BLX) | RN3(TMP_REG1));
}
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 arg_types,
+ sljit_s32 src, sljit_sw srcw)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw));
+
+#ifdef __SOFTFP__
+ if (src & SLJIT_MEM) {
+ FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG1, src, srcw, TMP_REG1));
+ src = TMP_REG1;
+ }
+
+ FAIL_IF(softfloat_call_with_args(compiler, arg_types, &src));
+
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
+ || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ compiler->skip_checks = 1;
+#endif
+
+ FAIL_IF(sljit_emit_ijump(compiler, type, src, srcw));
+
+ return softfloat_post_call_with_args(compiler, arg_types);
+#else /* !__SOFTFP__ */
+ FAIL_IF(hardfloat_call_with_args(compiler, arg_types));
+
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
+ || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ compiler->skip_checks = 1;
+#endif
+
+ return sljit_emit_ijump(compiler, type, src, srcw);
+#endif /* __SOFTFP__ */
+}
+
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 type)
@@ -1896,8 +2078,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
return SLJIT_SUCCESS;
/* The condition must always be set, even if the ORR/EORI is not executed above. */
- if (reg_map[dst_r] <= 7)
- return push_inst16(compiler, MOVS | RD3(TMP_REG1) | RN3(dst_r));
return push_inst32(compiler, MOV_W | SET_FLAGS | RD4(TMP_REG1) | RM4(dst_r));
}
@@ -1924,8 +2104,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compil
if (tmp < 0x10000) {
/* set low 16 bits, set hi 16 bits to 0. */
FAIL_IF(push_inst16(compiler, IT | (cc << 4) | 0x8));
- return push_inst32(compiler, MOVW | RD4(dst_reg) |
- COPY_BITS(tmp, 12, 16, 4) | COPY_BITS(tmp, 11, 26, 1) | COPY_BITS(tmp, 8, 12, 3) | (tmp & 0xff));
+ return push_inst32(compiler, MOVW | RD4(dst_reg)
+ | COPY_BITS(tmp, 12, 16, 4) | COPY_BITS(tmp, 11, 26, 1) | COPY_BITS(tmp, 8, 12, 3) | (tmp & 0xff));
}
tmp = get_imm(srcw);
@@ -1943,10 +2123,67 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compil
FAIL_IF(push_inst16(compiler, IT | (cc << 4) | ((cc & 0x1) << 3) | 0x4));
tmp = (sljit_uw) srcw;
- FAIL_IF(push_inst32(compiler, MOVW | RD4(dst_reg) |
- COPY_BITS(tmp, 12, 16, 4) | COPY_BITS(tmp, 11, 26, 1) | COPY_BITS(tmp, 8, 12, 3) | (tmp & 0xff)));
- return push_inst32(compiler, MOVT | RD4(dst_reg) |
- COPY_BITS(tmp, 12 + 16, 16, 4) | COPY_BITS(tmp, 11 + 16, 26, 1) | COPY_BITS(tmp, 8 + 16, 12, 3) | ((tmp & 0xff0000) >> 16));
+ FAIL_IF(push_inst32(compiler, MOVW | RD4(dst_reg)
+ | COPY_BITS(tmp, 12, 16, 4) | COPY_BITS(tmp, 11, 26, 1) | COPY_BITS(tmp, 8, 12, 3) | (tmp & 0xff)));
+ return push_inst32(compiler, MOVT | RD4(dst_reg)
+ | COPY_BITS(tmp, 12 + 16, 16, 4) | COPY_BITS(tmp, 11 + 16, 26, 1) | COPY_BITS(tmp, 8 + 16, 12, 3) | ((tmp & 0xff0000) >> 16));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 reg,
+ sljit_s32 mem, sljit_sw memw)
+{
+ sljit_s32 flags;
+ sljit_ins inst;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw));
+
+ if ((mem & OFFS_REG_MASK) || (memw > 255 && memw < -255))
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_MEM_SUPP)
+ return SLJIT_SUCCESS;
+
+ switch (type & 0xff) {
+ case SLJIT_MOV:
+ case SLJIT_MOV_U32:
+ case SLJIT_MOV_S32:
+ case SLJIT_MOV_P:
+ flags = WORD_SIZE;
+ break;
+ case SLJIT_MOV_U8:
+ flags = BYTE_SIZE;
+ break;
+ case SLJIT_MOV_S8:
+ flags = BYTE_SIZE | SIGNED;
+ break;
+ case SLJIT_MOV_U16:
+ flags = HALF_SIZE;
+ break;
+ case SLJIT_MOV_S16:
+ flags = HALF_SIZE | SIGNED;
+ break;
+ default:
+ SLJIT_UNREACHABLE();
+ flags = WORD_SIZE;
+ break;
+ }
+
+ if (type & SLJIT_MEM_STORE)
+ flags |= STORE;
+
+ inst = sljit_mem32[flags] | 0x900;
+
+ if (type & SLJIT_MEM_PRE)
+ inst |= 0x400;
+
+ if (memw >= 0)
+ inst |= 0x200;
+ else
+ memw = -memw;
+
+ return push_inst32(compiler, inst | RT4(reg) | RN4(mem & REG_MASK) | memw);
}
SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw init_value)
diff --git a/src/3rdparty/pcre2/src/sljit/sljitNativeMIPS_32.c b/src/3rdparty/pcre2/src/sljit/sljitNativeMIPS_32.c
index 62e16106b1..9f9e157a05 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitNativeMIPS_32.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitNativeMIPS_32.c
@@ -435,3 +435,232 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_consta
inst = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
SLJIT_CACHE_FLUSH(inst, inst + 2);
}
+
+static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_ins *ins_ptr)
+{
+ sljit_s32 stack_offset = 0;
+ sljit_s32 arg_count = 0;
+ sljit_s32 float_arg_count = 0;
+ sljit_s32 word_arg_count = 0;
+ sljit_s32 types = 0;
+ sljit_s32 arg_count_save, types_save;
+ sljit_ins prev_ins = NOP;
+ sljit_ins ins = NOP;
+ sljit_u8 offsets[4];
+
+ SLJIT_ASSERT(reg_map[TMP_REG3] == 4 && freg_map[TMP_FREG1] == 12);
+
+ arg_types >>= SLJIT_DEF_SHIFT;
+
+ while (arg_types) {
+ types = (types << SLJIT_DEF_SHIFT) | (arg_types & SLJIT_DEF_MASK);
+
+ switch (arg_types & SLJIT_DEF_MASK) {
+ case SLJIT_ARG_TYPE_F32:
+ offsets[arg_count] = (sljit_u8)stack_offset;
+
+ if (word_arg_count == 0 && arg_count <= 1)
+ offsets[arg_count] = 254 + arg_count;
+
+ stack_offset += sizeof(sljit_f32);
+ arg_count++;
+ float_arg_count++;
+ break;
+ case SLJIT_ARG_TYPE_F64:
+ if (stack_offset & 0x7)
+ stack_offset += sizeof(sljit_sw);
+ offsets[arg_count] = (sljit_u8)stack_offset;
+
+ if (word_arg_count == 0 && arg_count <= 1)
+ offsets[arg_count] = 254 + arg_count;
+
+ stack_offset += sizeof(sljit_f64);
+ arg_count++;
+ float_arg_count++;
+ break;
+ default:
+ offsets[arg_count] = (sljit_u8)stack_offset;
+ stack_offset += sizeof(sljit_sw);
+ arg_count++;
+ word_arg_count++;
+ break;
+ }
+
+ arg_types >>= SLJIT_DEF_SHIFT;
+ }
+
+ /* Stack is aligned to 16 bytes, max two doubles can be placed on the stack. */
+ if (stack_offset > 16)
+ FAIL_IF(push_inst(compiler, ADDIU | S(SLJIT_SP) | T(SLJIT_SP) | IMM(-16), DR(SLJIT_SP)));
+
+ types_save = types;
+ arg_count_save = arg_count;
+
+ while (types) {
+ switch (types & SLJIT_DEF_MASK) {
+ case SLJIT_ARG_TYPE_F32:
+ arg_count--;
+ if (offsets[arg_count] < 254)
+ ins = SWC1 | S(SLJIT_SP) | FT(float_arg_count) | IMM(offsets[arg_count]);
+ float_arg_count--;
+ break;
+ case SLJIT_ARG_TYPE_F64:
+ arg_count--;
+ if (offsets[arg_count] < 254)
+ ins = SDC1 | S(SLJIT_SP) | FT(float_arg_count) | IMM(offsets[arg_count]);
+ float_arg_count--;
+ break;
+ default:
+ if (offsets[arg_count - 1] >= 16)
+ ins = SW | S(SLJIT_SP) | T(word_arg_count) | IMM(offsets[arg_count - 1]);
+ else if (arg_count != word_arg_count)
+ ins = ADDU | S(word_arg_count) | TA(0) | DA(4 + (offsets[arg_count - 1] >> 2));
+ else if (arg_count == 1)
+ ins = ADDU | S(SLJIT_R0) | TA(0) | D(TMP_REG3);
+
+ arg_count--;
+ word_arg_count--;
+ break;
+ }
+
+ if (ins != NOP) {
+ if (prev_ins != NOP)
+ FAIL_IF(push_inst(compiler, prev_ins, MOVABLE_INS));
+ prev_ins = ins;
+ ins = NOP;
+ }
+
+ types >>= SLJIT_DEF_SHIFT;
+ }
+
+ types = types_save;
+ arg_count = arg_count_save;
+
+ while (types) {
+ switch (types & SLJIT_DEF_MASK) {
+ case SLJIT_ARG_TYPE_F32:
+ arg_count--;
+ if (offsets[arg_count] == 254)
+ ins = MOV_S | FMT_S | FS(SLJIT_FR0) | FD(TMP_FREG1);
+ else if (offsets[arg_count] < 16)
+ ins = LW | S(SLJIT_SP) | TA(4 + (offsets[arg_count] >> 2)) | IMM(offsets[arg_count]);
+ break;
+ case SLJIT_ARG_TYPE_F64:
+ arg_count--;
+ if (offsets[arg_count] == 254)
+ ins = MOV_S | FMT_D | FS(SLJIT_FR0) | FD(TMP_FREG1);
+ else if (offsets[arg_count] < 16) {
+ if (prev_ins != NOP)
+ FAIL_IF(push_inst(compiler, prev_ins, MOVABLE_INS));
+ prev_ins = LW | S(SLJIT_SP) | TA(4 + (offsets[arg_count] >> 2)) | IMM(offsets[arg_count]);
+ ins = LW | S(SLJIT_SP) | TA(5 + (offsets[arg_count] >> 2)) | IMM(offsets[arg_count] + sizeof(sljit_sw));
+ }
+ break;
+ default:
+ arg_count--;
+ break;
+ }
+
+ if (ins != NOP) {
+ if (prev_ins != NOP)
+ FAIL_IF(push_inst(compiler, prev_ins, MOVABLE_INS));
+ prev_ins = ins;
+ ins = NOP;
+ }
+
+ types >>= SLJIT_DEF_SHIFT;
+ }
+
+ *ins_ptr = prev_ins;
+
+ return SLJIT_SUCCESS;
+}
+
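call_with_args() deliberately keeps the last marshalling instruction pending (prev_ins) and hands it back through ins_ptr, so sljit_emit_call()/sljit_emit_icall() can place it right after the JALR and fill the MIPS branch delay slot. A toy emitter showing the same hold-one-back pattern (the opcodes are placeholders, not real encodings used by this backend):

#include <stdio.h>

typedef unsigned int ins_t;
#define NOP 0u

static void emit(ins_t i) { printf("  emit %08x\n", i); }

static ins_t pending = NOP;

/* Queue an instruction, but always hold the newest one back. */
static void queue(ins_t i)
{
	if (pending != NOP)
		emit(pending);
	pending = i;
}

/* Emit the jump, then drop the held-back instruction into its delay slot. */
static void call(ins_t jump_ins)
{
	emit(jump_ins);
	emit(pending);        /* delay slot: last argument-setup instruction */
	pending = NOP;
}

int main(void)
{
	queue(0x00000001);    /* set up argument 0 */
	queue(0x00000002);    /* set up argument 1 (held back) */
	call(0x0320f809);     /* placeholder for a JALR through $t9 */
	return 0;
}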
+static sljit_s32 post_call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types)
+{
+ sljit_s32 stack_offset = 0;
+
+ arg_types >>= SLJIT_DEF_SHIFT;
+
+ while (arg_types) {
+ switch (arg_types & SLJIT_DEF_MASK) {
+ case SLJIT_ARG_TYPE_F32:
+ stack_offset += sizeof(sljit_f32);
+ break;
+ case SLJIT_ARG_TYPE_F64:
+ if (stack_offset & 0x7)
+ stack_offset += sizeof(sljit_sw);
+ stack_offset += sizeof(sljit_f64);
+ break;
+ default:
+ stack_offset += sizeof(sljit_sw);
+ break;
+ }
+
+ arg_types >>= SLJIT_DEF_SHIFT;
+ }
+
+ /* Stack is aligned to 16 bytes, max two doubles can be placed on the stack. */
+ if (stack_offset > 16)
+ return push_inst(compiler, ADDIU | S(SLJIT_SP) | T(SLJIT_SP) | IMM(16), DR(SLJIT_SP));
+
+ return SLJIT_SUCCESS;
+}
+
+SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 arg_types)
+{
+ struct sljit_jump *jump;
+ sljit_ins ins;
+
+ CHECK_ERROR_PTR();
+ CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types));
+
+ jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
+ PTR_FAIL_IF(!jump);
+ set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP);
+ type &= 0xff;
+
+ PTR_FAIL_IF(call_with_args(compiler, arg_types, &ins));
+
+ SLJIT_ASSERT(DR(PIC_ADDR_REG) == 25 && PIC_ADDR_REG == TMP_REG2);
+
+ PTR_FAIL_IF(emit_const(compiler, PIC_ADDR_REG, 0));
+
+ jump->flags |= IS_JAL | IS_CALL;
+ PTR_FAIL_IF(push_inst(compiler, JALR | S(PIC_ADDR_REG) | DA(RETURN_ADDR_REG), UNMOVABLE_INS));
+ jump->addr = compiler->size;
+ PTR_FAIL_IF(push_inst(compiler, ins, UNMOVABLE_INS));
+
+ PTR_FAIL_IF(post_call_with_args(compiler, arg_types));
+
+ return jump;
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 arg_types,
+ sljit_s32 src, sljit_sw srcw)
+{
+ sljit_ins ins;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw));
+
+ SLJIT_ASSERT(DR(PIC_ADDR_REG) == 25 && PIC_ADDR_REG == TMP_REG2);
+
+ if (src & SLJIT_IMM)
+ FAIL_IF(load_immediate(compiler, DR(PIC_ADDR_REG), srcw));
+ else if (FAST_IS_REG(src))
+ FAIL_IF(push_inst(compiler, ADDU | S(src) | TA(0) | D(PIC_ADDR_REG), DR(PIC_ADDR_REG)));
+ else if (src & SLJIT_MEM) {
+ ADJUST_LOCAL_OFFSET(src, srcw);
+ FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, DR(PIC_ADDR_REG), src, srcw));
+ }
+
+ FAIL_IF(call_with_args(compiler, arg_types, &ins));
+
+ /* Register input. */
+ FAIL_IF(push_inst(compiler, JALR | S(PIC_ADDR_REG) | DA(RETURN_ADDR_REG), UNMOVABLE_INS));
+ FAIL_IF(push_inst(compiler, ins, UNMOVABLE_INS));
+ return post_call_with_args(compiler, arg_types);
+}
diff --git a/src/3rdparty/pcre2/src/sljit/sljitNativeMIPS_64.c b/src/3rdparty/pcre2/src/sljit/sljitNativeMIPS_64.c
index dd114bb27a..ff6f048659 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitNativeMIPS_64.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitNativeMIPS_64.c
@@ -537,3 +537,132 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_consta
inst = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
SLJIT_CACHE_FLUSH(inst, inst + 6);
}
+
+static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_ins *ins_ptr)
+{
+ sljit_s32 arg_count = 0;
+ sljit_s32 word_arg_count = 0;
+ sljit_s32 float_arg_count = 0;
+ sljit_s32 types = 0;
+ sljit_ins prev_ins = NOP;
+ sljit_ins ins = NOP;
+
+ SLJIT_ASSERT(reg_map[TMP_REG3] == 4 && freg_map[TMP_FREG1] == 12);
+
+ arg_types >>= SLJIT_DEF_SHIFT;
+
+ while (arg_types) {
+ types = (types << SLJIT_DEF_SHIFT) | (arg_types & SLJIT_DEF_MASK);
+
+ switch (arg_types & SLJIT_DEF_MASK) {
+ case SLJIT_ARG_TYPE_F32:
+ case SLJIT_ARG_TYPE_F64:
+ arg_count++;
+ float_arg_count++;
+ break;
+ default:
+ arg_count++;
+ word_arg_count++;
+ break;
+ }
+
+ arg_types >>= SLJIT_DEF_SHIFT;
+ }
+
+ while (types) {
+ switch (types & SLJIT_DEF_MASK) {
+ case SLJIT_ARG_TYPE_F32:
+ if (arg_count != float_arg_count)
+ ins = MOV_S | FMT_S | FS(float_arg_count) | FD(arg_count);
+ else if (arg_count == 1)
+ ins = MOV_S | FMT_S | FS(SLJIT_FR0) | FD(TMP_FREG1);
+ arg_count--;
+ float_arg_count--;
+ break;
+ case SLJIT_ARG_TYPE_F64:
+ if (arg_count != float_arg_count)
+ ins = MOV_S | FMT_D | FS(float_arg_count) | FD(arg_count);
+ else if (arg_count == 1)
+ ins = MOV_S | FMT_D | FS(SLJIT_FR0) | FD(TMP_FREG1);
+ arg_count--;
+ float_arg_count--;
+ break;
+ default:
+ if (arg_count != word_arg_count)
+ ins = DADDU | S(word_arg_count) | TA(0) | D(arg_count);
+ else if (arg_count == 1)
+ ins = DADDU | S(SLJIT_R0) | TA(0) | D(TMP_REG3);
+ arg_count--;
+ word_arg_count--;
+ break;
+ }
+
+ if (ins != NOP) {
+ if (prev_ins != NOP)
+ FAIL_IF(push_inst(compiler, prev_ins, MOVABLE_INS));
+ prev_ins = ins;
+ ins = NOP;
+ }
+
+ types >>= SLJIT_DEF_SHIFT;
+ }
+
+ *ins_ptr = prev_ins;
+
+ return SLJIT_SUCCESS;
+}
+
+SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 arg_types)
+{
+ struct sljit_jump *jump;
+ sljit_ins ins;
+
+ CHECK_ERROR_PTR();
+ CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types));
+
+ jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
+ PTR_FAIL_IF(!jump);
+ set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP);
+ type &= 0xff;
+
+ PTR_FAIL_IF(call_with_args(compiler, arg_types, &ins));
+
+ SLJIT_ASSERT(DR(PIC_ADDR_REG) == 25 && PIC_ADDR_REG == TMP_REG2);
+
+ PTR_FAIL_IF(emit_const(compiler, PIC_ADDR_REG, 0));
+
+ jump->flags |= IS_JAL | IS_CALL;
+ PTR_FAIL_IF(push_inst(compiler, JALR | S(PIC_ADDR_REG) | DA(RETURN_ADDR_REG), UNMOVABLE_INS));
+ jump->addr = compiler->size;
+ PTR_FAIL_IF(push_inst(compiler, ins, UNMOVABLE_INS));
+
+ return jump;
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 arg_types,
+ sljit_s32 src, sljit_sw srcw)
+{
+ sljit_ins ins;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw));
+
+ SLJIT_ASSERT(DR(PIC_ADDR_REG) == 25 && PIC_ADDR_REG == TMP_REG2);
+
+ if (src & SLJIT_IMM)
+ FAIL_IF(load_immediate(compiler, DR(PIC_ADDR_REG), srcw));
+ else if (FAST_IS_REG(src))
+ FAIL_IF(push_inst(compiler, DADDU | S(src) | TA(0) | D(PIC_ADDR_REG), DR(PIC_ADDR_REG)));
+ else if (src & SLJIT_MEM) {
+ ADJUST_LOCAL_OFFSET(src, srcw);
+ FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, DR(PIC_ADDR_REG), src, srcw));
+ }
+
+ FAIL_IF(call_with_args(compiler, arg_types, &ins));
+
+ /* Register input. */
+ FAIL_IF(push_inst(compiler, JALR | S(PIC_ADDR_REG) | DA(RETURN_ADDR_REG), UNMOVABLE_INS));
+ return push_inst(compiler, ins, UNMOVABLE_INS);
+}
diff --git a/src/3rdparty/pcre2/src/sljit/sljitNativeMIPS_common.c b/src/3rdparty/pcre2/src/sljit/sljitNativeMIPS_common.c
index 00e8303090..e108433f70 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitNativeMIPS_common.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitNativeMIPS_common.c
@@ -60,13 +60,27 @@ typedef sljit_u32 sljit_ins;
#define EQUAL_FLAG 31
#define OTHER_FLAG 1
-#define TMP_FREG1 (0)
-#define TMP_FREG2 ((SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1) << 1)
+#define TMP_FREG1 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1)
+#define TMP_FREG2 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 2)
static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 5] = {
0, 2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 24, 23, 22, 21, 20, 19, 18, 17, 16, 29, 3, 25, 4
};
+#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
+
+static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
+ 0, 0, 14, 2, 4, 6, 8, 12, 10
+};
+
+#else
+
+static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
+ 0, 0, 13, 14, 15, 16, 17, 12, 18
+};
+
+#endif
+
/* --------------------------------------------------------------------- */
/* Instruction forms */
/* --------------------------------------------------------------------- */
@@ -74,21 +88,23 @@ static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 5] = {
#define S(s) (reg_map[s] << 21)
#define T(t) (reg_map[t] << 16)
#define D(d) (reg_map[d] << 11)
+#define FT(t) (freg_map[t] << 16)
+#define FS(s) (freg_map[s] << 11)
+#define FD(d) (freg_map[d] << 6)
/* Absolute registers. */
#define SA(s) ((s) << 21)
#define TA(t) ((t) << 16)
#define DA(d) ((d) << 11)
-#define FT(t) ((t) << 16)
-#define FS(s) ((s) << 11)
-#define FD(d) ((d) << 6)
#define IMM(imm) ((imm) & 0xffff)
#define SH_IMM(imm) ((imm) << 6)
#define DR(dr) (reg_map[dr])
+#define FR(dr) (freg_map[dr])
#define HI(opcode) ((opcode) << 26)
#define LO(opcode) (opcode)
/* S = (16 << 21) D = (17 << 21) */
#define FMT_S (16 << 21)
+#define FMT_D (17 << 21)
#define ABS_S (HI(17) | FMT_S | LO(5))
#define ADD_S (HI(17) | FMT_S | LO(0))
@@ -153,6 +169,7 @@ static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 5] = {
#define OR (HI(0) | LO(37))
#define ORI (HI(13))
#define SD (HI(63))
+#define SDC1 (HI(61))
#define SLT (HI(0) | LO(42))
#define SLTI (HI(10))
#define SLTIU (HI(11))
@@ -166,6 +183,7 @@ static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 5] = {
#define SUB_S (HI(17) | FMT_S | LO(1))
#define SUBU (HI(0) | LO(35))
#define SW (HI(43))
+#define SWC1 (HI(57))
#define TRUNC_W_S (HI(17) | FMT_S | LO(13))
#define XOR (HI(0) | LO(38))
#define XORI (HI(14))
@@ -540,21 +558,20 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
#define MEM_MASK 0x1f
-#define WRITE_BACK 0x00020
-#define ARG_TEST 0x00040
-#define ALT_KEEP_CACHE 0x00080
-#define CUMULATIVE_OP 0x00100
-#define LOGICAL_OP 0x00200
-#define IMM_OP 0x00400
-#define SRC2_IMM 0x00800
-
-#define UNUSED_DEST 0x01000
-#define REG_DEST 0x02000
-#define REG1_SOURCE 0x04000
-#define REG2_SOURCE 0x08000
-#define SLOW_SRC1 0x10000
-#define SLOW_SRC2 0x20000
-#define SLOW_DEST 0x40000
+#define ARG_TEST 0x00020
+#define ALT_KEEP_CACHE 0x00040
+#define CUMULATIVE_OP 0x00080
+#define LOGICAL_OP 0x00100
+#define IMM_OP 0x00200
+#define SRC2_IMM 0x00400
+
+#define UNUSED_DEST 0x00800
+#define REG_DEST 0x01000
+#define REG1_SOURCE 0x02000
+#define REG2_SOURCE 0x04000
+#define SLOW_SRC1 0x08000
+#define SLOW_SRC2 0x10000
+#define SLOW_DEST 0x20000
#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
#define STACK_STORE SW
@@ -564,6 +581,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
#define STACK_LOAD LD
#endif
+static SLJIT_INLINE sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg_ar, sljit_s32 arg, sljit_sw argw);
+
#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
#include "sljitNativeMIPS_32.c"
#else
@@ -571,15 +590,15 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
#endif
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler,
- sljit_s32 options, sljit_s32 args, sljit_s32 scratches, sljit_s32 saveds,
+ sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
sljit_ins base;
- sljit_s32 i, tmp, offs;
+ sljit_s32 args, i, tmp, offs;
CHECK_ERROR();
- CHECK(check_sljit_emit_enter(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size));
- set_emit_enter(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size);
+ CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
+ set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
local_size += GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1) + SLJIT_LOCALS_OFFSET;
#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
@@ -616,6 +635,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
FAIL_IF(push_inst(compiler, STACK_STORE | base | T(i) | IMM(offs), MOVABLE_INS));
}
+ args = get_arg_count(arg_types);
+
if (args >= 1)
FAIL_IF(push_inst(compiler, ADDU_W | SA(4) | TA(0) | D(SLJIT_S0), DR(SLJIT_S0)));
if (args >= 2)
@@ -627,12 +648,12 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler,
- sljit_s32 options, sljit_s32 args, sljit_s32 scratches, sljit_s32 saveds,
+ sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
CHECK_ERROR();
- CHECK(check_sljit_set_context(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size));
- set_set_context(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size);
+ CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
+ set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
local_size += GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1) + SLJIT_LOCALS_OFFSET;
#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
@@ -734,7 +755,7 @@ static sljit_s32 getput_arg_fast(struct sljit_compiler *compiler, sljit_s32 flag
{
SLJIT_ASSERT(arg & SLJIT_MEM);
- if ((!(flags & WRITE_BACK) || !(arg & REG_MASK)) && !(arg & OFFS_REG_MASK) && argw <= SIMM_MAX && argw >= SIMM_MIN) {
+ if (!(arg & OFFS_REG_MASK) && argw <= SIMM_MAX && argw >= SIMM_MIN) {
/* Works for both absolute and relative addresses. */
if (SLJIT_UNLIKELY(flags & ARG_TEST))
return 1;
@@ -791,12 +812,6 @@ static sljit_s32 getput_arg(struct sljit_compiler *compiler, sljit_s32 flags, sl
base = arg & REG_MASK;
if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) {
- if (SLJIT_UNLIKELY(flags & WRITE_BACK)) {
- SLJIT_ASSERT(argw == 0);
- FAIL_IF(push_inst(compiler, ADDU_W | S(base) | T(OFFS_REG(arg)) | D(base), DR(base)));
- return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | S(base) | TA(reg_ar), delay_slot);
- }
-
argw &= 0x3;
/* Using the cache. */
@@ -833,29 +848,6 @@ static sljit_s32 getput_arg(struct sljit_compiler *compiler, sljit_s32 flags, sl
return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | SA(tmp_ar) | TA(reg_ar), delay_slot);
}
- if (SLJIT_UNLIKELY(flags & WRITE_BACK) && base) {
- if (argw <= SIMM_MAX && argw >= SIMM_MIN) {
- if (argw)
- FAIL_IF(push_inst(compiler, ADDIU_W | S(base) | T(base) | IMM(argw), DR(base)));
- }
- else {
- if (compiler->cache_arg == SLJIT_MEM && argw - compiler->cache_argw <= SIMM_MAX && argw - compiler->cache_argw >= SIMM_MIN) {
- if (argw != compiler->cache_argw) {
- FAIL_IF(push_inst(compiler, ADDIU_W | S(TMP_REG3) | T(TMP_REG3) | IMM(argw - compiler->cache_argw), DR(TMP_REG3)));
- compiler->cache_argw = argw;
- }
- FAIL_IF(push_inst(compiler, ADDU_W | S(base) | T(TMP_REG3) | D(base), DR(base)));
- }
- else {
- compiler->cache_arg = SLJIT_MEM;
- compiler->cache_argw = argw;
- FAIL_IF(load_immediate(compiler, DR(TMP_REG3), argw));
- FAIL_IF(push_inst(compiler, ADDU_W | S(base) | T(TMP_REG3) | D(base), DR(base)));
- }
- }
- return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | S(base) | TA(reg_ar), delay_slot);
- }
-
if (compiler->cache_arg == arg && argw - compiler->cache_argw <= SIMM_MAX && argw - compiler->cache_argw >= SIMM_MIN) {
if (argw != compiler->cache_argw) {
FAIL_IF(push_inst(compiler, ADDIU_W | S(TMP_REG3) | T(TMP_REG3) | IMM(argw - compiler->cache_argw), DR(TMP_REG3)));
@@ -929,7 +921,7 @@ static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s3
else if (FAST_IS_REG(dst)) {
dst_r = dst;
flags |= REG_DEST;
- if (op >= SLJIT_MOV && op <= SLJIT_MOVU_S32)
+ if (op >= SLJIT_MOV && op <= SLJIT_MOV_P)
sugg_src2_r = dst_r;
}
else if ((dst & SLJIT_MEM) && !getput_arg_fast(compiler, flags | ARG_TEST, DR(TMP_REG1), dst, dstw))
@@ -983,7 +975,7 @@ static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s3
if (FAST_IS_REG(src2)) {
src2_r = src2;
flags |= REG2_SOURCE;
- if (!(flags & REG_DEST) && op >= SLJIT_MOV && op <= SLJIT_MOVU_S32)
+ if (!(flags & REG_DEST) && op >= SLJIT_MOV && op <= SLJIT_MOV_P)
dst_r = src2_r;
}
else if (src2 & SLJIT_IMM) {
@@ -994,7 +986,7 @@ static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s3
}
else {
src2_r = 0;
- if ((op >= SLJIT_MOV && op <= SLJIT_MOVU_S32) && (dst & SLJIT_MEM))
+ if ((op >= SLJIT_MOV && op <= SLJIT_MOV_P) && (dst & SLJIT_MEM))
dst_r = 0;
}
}
@@ -1133,11 +1125,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
}
#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
- if ((op & SLJIT_I32_OP) && GET_OPCODE(op) >= SLJIT_NOT) {
+ if ((op & SLJIT_I32_OP) && GET_OPCODE(op) >= SLJIT_NOT)
flags |= INT_DATA | SIGNED_DATA;
- if (src & SLJIT_IMM)
- srcw = (sljit_s32)srcw;
- }
#endif
switch (GET_OPCODE(op)) {
@@ -1171,36 +1160,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
case SLJIT_MOV_S16:
return emit_op(compiler, SLJIT_MOV_S16, HALF_DATA | SIGNED_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s16)srcw : srcw);
- case SLJIT_MOVU:
- case SLJIT_MOVU_P:
- return emit_op(compiler, SLJIT_MOV, WORD_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, srcw);
-
- case SLJIT_MOVU_U32:
-#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
- return emit_op(compiler, SLJIT_MOV_U32, INT_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, srcw);
-#else
- return emit_op(compiler, SLJIT_MOV_U32, INT_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u32)srcw : srcw);
-#endif
-
- case SLJIT_MOVU_S32:
-#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
- return emit_op(compiler, SLJIT_MOV_S32, INT_DATA | SIGNED_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, srcw);
-#else
- return emit_op(compiler, SLJIT_MOV_S32, INT_DATA | SIGNED_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s32)srcw : srcw);
-#endif
-
- case SLJIT_MOVU_U8:
- return emit_op(compiler, SLJIT_MOV_U8, BYTE_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u8)srcw : srcw);
-
- case SLJIT_MOVU_S8:
- return emit_op(compiler, SLJIT_MOV_S8, BYTE_DATA | SIGNED_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s8)srcw : srcw);
-
- case SLJIT_MOVU_U16:
- return emit_op(compiler, SLJIT_MOV_U16, HALF_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u16)srcw : srcw);
-
- case SLJIT_MOVU_S16:
- return emit_op(compiler, SLJIT_MOV_S16, HALF_DATA | SIGNED_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s16)srcw : srcw);
-
case SLJIT_NOT:
return emit_op(compiler, op, flags, dst, dstw, TMP_REG1, 0, src, srcw);
@@ -1211,6 +1170,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
return emit_op(compiler, op, flags, dst, dstw, TMP_REG1, 0, src, srcw);
}
+ SLJIT_UNREACHABLE();
return SLJIT_SUCCESS;
#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
@@ -1282,6 +1242,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
return emit_op(compiler, op, flags | IMM_OP, dst, dstw, src1, src1w, src2, src2w);
}
+ SLJIT_UNREACHABLE();
return SLJIT_SUCCESS;
#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
@@ -1298,7 +1259,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg)
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg)
{
CHECK_REG_INDEX(check_sljit_get_float_register_index(reg));
- return reg << 1;
+ return FR(reg);
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler,
@@ -1328,11 +1289,9 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_comp
#endif
if (src & SLJIT_MEM) {
- FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src, srcw, dst, dstw));
+ FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, FR(TMP_FREG1), src, srcw, dst, dstw));
src = TMP_FREG1;
}
- else
- src <<= 1;
FAIL_IF(push_inst(compiler, (TRUNC_W_S ^ (flags >> 19)) | FMT(op) | FS(src) | FD(TMP_FREG1), MOVABLE_INS));
@@ -1340,7 +1299,7 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_comp
return push_inst(compiler, MFC1 | flags | T(dst) | FS(TMP_FREG1), MOVABLE_INS);
/* Store the integer value from a VFP register. */
- return emit_op_mem2(compiler, flags ? DOUBLE_DATA : SINGLE_DATA, TMP_FREG1, dst, dstw, 0, 0);
+ return emit_op_mem2(compiler, flags ? DOUBLE_DATA : SINGLE_DATA, FR(TMP_FREG1), dst, dstw, 0, 0);
#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
# undef is_long
@@ -1357,13 +1316,13 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_comp
sljit_s32 flags = (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_SW) << 21;
#endif
- sljit_s32 dst_r = FAST_IS_REG(dst) ? (dst << 1) : TMP_FREG1;
+ sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
if (FAST_IS_REG(src))
FAIL_IF(push_inst(compiler, MTC1 | flags | T(src) | FS(TMP_FREG1), MOVABLE_INS));
else if (src & SLJIT_MEM) {
/* Load the integer value into a VFP register. */
- FAIL_IF(emit_op_mem2(compiler, ((flags) ? DOUBLE_DATA : SINGLE_DATA) | LOAD_DATA, TMP_FREG1, src, srcw, dst, dstw));
+ FAIL_IF(emit_op_mem2(compiler, ((flags) ? DOUBLE_DATA : SINGLE_DATA) | LOAD_DATA, FR(TMP_FREG1), src, srcw, dst, dstw));
}
else {
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
@@ -1377,7 +1336,7 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_comp
FAIL_IF(push_inst(compiler, CVT_S_S | flags | (4 << 21) | (((op & SLJIT_F32_OP) ^ SLJIT_F32_OP) >> 8) | FS(TMP_FREG1) | FD(dst_r), MOVABLE_INS));
if (dst & SLJIT_MEM)
- return emit_op_mem2(compiler, FLOAT_DATA(op), TMP_FREG1, dst, dstw, 0, 0);
+ return emit_op_mem2(compiler, FLOAT_DATA(op), FR(TMP_FREG1), dst, dstw, 0, 0);
return SLJIT_SUCCESS;
#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
@@ -1392,18 +1351,14 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compile
sljit_ins inst;
if (src1 & SLJIT_MEM) {
- FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, src2, src2w));
+ FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, FR(TMP_FREG1), src1, src1w, src2, src2w));
src1 = TMP_FREG1;
}
- else
- src1 <<= 1;
if (src2 & SLJIT_MEM) {
- FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, 0, 0));
+ FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, FR(TMP_FREG2), src2, src2w, 0, 0));
src2 = TMP_FREG2;
}
- else
- src2 <<= 1;
switch (GET_FLAG_TYPE(op)) {
case SLJIT_EQUAL_F64:
@@ -1443,14 +1398,12 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32)
op ^= SLJIT_F32_OP;
- dst_r = FAST_IS_REG(dst) ? (dst << 1) : TMP_FREG1;
+ dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
if (src & SLJIT_MEM) {
- FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, dst_r, src, srcw, dst, dstw));
+ FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, FR(dst_r), src, srcw, dst, dstw));
src = dst_r;
}
- else
- src <<= 1;
switch (GET_OPCODE(op)) {
case SLJIT_MOV_F64:
@@ -1474,7 +1427,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
}
if (dst & SLJIT_MEM)
- return emit_op_mem2(compiler, FLOAT_DATA(op), dst_r, dst, dstw, 0, 0);
+ return emit_op_mem2(compiler, FLOAT_DATA(op), FR(dst_r), dst, dstw, 0, 0);
return SLJIT_SUCCESS;
}
@@ -1494,42 +1447,38 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compil
compiler->cache_arg = 0;
compiler->cache_argw = 0;
- dst_r = FAST_IS_REG(dst) ? (dst << 1) : TMP_FREG2;
+ dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG2;
if (src1 & SLJIT_MEM) {
- if (getput_arg_fast(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w)) {
+ if (getput_arg_fast(compiler, FLOAT_DATA(op) | LOAD_DATA, FR(TMP_FREG1), src1, src1w)) {
FAIL_IF(compiler->error);
src1 = TMP_FREG1;
} else
flags |= SLOW_SRC1;
}
- else
- src1 <<= 1;
if (src2 & SLJIT_MEM) {
- if (getput_arg_fast(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w)) {
+ if (getput_arg_fast(compiler, FLOAT_DATA(op) | LOAD_DATA, FR(TMP_FREG2), src2, src2w)) {
FAIL_IF(compiler->error);
src2 = TMP_FREG2;
} else
flags |= SLOW_SRC2;
}
- else
- src2 <<= 1;
if ((flags & (SLOW_SRC1 | SLOW_SRC2)) == (SLOW_SRC1 | SLOW_SRC2)) {
if (!can_cache(src1, src1w, src2, src2w) && can_cache(src1, src1w, dst, dstw)) {
- FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, src1, src1w));
- FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, dst, dstw));
+ FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, FR(TMP_FREG2), src2, src2w, src1, src1w));
+ FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, FR(TMP_FREG1), src1, src1w, dst, dstw));
}
else {
- FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, src2, src2w));
- FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, dst, dstw));
+ FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, FR(TMP_FREG1), src1, src1w, src2, src2w));
+ FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, FR(TMP_FREG2), src2, src2w, dst, dstw));
}
}
else if (flags & SLOW_SRC1)
- FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, dst, dstw));
+ FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, FR(TMP_FREG1), src1, src1w, dst, dstw));
else if (flags & SLOW_SRC2)
- FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, dst, dstw));
+ FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, FR(TMP_FREG2), src2, src2w, dst, dstw));
if (flags & SLOW_SRC1)
src1 = TMP_FREG1;
@@ -1555,7 +1504,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compil
}
if (dst_r == TMP_FREG2)
- FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op), TMP_FREG2, dst, dstw, 0, 0));
+ FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op), FR(TMP_FREG2), dst, dstw, 0, 0));
return SLJIT_SUCCESS;
}
@@ -1585,10 +1534,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_return(struct sljit_compiler
if (FAST_IS_REG(src))
FAIL_IF(push_inst(compiler, ADDU_W | S(src) | TA(0) | DA(RETURN_ADDR_REG), RETURN_ADDR_REG));
- else if (src & SLJIT_MEM)
+ else
FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, RETURN_ADDR_REG, src, srcw));
- else if (src & SLJIT_IMM)
- FAIL_IF(load_immediate(compiler, RETURN_ADDR_REG, srcw));
FAIL_IF(push_inst(compiler, JR | SA(RETURN_ADDR_REG), UNMOVABLE_INS));
return push_inst(compiler, NOP, UNMOVABLE_INS);
@@ -1705,19 +1652,16 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile
PTR_FAIL_IF(push_inst(compiler, inst, UNMOVABLE_INS));
PTR_FAIL_IF(emit_const(compiler, TMP_REG2, 0));
- if (type <= SLJIT_JUMP) {
+
+ if (type <= SLJIT_JUMP)
PTR_FAIL_IF(push_inst(compiler, JR | S(TMP_REG2), UNMOVABLE_INS));
- jump->addr = compiler->size;
- PTR_FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS));
- } else {
- SLJIT_ASSERT(DR(PIC_ADDR_REG) == 25 && PIC_ADDR_REG == TMP_REG2);
- /* Cannot be optimized out if type is >= CALL0. */
- jump->flags |= IS_JAL | (type >= SLJIT_CALL0 ? IS_CALL : 0);
+ else {
+ jump->flags |= IS_JAL;
PTR_FAIL_IF(push_inst(compiler, JALR | S(TMP_REG2) | DA(RETURN_ADDR_REG), UNMOVABLE_INS));
- jump->addr = compiler->size;
- /* A NOP if type < CALL1. */
- PTR_FAIL_IF(push_inst(compiler, ADDU_W | S(SLJIT_R0) | TA(0) | DA(4), UNMOVABLE_INS));
}
+
+ jump->addr = compiler->size;
+ PTR_FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS));
return jump;
}
@@ -1873,41 +1817,12 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw)
{
- sljit_s32 src_r = TMP_REG2;
struct sljit_jump *jump = NULL;
CHECK_ERROR();
CHECK(check_sljit_emit_ijump(compiler, type, src, srcw));
ADJUST_LOCAL_OFFSET(src, srcw);
- if (FAST_IS_REG(src)) {
- if (DR(src) != 4)
- src_r = src;
- else
- FAIL_IF(push_inst(compiler, ADDU_W | S(src) | TA(0) | D(TMP_REG2), DR(TMP_REG2)));
- }
-
- if (type >= SLJIT_CALL0) {
- SLJIT_ASSERT(DR(PIC_ADDR_REG) == 25 && PIC_ADDR_REG == TMP_REG2);
- if (src & (SLJIT_IMM | SLJIT_MEM)) {
- if (src & SLJIT_IMM)
- FAIL_IF(load_immediate(compiler, DR(PIC_ADDR_REG), srcw));
- else {
- SLJIT_ASSERT(src_r == TMP_REG2 && (src & SLJIT_MEM));
- FAIL_IF(emit_op(compiler, SLJIT_MOV, WORD_DATA, TMP_REG2, 0, TMP_REG1, 0, src, srcw));
- }
- FAIL_IF(push_inst(compiler, JALR | S(PIC_ADDR_REG) | DA(RETURN_ADDR_REG), UNMOVABLE_INS));
- /* We need an extra instruction in any case. */
- return push_inst(compiler, ADDU_W | S(SLJIT_R0) | TA(0) | DA(4), UNMOVABLE_INS);
- }
-
- /* Register input. */
- if (type >= SLJIT_CALL1)
- FAIL_IF(push_inst(compiler, ADDU_W | S(SLJIT_R0) | TA(0) | DA(4), 4));
- FAIL_IF(push_inst(compiler, JALR | S(src_r) | DA(RETURN_ADDR_REG), UNMOVABLE_INS));
- return push_inst(compiler, ADDU_W | S(src_r) | TA(0) | D(PIC_ADDR_REG), UNMOVABLE_INS);
- }
-
if (src & SLJIT_IMM) {
jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
FAIL_IF(!jump);
@@ -1918,11 +1833,14 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi
jump->flags |= IS_MOVABLE;
FAIL_IF(emit_const(compiler, TMP_REG2, 0));
+ src = TMP_REG2;
+ }
+ else if (src & SLJIT_MEM) {
+ FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, DR(TMP_REG2), src, srcw));
+ src = TMP_REG2;
}
- else if (src & SLJIT_MEM)
- FAIL_IF(emit_op(compiler, SLJIT_MOV, WORD_DATA, TMP_REG2, 0, TMP_REG1, 0, src, srcw));
- FAIL_IF(push_inst(compiler, JR | S(src_r), UNMOVABLE_INS));
+ FAIL_IF(push_inst(compiler, JR | S(src), UNMOVABLE_INS));
if (jump)
jump->addr = compiler->size;
FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS));
diff --git a/src/3rdparty/pcre2/src/sljit/sljitNativePPC_64.c b/src/3rdparty/pcre2/src/sljit/sljitNativePPC_64.c
index 5366c30d90..706b2ba20b 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitNativePPC_64.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitNativePPC_64.c
@@ -413,6 +413,61 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
return SLJIT_SUCCESS;
}
+static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *src)
+{
+ sljit_s32 arg_count = 0;
+ sljit_s32 word_arg_count = 0;
+ sljit_s32 types = 0;
+ sljit_s32 reg = 0;
+
+ if (src)
+ reg = *src & REG_MASK;
+
+ arg_types >>= SLJIT_DEF_SHIFT;
+
+ while (arg_types) {
+ types = (types << SLJIT_DEF_SHIFT) | (arg_types & SLJIT_DEF_MASK);
+
+ switch (arg_types & SLJIT_DEF_MASK) {
+ case SLJIT_ARG_TYPE_F32:
+ case SLJIT_ARG_TYPE_F64:
+ arg_count++;
+ break;
+ default:
+ arg_count++;
+ word_arg_count++;
+
+ if (arg_count != word_arg_count && arg_count == reg) {
+ FAIL_IF(push_inst(compiler, OR | S(reg) | A(TMP_CALL_REG) | B(reg)));
+ *src = TMP_CALL_REG;
+ }
+ break;
+ }
+
+ arg_types >>= SLJIT_DEF_SHIFT;
+ }
+
+ while (types) {
+ switch (types & SLJIT_DEF_MASK) {
+ case SLJIT_ARG_TYPE_F32:
+ case SLJIT_ARG_TYPE_F64:
+ arg_count--;
+ break;
+ default:
+ if (arg_count != word_arg_count)
+ FAIL_IF(push_inst(compiler, OR | S(word_arg_count) | A(arg_count) | B(word_arg_count)));
+
+ arg_count--;
+ word_arg_count--;
+ break;
+ }
+
+ types >>= SLJIT_DEF_SHIFT;
+ }
+
+ return SLJIT_SUCCESS;
+}
+
static SLJIT_INLINE sljit_s32 emit_const(struct sljit_compiler *compiler, sljit_s32 reg, sljit_sw init_value)
{
FAIL_IF(push_inst(compiler, ADDIS | D(reg) | A(0) | IMM(init_value >> 48)));
diff --git a/src/3rdparty/pcre2/src/sljit/sljitNativePPC_common.c b/src/3rdparty/pcre2/src/sljit/sljitNativePPC_common.c
index 2bf855c6bc..5ef4ac96c4 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitNativePPC_common.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitNativePPC_common.c
@@ -93,20 +93,23 @@ static void ppc_cache_flush(sljit_ins *from, sljit_ins *to)
#define TMP_REG1 (SLJIT_NUMBER_OF_REGISTERS + 2)
#define TMP_REG2 (SLJIT_NUMBER_OF_REGISTERS + 3)
-#define TMP_REG3 (SLJIT_NUMBER_OF_REGISTERS + 4)
-#define TMP_ZERO (SLJIT_NUMBER_OF_REGISTERS + 5)
+#define TMP_ZERO (SLJIT_NUMBER_OF_REGISTERS + 4)
#if (defined SLJIT_PASS_ENTRY_ADDR_TO_CALL && SLJIT_PASS_ENTRY_ADDR_TO_CALL)
-#define TMP_CALL_REG (SLJIT_NUMBER_OF_REGISTERS + 6)
+#define TMP_CALL_REG (SLJIT_NUMBER_OF_REGISTERS + 5)
#else
#define TMP_CALL_REG TMP_REG2
#endif
-#define TMP_FREG1 (0)
-#define TMP_FREG2 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1)
+#define TMP_FREG1 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1)
+#define TMP_FREG2 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 2)
static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 7] = {
- 0, 3, 4, 5, 6, 7, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 1, 8, 9, 10, 31, 12
+ 0, 3, 4, 5, 6, 7, 8, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 1, 9, 10, 31, 12
+};
+
+static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
+ 0, 1, 2, 3, 4, 5, 6, 0, 7
};
/* --------------------------------------------------------------------- */
@@ -117,11 +120,11 @@ static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 7] = {
#define A(a) (reg_map[a] << 16)
#define B(b) (reg_map[b] << 11)
#define C(c) (reg_map[c] << 6)
-#define FD(fd) ((fd) << 21)
-#define FS(fs) ((fs) << 21)
-#define FA(fa) ((fa) << 16)
-#define FB(fb) ((fb) << 11)
-#define FC(fc) ((fc) << 6)
+#define FD(fd) (freg_map[fd] << 21)
+#define FS(fs) (freg_map[fs] << 21)
+#define FA(fa) (freg_map[fa] << 16)
+#define FB(fb) (freg_map[fb] << 11)
+#define FC(fc) (freg_map[fc] << 6)
#define IMM(imm) ((imm) & 0xffff)
#define CRD(d) ((d) << 21)
@@ -536,7 +539,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
return 1;
#endif
- case SLJIT_HAS_PRE_UPDATE:
case SLJIT_HAS_CLZ:
return 1;
@@ -554,46 +556,40 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
/* Creates an index in data_transfer_insts array. */
#define LOAD_DATA 0x01
#define INDEXED 0x02
-#define WRITE_BACK 0x04
+#define SIGNED_DATA 0x04
+
#define WORD_DATA 0x00
#define BYTE_DATA 0x08
#define HALF_DATA 0x10
#define INT_DATA 0x18
-#define SIGNED_DATA 0x20
/* Separates integer and floating point registers */
-#define GPR_REG 0x3f
-#define DOUBLE_DATA 0x40
+#define GPR_REG 0x1f
+#define DOUBLE_DATA 0x20
#define MEM_MASK 0x7f
/* Other inp_flags. */
-#define ARG_TEST 0x000100
/* Integer operation and set flags -> requires exts on 64 bit systems. */
-#define ALT_SIGN_EXT 0x000200
+#define ALT_SIGN_EXT 0x000100
/* This flag affects the RC() and OERC() macros. */
#define ALT_SET_FLAGS 0x000400
-#define ALT_KEEP_CACHE 0x000800
-#define ALT_FORM1 0x010000
-#define ALT_FORM2 0x020000
-#define ALT_FORM3 0x040000
-#define ALT_FORM4 0x080000
-#define ALT_FORM5 0x100000
+#define ALT_FORM1 0x001000
+#define ALT_FORM2 0x002000
+#define ALT_FORM3 0x004000
+#define ALT_FORM4 0x008000
+#define ALT_FORM5 0x010000
/* Source and destination are registers. */
#define REG_DEST 0x000001
#define REG1_SOURCE 0x000002
#define REG2_SOURCE 0x000004
-/* getput_arg_fast returned true. */
-#define FAST_DEST 0x000008
-/* Multiple instructions are required. */
-#define SLOW_DEST 0x000010
/*
-ALT_SIGN_EXT 0x000200
-ALT_SET_FLAGS 0x000400
-ALT_FORM1 0x010000
+ALT_SIGN_EXT 0x000100
+ALT_SET_FLAGS 0x000200
+ALT_FORM1 0x001000
...
-ALT_FORM5 0x100000 */
+ALT_FORM5 0x010000 */
#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32)
#include "sljitNativePPC_32.c"
@@ -610,14 +606,14 @@ ALT_FORM5 0x100000 */
#endif
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler,
- sljit_s32 options, sljit_s32 args, sljit_s32 scratches, sljit_s32 saveds,
+ sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
- sljit_s32 i, tmp, offs;
+ sljit_s32 args, i, tmp, offs;
CHECK_ERROR();
- CHECK(check_sljit_emit_enter(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size));
- set_emit_enter(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size);
+ CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
+ set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
FAIL_IF(push_inst(compiler, MFLR | D(0)));
offs = -(sljit_s32)(sizeof(sljit_sw));
@@ -643,6 +639,9 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
#endif
FAIL_IF(push_inst(compiler, ADDI | D(TMP_ZERO) | A(0) | 0));
+
+ args = get_arg_count(arg_types);
+
if (args >= 1)
FAIL_IF(push_inst(compiler, OR | S(SLJIT_R0) | A(SLJIT_S0) | B(SLJIT_R0)));
if (args >= 2)
@@ -674,12 +673,12 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler,
- sljit_s32 options, sljit_s32 args, sljit_s32 scratches, sljit_s32 saveds,
+ sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
CHECK_ERROR();
- CHECK(check_sljit_set_context(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size));
- set_set_context(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size);
+ CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
+ set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
local_size += GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1) + SLJIT_LOCALS_OFFSET;
compiler->local_size = (local_size + 15) & ~0xf;
@@ -738,17 +737,17 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *comp
/* Operators */
/* --------------------------------------------------------------------- */
-/* i/x - immediate/indexed form
- n/w - no write-back / write-back (1 bit)
- s/l - store/load (1 bit)
+/* s/l - store/load (1 bit)
+ i/x - immediate/indexed form
u/s - signed/unsigned (1 bit)
w/b/h/i - word/byte/half/int allowed (2 bit)
- It contans 32 items, but not all are different. */
+
+ Some opcodes are repeated (e.g. store signed / unsigned byte is the same instruction). */
/* 64 bit only: [reg+imm] must be aligned to 4 bytes. */
+#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
#define INT_ALIGNED 0x10000
-/* 64-bit only: there is no lwau instruction. */
-#define UPDATE_REQ 0x20000
+#endif
#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32)
#define ARCH_32_64(a, b) a
@@ -757,406 +756,217 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *comp
#else
#define ARCH_32_64(a, b) b
#define INST_CODE_AND_DST(inst, flags, reg) \
- (((inst) & ~(INT_ALIGNED | UPDATE_REQ)) | (((flags) & MEM_MASK) <= GPR_REG ? D(reg) : FD(reg)))
+ (((inst) & ~INT_ALIGNED) | (((flags) & MEM_MASK) <= GPR_REG ? D(reg) : FD(reg)))
#endif
static const sljit_ins data_transfer_insts[64 + 16] = {
-/* -------- Unsigned -------- */
+/* -------- Integer -------- */
/* Word. */
-/* u w n i s */ ARCH_32_64(HI(36) /* stw */, HI(62) | INT_ALIGNED | 0x0 /* std */),
-/* u w n i l */ ARCH_32_64(HI(32) /* lwz */, HI(58) | INT_ALIGNED | 0x0 /* ld */),
-/* u w n x s */ ARCH_32_64(HI(31) | LO(151) /* stwx */, HI(31) | LO(149) /* stdx */),
-/* u w n x l */ ARCH_32_64(HI(31) | LO(23) /* lwzx */, HI(31) | LO(21) /* ldx */),
+/* w u i s */ ARCH_32_64(HI(36) /* stw */, HI(62) | INT_ALIGNED | 0x0 /* std */),
+/* w u i l */ ARCH_32_64(HI(32) /* lwz */, HI(58) | INT_ALIGNED | 0x0 /* ld */),
+/* w u x s */ ARCH_32_64(HI(31) | LO(151) /* stwx */, HI(31) | LO(149) /* stdx */),
+/* w u x l */ ARCH_32_64(HI(31) | LO(23) /* lwzx */, HI(31) | LO(21) /* ldx */),
-/* u w w i s */ ARCH_32_64(HI(37) /* stwu */, HI(62) | INT_ALIGNED | 0x1 /* stdu */),
-/* u w w i l */ ARCH_32_64(HI(33) /* lwzu */, HI(58) | INT_ALIGNED | 0x1 /* ldu */),
-/* u w w x s */ ARCH_32_64(HI(31) | LO(183) /* stwux */, HI(31) | LO(181) /* stdux */),
-/* u w w x l */ ARCH_32_64(HI(31) | LO(55) /* lwzux */, HI(31) | LO(53) /* ldux */),
+/* w s i s */ ARCH_32_64(HI(36) /* stw */, HI(62) | INT_ALIGNED | 0x0 /* std */),
+/* w s i l */ ARCH_32_64(HI(32) /* lwz */, HI(58) | INT_ALIGNED | 0x0 /* ld */),
+/* w s x s */ ARCH_32_64(HI(31) | LO(151) /* stwx */, HI(31) | LO(149) /* stdx */),
+/* w s x l */ ARCH_32_64(HI(31) | LO(23) /* lwzx */, HI(31) | LO(21) /* ldx */),
/* Byte. */
-/* u b n i s */ HI(38) /* stb */,
-/* u b n i l */ HI(34) /* lbz */,
-/* u b n x s */ HI(31) | LO(215) /* stbx */,
-/* u b n x l */ HI(31) | LO(87) /* lbzx */,
+/* b u i s */ HI(38) /* stb */,
+/* b u i l */ HI(34) /* lbz */,
+/* b u x s */ HI(31) | LO(215) /* stbx */,
+/* b u x l */ HI(31) | LO(87) /* lbzx */,
-/* u b w i s */ HI(39) /* stbu */,
-/* u b w i l */ HI(35) /* lbzu */,
-/* u b w x s */ HI(31) | LO(247) /* stbux */,
-/* u b w x l */ HI(31) | LO(119) /* lbzux */,
+/* b s i s */ HI(38) /* stb */,
+/* b s i l */ HI(34) /* lbz */ /* EXTS_REQ */,
+/* b s x s */ HI(31) | LO(215) /* stbx */,
+/* b s x l */ HI(31) | LO(87) /* lbzx */ /* EXTS_REQ */,
/* Half. */
-/* u h n i s */ HI(44) /* sth */,
-/* u h n i l */ HI(40) /* lhz */,
-/* u h n x s */ HI(31) | LO(407) /* sthx */,
-/* u h n x l */ HI(31) | LO(279) /* lhzx */,
+/* h u i s */ HI(44) /* sth */,
+/* h u i l */ HI(40) /* lhz */,
+/* h u x s */ HI(31) | LO(407) /* sthx */,
+/* h u x l */ HI(31) | LO(279) /* lhzx */,
-/* u h w i s */ HI(45) /* sthu */,
-/* u h w i l */ HI(41) /* lhzu */,
-/* u h w x s */ HI(31) | LO(439) /* sthux */,
-/* u h w x l */ HI(31) | LO(311) /* lhzux */,
+/* h s i s */ HI(44) /* sth */,
+/* h s i l */ HI(42) /* lha */,
+/* h s x s */ HI(31) | LO(407) /* sthx */,
+/* h s x l */ HI(31) | LO(343) /* lhax */,
/* Int. */
-/* u i n i s */ HI(36) /* stw */,
-/* u i n i l */ HI(32) /* lwz */,
-/* u i n x s */ HI(31) | LO(151) /* stwx */,
-/* u i n x l */ HI(31) | LO(23) /* lwzx */,
+/* i u i s */ HI(36) /* stw */,
+/* i u i l */ HI(32) /* lwz */,
+/* i u x s */ HI(31) | LO(151) /* stwx */,
+/* i u x l */ HI(31) | LO(23) /* lwzx */,
+
+/* i s i s */ HI(36) /* stw */,
+/* i s i l */ ARCH_32_64(HI(32) /* lwz */, HI(58) | INT_ALIGNED | 0x2 /* lwa */),
+/* i s x s */ HI(31) | LO(151) /* stwx */,
+/* i s x l */ ARCH_32_64(HI(31) | LO(23) /* lwzx */, HI(31) | LO(341) /* lwax */),
-/* u i w i s */ HI(37) /* stwu */,
-/* u i w i l */ HI(33) /* lwzu */,
-/* u i w x s */ HI(31) | LO(183) /* stwux */,
-/* u i w x l */ HI(31) | LO(55) /* lwzux */,
+/* -------- Floating point -------- */
-/* -------- Signed -------- */
+/* d i s */ HI(54) /* stfd */,
+/* d i l */ HI(50) /* lfd */,
+/* d x s */ HI(31) | LO(727) /* stfdx */,
+/* d x l */ HI(31) | LO(599) /* lfdx */,
+
+/* s i s */ HI(52) /* stfs */,
+/* s i l */ HI(48) /* lfs */,
+/* s x s */ HI(31) | LO(663) /* stfsx */,
+/* s x l */ HI(31) | LO(535) /* lfsx */,
+};
+
+static const sljit_ins updated_data_transfer_insts[64] = {
+
+/* -------- Integer -------- */
/* Word. */
-/* s w n i s */ ARCH_32_64(HI(36) /* stw */, HI(62) | INT_ALIGNED | 0x0 /* std */),
-/* s w n i l */ ARCH_32_64(HI(32) /* lwz */, HI(58) | INT_ALIGNED | 0x0 /* ld */),
-/* s w n x s */ ARCH_32_64(HI(31) | LO(151) /* stwx */, HI(31) | LO(149) /* stdx */),
-/* s w n x l */ ARCH_32_64(HI(31) | LO(23) /* lwzx */, HI(31) | LO(21) /* ldx */),
+/* w u i s */ ARCH_32_64(HI(37) /* stwu */, HI(62) | INT_ALIGNED | 0x1 /* stdu */),
+/* w u i l */ ARCH_32_64(HI(33) /* lwzu */, HI(58) | INT_ALIGNED | 0x1 /* ldu */),
+/* w u x s */ ARCH_32_64(HI(31) | LO(183) /* stwux */, HI(31) | LO(181) /* stdux */),
+/* w u x l */ ARCH_32_64(HI(31) | LO(55) /* lwzux */, HI(31) | LO(53) /* ldux */),
-/* s w w i s */ ARCH_32_64(HI(37) /* stwu */, HI(62) | INT_ALIGNED | 0x1 /* stdu */),
-/* s w w i l */ ARCH_32_64(HI(33) /* lwzu */, HI(58) | INT_ALIGNED | 0x1 /* ldu */),
-/* s w w x s */ ARCH_32_64(HI(31) | LO(183) /* stwux */, HI(31) | LO(181) /* stdux */),
-/* s w w x l */ ARCH_32_64(HI(31) | LO(55) /* lwzux */, HI(31) | LO(53) /* ldux */),
+/* w s i s */ ARCH_32_64(HI(37) /* stwu */, HI(62) | INT_ALIGNED | 0x1 /* stdu */),
+/* w s i l */ ARCH_32_64(HI(33) /* lwzu */, HI(58) | INT_ALIGNED | 0x1 /* ldu */),
+/* w s x s */ ARCH_32_64(HI(31) | LO(183) /* stwux */, HI(31) | LO(181) /* stdux */),
+/* w s x l */ ARCH_32_64(HI(31) | LO(55) /* lwzux */, HI(31) | LO(53) /* ldux */),
/* Byte. */
-/* s b n i s */ HI(38) /* stb */,
-/* s b n i l */ HI(34) /* lbz */ /* EXTS_REQ */,
-/* s b n x s */ HI(31) | LO(215) /* stbx */,
-/* s b n x l */ HI(31) | LO(87) /* lbzx */ /* EXTS_REQ */,
+/* b u i s */ HI(39) /* stbu */,
+/* b u i l */ HI(35) /* lbzu */,
+/* b u x s */ HI(31) | LO(247) /* stbux */,
+/* b u x l */ HI(31) | LO(119) /* lbzux */,
-/* s b w i s */ HI(39) /* stbu */,
-/* s b w i l */ HI(35) /* lbzu */ /* EXTS_REQ */,
-/* s b w x s */ HI(31) | LO(247) /* stbux */,
-/* s b w x l */ HI(31) | LO(119) /* lbzux */ /* EXTS_REQ */,
+/* b s i s */ HI(39) /* stbu */,
+/* b s i l */ 0 /* no such instruction */,
+/* b s x s */ HI(31) | LO(247) /* stbux */,
+/* b s x l */ 0 /* no such instruction */,
/* Half. */
-/* s h n i s */ HI(44) /* sth */,
-/* s h n i l */ HI(42) /* lha */,
-/* s h n x s */ HI(31) | LO(407) /* sthx */,
-/* s h n x l */ HI(31) | LO(343) /* lhax */,
+/* h u i s */ HI(45) /* sthu */,
+/* h u i l */ HI(41) /* lhzu */,
+/* h u x s */ HI(31) | LO(439) /* sthux */,
+/* h u x l */ HI(31) | LO(311) /* lhzux */,
-/* s h w i s */ HI(45) /* sthu */,
-/* s h w i l */ HI(43) /* lhau */,
-/* s h w x s */ HI(31) | LO(439) /* sthux */,
-/* s h w x l */ HI(31) | LO(375) /* lhaux */,
+/* h s i s */ HI(45) /* sthu */,
+/* h s i l */ HI(43) /* lhau */,
+/* h s x s */ HI(31) | LO(439) /* sthux */,
+/* h s x l */ HI(31) | LO(375) /* lhaux */,
/* Int. */
-/* s i n i s */ HI(36) /* stw */,
-/* s i n i l */ ARCH_32_64(HI(32) /* lwz */, HI(58) | INT_ALIGNED | 0x2 /* lwa */),
-/* s i n x s */ HI(31) | LO(151) /* stwx */,
-/* s i n x l */ ARCH_32_64(HI(31) | LO(23) /* lwzx */, HI(31) | LO(341) /* lwax */),
-
-/* s i w i s */ HI(37) /* stwu */,
-/* s i w i l */ ARCH_32_64(HI(33) /* lwzu */, HI(58) | INT_ALIGNED | UPDATE_REQ | 0x2 /* lwa */),
-/* s i w x s */ HI(31) | LO(183) /* stwux */,
-/* s i w x l */ ARCH_32_64(HI(31) | LO(55) /* lwzux */, HI(31) | LO(373) /* lwaux */),
-
-/* -------- Double -------- */
-
-/* d n i s */ HI(54) /* stfd */,
-/* d n i l */ HI(50) /* lfd */,
-/* d n x s */ HI(31) | LO(727) /* stfdx */,
-/* d n x l */ HI(31) | LO(599) /* lfdx */,
-
-/* d w i s */ HI(55) /* stfdu */,
-/* d w i l */ HI(51) /* lfdu */,
-/* d w x s */ HI(31) | LO(759) /* stfdux */,
-/* d w x l */ HI(31) | LO(631) /* lfdux */,
-
-/* s n i s */ HI(52) /* stfs */,
-/* s n i l */ HI(48) /* lfs */,
-/* s n x s */ HI(31) | LO(663) /* stfsx */,
-/* s n x l */ HI(31) | LO(535) /* lfsx */,
-
-/* s w i s */ HI(53) /* stfsu */,
-/* s w i l */ HI(49) /* lfsu */,
-/* s w x s */ HI(31) | LO(695) /* stfsux */,
-/* s w x l */ HI(31) | LO(567) /* lfsux */,
+/* i u i s */ HI(37) /* stwu */,
+/* i u i l */ HI(33) /* lwzu */,
+/* i u x s */ HI(31) | LO(183) /* stwux */,
+/* i u x l */ HI(31) | LO(55) /* lwzux */,
+
+/* i s i s */ HI(37) /* stwu */,
+/* i s i l */ ARCH_32_64(HI(33) /* lwzu */, 0 /* no such instruction */),
+/* i s x s */ HI(31) | LO(183) /* stwux */,
+/* i s x l */ ARCH_32_64(HI(31) | LO(55) /* lwzux */, HI(31) | LO(373) /* lwaux */),
+
+/* -------- Floating point -------- */
+
+/* d i s */ HI(55) /* stfdu */,
+/* d i l */ HI(51) /* lfdu */,
+/* d x s */ HI(31) | LO(759) /* stfdux */,
+/* d x l */ HI(31) | LO(631) /* lfdux */,
+
+/* s i s */ HI(53) /* stfsu */,
+/* s i l */ HI(49) /* lfsu */,
+/* s x s */ HI(31) | LO(695) /* stfsux */,
+/* s x l */ HI(31) | LO(567) /* lfsux */,
};
#undef ARCH_32_64
/* Simple cases (no caching is required). */
-static sljit_s32 getput_arg_fast(struct sljit_compiler *compiler, sljit_s32 inp_flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw)
+static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 inp_flags, sljit_s32 reg,
+ sljit_s32 arg, sljit_sw argw, sljit_s32 tmp_reg)
{
sljit_ins inst;
+ sljit_s32 offs_reg;
+ sljit_sw high_short;
/* Should work when (arg & REG_MASK) == 0. */
SLJIT_ASSERT(A(0) == 0);
SLJIT_ASSERT(arg & SLJIT_MEM);
- if (arg & OFFS_REG_MASK) {
- if (argw & 0x3)
- return 0;
- if (inp_flags & ARG_TEST)
- return 1;
-
- inst = data_transfer_insts[(inp_flags | INDEXED) & MEM_MASK];
- SLJIT_ASSERT(!(inst & (INT_ALIGNED | UPDATE_REQ)));
- FAIL_IF(push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg & REG_MASK) | B(OFFS_REG(arg))));
- return -1;
- }
-
- if (SLJIT_UNLIKELY(!(arg & REG_MASK)))
- inp_flags &= ~WRITE_BACK;
-
-#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
- inst = data_transfer_insts[inp_flags & MEM_MASK];
- SLJIT_ASSERT((arg & REG_MASK) || !(inst & UPDATE_REQ));
-
- if (argw > SIMM_MAX || argw < SIMM_MIN || ((inst & INT_ALIGNED) && (argw & 0x3)) || (inst & UPDATE_REQ))
- return 0;
- if (inp_flags & ARG_TEST)
- return 1;
-#endif
+ if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) {
+ argw &= 0x3;
+ offs_reg = OFFS_REG(arg);
+ if (argw != 0) {
#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32)
- if (argw > SIMM_MAX || argw < SIMM_MIN)
- return 0;
- if (inp_flags & ARG_TEST)
- return 1;
-
- inst = data_transfer_insts[inp_flags & MEM_MASK];
- SLJIT_ASSERT(!(inst & (INT_ALIGNED | UPDATE_REQ)));
+ FAIL_IF(push_inst(compiler, RLWINM | S(OFFS_REG(arg)) | A(tmp_reg) | (argw << 11) | ((31 - argw) << 1)));
+#else
+ FAIL_IF(push_inst(compiler, RLDI(tmp_reg, OFFS_REG(arg), argw, 63 - argw, 1)));
#endif
+ offs_reg = tmp_reg;
+ }
- FAIL_IF(push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg & REG_MASK) | IMM(argw)));
- return -1;
-}
+ inst = data_transfer_insts[(inp_flags | INDEXED) & MEM_MASK];
-/* See getput_arg below.
- Note: can_cache is called only for binary operators. Those operator always
- uses word arguments without write back. */
-static sljit_s32 can_cache(sljit_s32 arg, sljit_sw argw, sljit_s32 next_arg, sljit_sw next_argw)
-{
- sljit_sw high_short, next_high_short;
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
- sljit_sw diff;
+ SLJIT_ASSERT(!(inst & INT_ALIGNED));
#endif
- SLJIT_ASSERT((arg & SLJIT_MEM) && (next_arg & SLJIT_MEM));
-
- if (arg & OFFS_REG_MASK)
- return ((arg & OFFS_REG_MASK) == (next_arg & OFFS_REG_MASK) && (argw & 0x3) == (next_argw & 0x3));
-
- if (next_arg & OFFS_REG_MASK)
- return 0;
-
-#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32)
- high_short = (argw + ((argw & 0x8000) << 1)) & ~0xffff;
- next_high_short = (next_argw + ((next_argw & 0x8000) << 1)) & ~0xffff;
- return high_short == next_high_short;
-#else
- if (argw <= 0x7fffffffl && argw >= -0x80000000l) {
- high_short = (argw + ((argw & 0x8000) << 1)) & ~0xffff;
- next_high_short = (next_argw + ((next_argw & 0x8000) << 1)) & ~0xffff;
- if (high_short == next_high_short)
- return 1;
+ return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg & REG_MASK) | B(offs_reg));
}
- diff = argw - next_argw;
- if (!(arg & REG_MASK))
- return diff <= SIMM_MAX && diff >= SIMM_MIN;
-
- if (arg == next_arg && diff <= SIMM_MAX && diff >= SIMM_MIN)
- return 1;
-
- return 0;
-#endif
-}
-
-#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
-#define ADJUST_CACHED_IMM(imm) \
- if ((inst & INT_ALIGNED) && (imm & 0x3)) { \
- /* Adjust cached value. Fortunately this is really a rare case */ \
- compiler->cache_argw += imm & 0x3; \
- FAIL_IF(push_inst(compiler, ADDI | D(TMP_REG3) | A(TMP_REG3) | (imm & 0x3))); \
- imm &= ~0x3; \
- }
-#endif
+ inst = data_transfer_insts[inp_flags & MEM_MASK];
+ arg &= REG_MASK;
-/* Emit the necessary instructions. See can_cache above. */
-static sljit_s32 getput_arg(struct sljit_compiler *compiler, sljit_s32 inp_flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw, sljit_s32 next_arg, sljit_sw next_argw)
-{
- sljit_s32 tmp_r;
- sljit_ins inst;
- sljit_sw high_short, next_high_short;
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
- sljit_sw diff;
-#endif
-
- SLJIT_ASSERT(arg & SLJIT_MEM);
-
- tmp_r = ((inp_flags & LOAD_DATA) && ((inp_flags) & MEM_MASK) <= GPR_REG) ? reg : TMP_REG1;
- /* Special case for "mov reg, [reg, ... ]". */
- if ((arg & REG_MASK) == tmp_r)
- tmp_r = TMP_REG1;
-
- if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) {
- argw &= 0x3;
- /* Otherwise getput_arg_fast would capture it. */
- SLJIT_ASSERT(argw);
+ if ((inst & INT_ALIGNED) && (argw & 0x3) != 0) {
+ FAIL_IF(load_immediate(compiler, tmp_reg, argw));
- if ((SLJIT_MEM | (arg & OFFS_REG_MASK)) == compiler->cache_arg && argw == compiler->cache_argw)
- tmp_r = TMP_REG3;
- else {
- if ((arg & OFFS_REG_MASK) == (next_arg & OFFS_REG_MASK) && argw == (next_argw & 0x3)) {
- compiler->cache_arg = SLJIT_MEM | (arg & OFFS_REG_MASK);
- compiler->cache_argw = argw;
- tmp_r = TMP_REG3;
- }
-#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32)
- FAIL_IF(push_inst(compiler, RLWINM | S(OFFS_REG(arg)) | A(tmp_r) | (argw << 11) | ((31 - argw) << 1)));
-#else
- FAIL_IF(push_inst(compiler, RLDI(tmp_r, OFFS_REG(arg), argw, 63 - argw, 1)));
-#endif
- }
inst = data_transfer_insts[(inp_flags | INDEXED) & MEM_MASK];
- SLJIT_ASSERT(!(inst & (INT_ALIGNED | UPDATE_REQ)));
- return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg & REG_MASK) | B(tmp_r));
+ return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg) | B(tmp_reg));
}
+#endif
- if (SLJIT_UNLIKELY(!(arg & REG_MASK)))
- inp_flags &= ~WRITE_BACK;
-
- inst = data_transfer_insts[inp_flags & MEM_MASK];
- SLJIT_ASSERT((arg & REG_MASK) || !(inst & UPDATE_REQ));
+ if (argw <= SIMM_MAX && argw >= SIMM_MIN)
+ return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg) | IMM(argw));
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
- if (argw <= 0x7fff7fffl && argw >= -0x80000000l
- && (!(inst & INT_ALIGNED) || !(argw & 0x3)) && !(inst & UPDATE_REQ)) {
+ if (argw <= 0x7fff7fffl && argw >= -0x80000000l) {
#endif
- arg &= REG_MASK;
high_short = (sljit_s32)(argw + ((argw & 0x8000) << 1)) & ~0xffff;
- /* The getput_arg_fast should handle this otherwise. */
+
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
SLJIT_ASSERT(high_short && high_short <= 0x7fffffffl && high_short >= -0x80000000l);
#else
- SLJIT_ASSERT(high_short && !(inst & (INT_ALIGNED | UPDATE_REQ)));
+ SLJIT_ASSERT(high_short);
#endif
- if (inp_flags & WRITE_BACK) {
- tmp_r = arg;
- FAIL_IF(push_inst(compiler, ADDIS | D(arg) | A(arg) | IMM(high_short >> 16)));
- }
- else if (compiler->cache_arg != (SLJIT_MEM | arg) || high_short != compiler->cache_argw) {
- if ((next_arg & SLJIT_MEM) && !(next_arg & OFFS_REG_MASK)) {
- next_high_short = (sljit_s32)(next_argw + ((next_argw & 0x8000) << 1)) & ~0xffff;
- if (high_short == next_high_short) {
- compiler->cache_arg = SLJIT_MEM | arg;
- compiler->cache_argw = high_short;
- tmp_r = TMP_REG3;
- }
- }
- FAIL_IF(push_inst(compiler, ADDIS | D(tmp_r) | A(arg & REG_MASK) | IMM(high_short >> 16)));
- }
- else
- tmp_r = TMP_REG3;
-
- return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(tmp_r) | IMM(argw));
+ FAIL_IF(push_inst(compiler, ADDIS | D(tmp_reg) | A(arg) | IMM(high_short >> 16)));
+ return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(tmp_reg) | IMM(argw));
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
}
- /* Everything else is PPC-64 only. */
- if (SLJIT_UNLIKELY(!(arg & REG_MASK))) {
- diff = argw - compiler->cache_argw;
- if ((compiler->cache_arg & SLJIT_IMM) && diff <= SIMM_MAX && diff >= SIMM_MIN) {
- ADJUST_CACHED_IMM(diff);
- return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(TMP_REG3) | IMM(diff));
- }
+ /* The rest is PPC-64 only. */
- diff = argw - next_argw;
- if ((next_arg & SLJIT_MEM) && diff <= SIMM_MAX && diff >= SIMM_MIN) {
- SLJIT_ASSERT(inp_flags & LOAD_DATA);
-
- compiler->cache_arg = SLJIT_IMM;
- compiler->cache_argw = argw;
- tmp_r = TMP_REG3;
- }
-
- FAIL_IF(load_immediate(compiler, tmp_r, argw));
- return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(tmp_r));
- }
-
- diff = argw - compiler->cache_argw;
- if (compiler->cache_arg == arg && diff <= SIMM_MAX && diff >= SIMM_MIN) {
- SLJIT_ASSERT(!(inp_flags & WRITE_BACK) && !(inst & UPDATE_REQ));
- ADJUST_CACHED_IMM(diff);
- return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(TMP_REG3) | IMM(diff));
- }
+ FAIL_IF(load_immediate(compiler, tmp_reg, argw));
- if ((compiler->cache_arg & SLJIT_IMM) && diff <= SIMM_MAX && diff >= SIMM_MIN) {
- inst = data_transfer_insts[(inp_flags | INDEXED) & MEM_MASK];
- SLJIT_ASSERT(!(inst & (INT_ALIGNED | UPDATE_REQ)));
- if (compiler->cache_argw != argw) {
- FAIL_IF(push_inst(compiler, ADDI | D(TMP_REG3) | A(TMP_REG3) | IMM(diff)));
- compiler->cache_argw = argw;
- }
- return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg & REG_MASK) | B(TMP_REG3));
- }
-
- if (argw == next_argw && (next_arg & SLJIT_MEM)) {
- SLJIT_ASSERT(inp_flags & LOAD_DATA);
- FAIL_IF(load_immediate(compiler, TMP_REG3, argw));
-
- compiler->cache_arg = SLJIT_IMM;
- compiler->cache_argw = argw;
-
- inst = data_transfer_insts[(inp_flags | INDEXED) & MEM_MASK];
- SLJIT_ASSERT(!(inst & (INT_ALIGNED | UPDATE_REQ)));
- return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg & REG_MASK) | B(TMP_REG3));
- }
-
- diff = argw - next_argw;
- if (arg == next_arg && !(inp_flags & WRITE_BACK) && diff <= SIMM_MAX && diff >= SIMM_MIN) {
- SLJIT_ASSERT(inp_flags & LOAD_DATA);
- FAIL_IF(load_immediate(compiler, TMP_REG3, argw));
- FAIL_IF(push_inst(compiler, ADD | D(TMP_REG3) | A(TMP_REG3) | B(arg & REG_MASK)));
-
- compiler->cache_arg = arg;
- compiler->cache_argw = argw;
-
- return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(TMP_REG3));
- }
-
- if ((next_arg & SLJIT_MEM) && !(next_arg & OFFS_REG_MASK) && diff <= SIMM_MAX && diff >= SIMM_MIN) {
- SLJIT_ASSERT(inp_flags & LOAD_DATA);
- FAIL_IF(load_immediate(compiler, TMP_REG3, argw));
-
- compiler->cache_arg = SLJIT_IMM;
- compiler->cache_argw = argw;
- tmp_r = TMP_REG3;
- }
- else
- FAIL_IF(load_immediate(compiler, tmp_r, argw));
-
- /* Get the indexed version instead of the normal one. */
inst = data_transfer_insts[(inp_flags | INDEXED) & MEM_MASK];
- SLJIT_ASSERT(!(inst & (INT_ALIGNED | UPDATE_REQ)));
- return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg & REG_MASK) | B(tmp_r));
+ return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg) | B(tmp_reg));
#endif
}
-static SLJIT_INLINE sljit_s32 emit_op_mem2(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg1, sljit_sw arg1w, sljit_s32 arg2, sljit_sw arg2w)
-{
- if (getput_arg_fast(compiler, flags, reg, arg1, arg1w))
- return compiler->error;
- return getput_arg(compiler, flags, reg, arg1, arg1w, arg2, arg2w);
-}
-
static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 input_flags,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src1, sljit_sw src1w,
@@ -1164,40 +974,21 @@ static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s3
{
/* arg1 goes to TMP_REG1 or src reg
arg2 goes to TMP_REG2, imm or src reg
- TMP_REG3 can be used for caching
- result goes to TMP_REG2, so put result can use TMP_REG1 and TMP_REG3. */
- sljit_s32 dst_r;
+ result goes to TMP_REG2, so put result can use TMP_REG1. */
+ sljit_s32 dst_r = TMP_REG2;
sljit_s32 src1_r;
sljit_s32 src2_r;
sljit_s32 sugg_src2_r = TMP_REG2;
sljit_s32 flags = input_flags & (ALT_FORM1 | ALT_FORM2 | ALT_FORM3 | ALT_FORM4 | ALT_FORM5 | ALT_SIGN_EXT | ALT_SET_FLAGS);
- if (!(input_flags & ALT_KEEP_CACHE)) {
- compiler->cache_arg = 0;
- compiler->cache_argw = 0;
- }
-
/* Destination check. */
- if (SLJIT_UNLIKELY(dst == SLJIT_UNUSED)) {
- dst_r = TMP_REG2;
- }
- else if (FAST_IS_REG(dst)) {
+ if (SLOW_IS_REG(dst)) {
dst_r = dst;
flags |= REG_DEST;
- if (op >= SLJIT_MOV && op <= SLJIT_MOVU_S32)
+
+ if (op >= SLJIT_MOV && op <= SLJIT_MOV_P)
sugg_src2_r = dst_r;
}
- else {
- SLJIT_ASSERT(dst & SLJIT_MEM);
- if (getput_arg_fast(compiler, input_flags | ARG_TEST, TMP_REG2, dst, dstw)) {
- flags |= FAST_DEST;
- dst_r = TMP_REG2;
- }
- else {
- flags |= SLOW_DEST;
- dst_r = 0;
- }
- }
/* Source 1. */
if (FAST_IS_REG(src1)) {
@@ -1208,80 +999,34 @@ static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s3
FAIL_IF(load_immediate(compiler, TMP_REG1, src1w));
src1_r = TMP_REG1;
}
- else if (getput_arg_fast(compiler, input_flags | LOAD_DATA, TMP_REG1, src1, src1w)) {
- FAIL_IF(compiler->error);
+ else {
+ FAIL_IF(emit_op_mem(compiler, input_flags | LOAD_DATA, TMP_REG1, src1, src1w, TMP_REG1));
src1_r = TMP_REG1;
}
- else
- src1_r = 0;
/* Source 2. */
if (FAST_IS_REG(src2)) {
src2_r = src2;
flags |= REG2_SOURCE;
- if (!(flags & REG_DEST) && op >= SLJIT_MOV && op <= SLJIT_MOVU_S32)
+
+ if (!(flags & REG_DEST) && op >= SLJIT_MOV && op <= SLJIT_MOV_P)
dst_r = src2_r;
}
else if (src2 & SLJIT_IMM) {
FAIL_IF(load_immediate(compiler, sugg_src2_r, src2w));
src2_r = sugg_src2_r;
}
- else if (getput_arg_fast(compiler, input_flags | LOAD_DATA, sugg_src2_r, src2, src2w)) {
- FAIL_IF(compiler->error);
- src2_r = sugg_src2_r;
- }
- else
- src2_r = 0;
-
- /* src1_r, src2_r and dst_r can be zero (=unprocessed).
- All arguments are complex addressing modes, and it is a binary operator. */
- if (src1_r == 0 && src2_r == 0 && dst_r == 0) {
- if (!can_cache(src1, src1w, src2, src2w) && can_cache(src1, src1w, dst, dstw)) {
- FAIL_IF(getput_arg(compiler, input_flags | LOAD_DATA, TMP_REG2, src2, src2w, src1, src1w));
- FAIL_IF(getput_arg(compiler, input_flags | LOAD_DATA, TMP_REG1, src1, src1w, dst, dstw));
- }
- else {
- FAIL_IF(getput_arg(compiler, input_flags | LOAD_DATA, TMP_REG1, src1, src1w, src2, src2w));
- FAIL_IF(getput_arg(compiler, input_flags | LOAD_DATA, TMP_REG2, src2, src2w, dst, dstw));
- }
- src1_r = TMP_REG1;
- src2_r = TMP_REG2;
- }
- else if (src1_r == 0 && src2_r == 0) {
- FAIL_IF(getput_arg(compiler, input_flags | LOAD_DATA, TMP_REG1, src1, src1w, src2, src2w));
- src1_r = TMP_REG1;
- }
- else if (src1_r == 0 && dst_r == 0) {
- FAIL_IF(getput_arg(compiler, input_flags | LOAD_DATA, TMP_REG1, src1, src1w, dst, dstw));
- src1_r = TMP_REG1;
- }
- else if (src2_r == 0 && dst_r == 0) {
- FAIL_IF(getput_arg(compiler, input_flags | LOAD_DATA, sugg_src2_r, src2, src2w, dst, dstw));
- src2_r = sugg_src2_r;
- }
-
- if (dst_r == 0)
- dst_r = TMP_REG2;
-
- if (src1_r == 0) {
- FAIL_IF(getput_arg(compiler, input_flags | LOAD_DATA, TMP_REG1, src1, src1w, 0, 0));
- src1_r = TMP_REG1;
- }
-
- if (src2_r == 0) {
- FAIL_IF(getput_arg(compiler, input_flags | LOAD_DATA, sugg_src2_r, src2, src2w, 0, 0));
+ else {
+ FAIL_IF(emit_op_mem(compiler, input_flags | LOAD_DATA, sugg_src2_r, src2, src2w, TMP_REG2));
src2_r = sugg_src2_r;
}
FAIL_IF(emit_single_op(compiler, op, flags, dst_r, src1_r, src2_r));
- if (flags & (FAST_DEST | SLOW_DEST)) {
- if (flags & FAST_DEST)
- FAIL_IF(getput_arg_fast(compiler, input_flags, dst_r, dst, dstw));
- else
- FAIL_IF(getput_arg(compiler, input_flags, dst_r, dst, dstw, 0, 0));
- }
- return SLJIT_SUCCESS;
+ if (!(dst & SLJIT_MEM))
+ return SLJIT_SUCCESS;
+
+ return emit_op_mem(compiler, input_flags, dst_r, dst, dstw, TMP_REG1);
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op)
@@ -1385,34 +1130,31 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
if (GET_FLAG_TYPE(op_flags) == SLJIT_OVERFLOW)
FAIL_IF(push_inst(compiler, MTXER | S(TMP_ZERO)));
+ if (op < SLJIT_NOT && FAST_IS_REG(src) && src == dst) {
+ if (!TYPE_CAST_NEEDED(op))
+ return SLJIT_SUCCESS;
+ }
+
+#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
if (op_flags & SLJIT_I32_OP) {
if (op < SLJIT_NOT) {
- if (FAST_IS_REG(src) && src == dst) {
- if (!TYPE_CAST_NEEDED(op))
- return SLJIT_SUCCESS;
+ if (src & SLJIT_MEM) {
+ if (op == SLJIT_MOV_S32)
+ op = SLJIT_MOV_U32;
+ }
+ else if (src & SLJIT_IMM) {
+ if (op == SLJIT_MOV_U32)
+ op = SLJIT_MOV_S32;
}
-#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
- if (op == SLJIT_MOV_S32 && (src & SLJIT_MEM))
- op = SLJIT_MOV_U32;
- if (op == SLJIT_MOVU_S32 && (src & SLJIT_MEM))
- op = SLJIT_MOVU_U32;
- if (op == SLJIT_MOV_U32 && (src & SLJIT_IMM))
- op = SLJIT_MOV_S32;
- if (op == SLJIT_MOVU_U32 && (src & SLJIT_IMM))
- op = SLJIT_MOVU_S32;
-#endif
}
-#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
else {
/* Most operations expect sign extended arguments. */
flags |= INT_DATA | SIGNED_DATA;
- if (src & SLJIT_IMM)
- srcw = (sljit_s32)srcw;
if (HAS_FLAGS(op_flags))
flags |= ALT_SIGN_EXT;
}
-#endif
}
+#endif
switch (op) {
case SLJIT_MOV:
@@ -1443,34 +1185,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
case SLJIT_MOV_S16:
return EMIT_MOV(SLJIT_MOV_S16, HALF_DATA | SIGNED_DATA, (sljit_s16));
- case SLJIT_MOVU:
- case SLJIT_MOVU_P:
-#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32)
- case SLJIT_MOVU_U32:
- case SLJIT_MOVU_S32:
-#endif
- return emit_op(compiler, SLJIT_MOV, flags | WORD_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, srcw);
-
-#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
- case SLJIT_MOVU_U32:
- return EMIT_MOV(SLJIT_MOV_U32, INT_DATA | WRITE_BACK, (sljit_u32));
-
- case SLJIT_MOVU_S32:
- return EMIT_MOV(SLJIT_MOV_S32, INT_DATA | SIGNED_DATA | WRITE_BACK, (sljit_s32));
-#endif
-
- case SLJIT_MOVU_U8:
- return EMIT_MOV(SLJIT_MOV_U8, BYTE_DATA | WRITE_BACK, (sljit_u8));
-
- case SLJIT_MOVU_S8:
- return EMIT_MOV(SLJIT_MOV_S8, BYTE_DATA | SIGNED_DATA | WRITE_BACK, (sljit_s8));
-
- case SLJIT_MOVU_U16:
- return EMIT_MOV(SLJIT_MOV_U16, HALF_DATA | WRITE_BACK, (sljit_u16));
-
- case SLJIT_MOVU_S16:
- return EMIT_MOV(SLJIT_MOV_S16, HALF_DATA | SIGNED_DATA | WRITE_BACK, (sljit_s16));
-
case SLJIT_NOT:
return emit_op(compiler, SLJIT_NOT, flags, dst, dstw, TMP_REG1, 0, src, srcw);
@@ -1558,8 +1272,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
#endif
if (GET_FLAG_TYPE(op) == SLJIT_OVERFLOW)
FAIL_IF(push_inst(compiler, MTXER | S(TMP_ZERO)));
- if (src2 == TMP_REG2)
- flags |= ALT_KEEP_CACHE;
switch (GET_OPCODE(op)) {
case SLJIT_ADD:
@@ -1746,7 +1458,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg)
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg)
{
CHECK_REG_INDEX(check_sljit_get_float_register_index(reg));
- return reg;
+ return freg_map[reg];
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler,
@@ -1762,7 +1474,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *c
/* Floating point operators */
/* --------------------------------------------------------------------- */
-#define FLOAT_DATA(op) (DOUBLE_DATA | ((op & SLJIT_F32_OP) >> 5))
+#define FLOAT_DATA(op) (DOUBLE_DATA | ((op & SLJIT_F32_OP) >> 6))
#define SELECT_FOP(op, single, double) ((op & SLJIT_F32_OP) ? single : double)
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
@@ -1786,7 +1498,7 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_comp
{
if (src & SLJIT_MEM) {
		/* We can ignore the temporary data store on the stack from a caching point of view. */
- FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src, srcw, dst, dstw));
+ FAIL_IF(emit_op_mem(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src, srcw, TMP_REG1));
src = TMP_FREG1;
}
@@ -1796,10 +1508,10 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_comp
if (op == SLJIT_CONV_SW_FROM_F64) {
if (FAST_IS_REG(dst)) {
- FAIL_IF(emit_op_mem2(compiler, DOUBLE_DATA, TMP_FREG1, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, 0, 0));
- return emit_op_mem2(compiler, WORD_DATA | LOAD_DATA, dst, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, 0, 0);
+ FAIL_IF(emit_op_mem(compiler, DOUBLE_DATA, TMP_FREG1, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, TMP_REG1));
+ return emit_op_mem(compiler, WORD_DATA | LOAD_DATA, dst, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, TMP_REG1);
}
- return emit_op_mem2(compiler, DOUBLE_DATA, TMP_FREG1, dst, dstw, 0, 0);
+ return emit_op_mem(compiler, DOUBLE_DATA, TMP_FREG1, dst, dstw, TMP_REG1);
}
#else
FAIL_IF(push_inst(compiler, FCTIWZ | FD(TMP_FREG1) | FB(src)));
@@ -1808,7 +1520,7 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_comp
if (FAST_IS_REG(dst)) {
FAIL_IF(load_immediate(compiler, TMP_REG1, FLOAT_TMP_MEM_OFFSET));
FAIL_IF(push_inst(compiler, STFIWX | FS(TMP_FREG1) | A(SLJIT_SP) | B(TMP_REG1)));
- return emit_op_mem2(compiler, INT_DATA | LOAD_DATA, dst, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, 0, 0);
+ return emit_op_mem(compiler, INT_DATA | LOAD_DATA, dst, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, TMP_REG1);
}
SLJIT_ASSERT(dst & SLJIT_MEM);
@@ -1859,21 +1571,21 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_comp
if (FAST_IS_REG(src))
FAIL_IF(push_inst(compiler, EXTSW | S(src) | A(TMP_REG1)));
else
- FAIL_IF(emit_op_mem2(compiler, INT_DATA | SIGNED_DATA | LOAD_DATA, TMP_REG1, src, srcw, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET));
+ FAIL_IF(emit_op_mem(compiler, INT_DATA | SIGNED_DATA | LOAD_DATA, TMP_REG1, src, srcw, TMP_REG1));
src = TMP_REG1;
}
if (FAST_IS_REG(src)) {
- FAIL_IF(emit_op_mem2(compiler, WORD_DATA, src, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET));
- FAIL_IF(emit_op_mem2(compiler, DOUBLE_DATA | LOAD_DATA, TMP_FREG1, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, dst, dstw));
+ FAIL_IF(emit_op_mem(compiler, WORD_DATA, src, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, TMP_REG1));
+ FAIL_IF(emit_op_mem(compiler, DOUBLE_DATA | LOAD_DATA, TMP_FREG1, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, TMP_REG1));
}
else
- FAIL_IF(emit_op_mem2(compiler, DOUBLE_DATA | LOAD_DATA, TMP_FREG1, src, srcw, dst, dstw));
+ FAIL_IF(emit_op_mem(compiler, DOUBLE_DATA | LOAD_DATA, TMP_FREG1, src, srcw, TMP_REG1));
FAIL_IF(push_inst(compiler, FCFID | FD(dst_r) | FB(TMP_FREG1)));
if (dst & SLJIT_MEM)
- return emit_op_mem2(compiler, FLOAT_DATA(op), TMP_FREG1, dst, dstw, 0, 0);
+ return emit_op_mem(compiler, FLOAT_DATA(op), TMP_FREG1, dst, dstw, TMP_REG1);
if (op & SLJIT_F32_OP)
return push_inst(compiler, FRSP | FD(dst_r) | FB(dst_r));
return SLJIT_SUCCESS;
@@ -1889,7 +1601,7 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_comp
invert_sign = 0;
}
else if (!FAST_IS_REG(src)) {
- FAIL_IF(emit_op_mem2(compiler, WORD_DATA | SIGNED_DATA | LOAD_DATA, TMP_REG1, src, srcw, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET_LOW));
+ FAIL_IF(emit_op_mem(compiler, WORD_DATA | SIGNED_DATA | LOAD_DATA, TMP_REG1, src, srcw, TMP_REG1));
src = TMP_REG1;
}
@@ -1901,17 +1613,17 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_comp
FAIL_IF(push_inst(compiler, ADDIS | D(TMP_REG2) | A(0) | 0x4330));
if (invert_sign)
FAIL_IF(push_inst(compiler, XORIS | S(src) | A(TMP_REG1) | 0x8000));
- FAIL_IF(emit_op_mem2(compiler, WORD_DATA, TMP_REG2, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET_HI, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET));
- FAIL_IF(emit_op_mem2(compiler, WORD_DATA, TMP_REG1, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET_LOW, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET_HI));
+ FAIL_IF(emit_op_mem(compiler, WORD_DATA, TMP_REG2, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET_HI, TMP_REG1));
+ FAIL_IF(emit_op_mem(compiler, WORD_DATA, TMP_REG1, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET_LOW, TMP_REG2));
FAIL_IF(push_inst(compiler, ADDIS | D(TMP_REG1) | A(0) | 0x8000));
- FAIL_IF(emit_op_mem2(compiler, DOUBLE_DATA | LOAD_DATA, TMP_FREG1, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET_LOW));
- FAIL_IF(emit_op_mem2(compiler, WORD_DATA, TMP_REG1, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET_LOW, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET));
- FAIL_IF(emit_op_mem2(compiler, DOUBLE_DATA | LOAD_DATA, TMP_FREG2, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET_LOW));
+ FAIL_IF(emit_op_mem(compiler, DOUBLE_DATA | LOAD_DATA, TMP_FREG1, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, TMP_REG1));
+ FAIL_IF(emit_op_mem(compiler, WORD_DATA, TMP_REG1, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET_LOW, TMP_REG2));
+ FAIL_IF(emit_op_mem(compiler, DOUBLE_DATA | LOAD_DATA, TMP_FREG2, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, TMP_REG1));
FAIL_IF(push_inst(compiler, FSUB | FD(dst_r) | FA(TMP_FREG1) | FB(TMP_FREG2)));
if (dst & SLJIT_MEM)
- return emit_op_mem2(compiler, FLOAT_DATA(op), TMP_FREG1, dst, dstw, 0, 0);
+ return emit_op_mem(compiler, FLOAT_DATA(op), TMP_FREG1, dst, dstw, TMP_REG1);
if (op & SLJIT_F32_OP)
return push_inst(compiler, FRSP | FD(dst_r) | FB(dst_r));
return SLJIT_SUCCESS;
@@ -1924,12 +1636,12 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compile
sljit_s32 src2, sljit_sw src2w)
{
if (src1 & SLJIT_MEM) {
- FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, src2, src2w));
+ FAIL_IF(emit_op_mem(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, TMP_REG1));
src1 = TMP_FREG1;
}
if (src2 & SLJIT_MEM) {
- FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, 0, 0));
+ FAIL_IF(emit_op_mem(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, TMP_REG2));
src2 = TMP_FREG2;
}
@@ -1943,8 +1655,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
sljit_s32 dst_r;
CHECK_ERROR();
- compiler->cache_arg = 0;
- compiler->cache_argw = 0;
SLJIT_COMPILE_ASSERT((SLJIT_F32_OP == 0x100) && !(DOUBLE_DATA & 0x4), float_transfer_bit_error);
SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw);
@@ -1955,7 +1665,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
if (src & SLJIT_MEM) {
- FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, dst_r, src, srcw, dst, dstw));
+ FAIL_IF(emit_op_mem(compiler, FLOAT_DATA(op) | LOAD_DATA, dst_r, src, srcw, TMP_REG1));
src = dst_r;
}
@@ -1984,7 +1694,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
}
if (dst & SLJIT_MEM)
- FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op), dst_r, dst, dstw, 0, 0));
+ FAIL_IF(emit_op_mem(compiler, FLOAT_DATA(op), dst_r, dst, dstw, TMP_REG1));
return SLJIT_SUCCESS;
}
@@ -1993,7 +1703,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compil
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w)
{
- sljit_s32 dst_r, flags = 0;
+ sljit_s32 dst_r;
CHECK_ERROR();
CHECK(check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w));
@@ -2001,46 +1711,17 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compil
ADJUST_LOCAL_OFFSET(src1, src1w);
ADJUST_LOCAL_OFFSET(src2, src2w);
- compiler->cache_arg = 0;
- compiler->cache_argw = 0;
-
dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG2;
if (src1 & SLJIT_MEM) {
- if (getput_arg_fast(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w)) {
- FAIL_IF(compiler->error);
- src1 = TMP_FREG1;
- } else
- flags |= ALT_FORM1;
+ FAIL_IF(emit_op_mem(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, TMP_REG1));
+ src1 = TMP_FREG1;
}
if (src2 & SLJIT_MEM) {
- if (getput_arg_fast(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w)) {
- FAIL_IF(compiler->error);
- src2 = TMP_FREG2;
- } else
- flags |= ALT_FORM2;
- }
-
- if ((flags & (ALT_FORM1 | ALT_FORM2)) == (ALT_FORM1 | ALT_FORM2)) {
- if (!can_cache(src1, src1w, src2, src2w) && can_cache(src1, src1w, dst, dstw)) {
- FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, src1, src1w));
- FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, dst, dstw));
- }
- else {
- FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, src2, src2w));
- FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, dst, dstw));
- }
- }
- else if (flags & ALT_FORM1)
- FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, dst, dstw));
- else if (flags & ALT_FORM2)
- FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, dst, dstw));
-
- if (flags & ALT_FORM1)
- src1 = TMP_FREG1;
- if (flags & ALT_FORM2)
+ FAIL_IF(emit_op_mem(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, TMP_REG2));
src2 = TMP_FREG2;
+ }
switch (GET_OPCODE(op)) {
case SLJIT_ADD_F64:
@@ -2060,13 +1741,12 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compil
break;
}
- if (dst_r == TMP_FREG2)
- FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op), TMP_FREG2, dst, dstw, 0, 0));
+ if (dst & SLJIT_MEM)
+ FAIL_IF(emit_op_mem(compiler, FLOAT_DATA(op), TMP_FREG2, dst, dstw, TMP_REG1));
return SLJIT_SUCCESS;
}
-#undef FLOAT_DATA
#undef SELECT_FOP
/* --------------------------------------------------------------------- */
@@ -2096,12 +1776,10 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_return(struct sljit_compiler
if (FAST_IS_REG(src))
FAIL_IF(push_inst(compiler, MTLR | S(src)));
else {
- if (src & SLJIT_MEM)
- FAIL_IF(emit_op(compiler, SLJIT_MOV, WORD_DATA, TMP_REG2, 0, TMP_REG1, 0, src, srcw));
- else if (src & SLJIT_IMM)
- FAIL_IF(load_immediate(compiler, TMP_REG2, srcw));
+ FAIL_IF(emit_op(compiler, SLJIT_MOV, WORD_DATA, TMP_REG2, 0, TMP_REG1, 0, src, srcw));
FAIL_IF(push_inst(compiler, MTLR | S(TMP_REG2)));
}
+
return push_inst(compiler, BLR);
}
@@ -2183,7 +1861,7 @@ static sljit_ins get_bo_bi_flags(sljit_s32 type)
return (4 << 21) | ((4 + 3) << 16);
default:
- SLJIT_ASSERT(type >= SLJIT_JUMP && type <= SLJIT_CALL3);
+ SLJIT_ASSERT(type >= SLJIT_JUMP && type <= SLJIT_CALL_CDECL);
return (20 << 21);
}
}
@@ -2209,7 +1887,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile
if (type < SLJIT_JUMP)
jump->flags |= IS_COND;
#if (defined SLJIT_PASS_ENTRY_ADDR_TO_CALL && SLJIT_PASS_ENTRY_ADDR_TO_CALL)
- if (type >= SLJIT_CALL0)
+ if (type >= SLJIT_CALL)
jump->flags |= IS_CALL;
#endif
@@ -2220,6 +1898,24 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile
return jump;
}
+SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 arg_types)
+{
+ CHECK_ERROR_PTR();
+ CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types));
+
+#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
+ PTR_FAIL_IF(call_with_args(compiler, arg_types, NULL));
+#endif
+
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
+ || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ compiler->skip_checks = 1;
+#endif
+
+ return sljit_emit_jump(compiler, type);
+}
+
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw)
{
struct sljit_jump *jump = NULL;
@@ -2231,7 +1927,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi
if (FAST_IS_REG(src)) {
#if (defined SLJIT_PASS_ENTRY_ADDR_TO_CALL && SLJIT_PASS_ENTRY_ADDR_TO_CALL)
- if (type >= SLJIT_CALL0) {
+ if (type >= SLJIT_CALL) {
FAIL_IF(push_inst(compiler, OR | S(src) | A(TMP_CALL_REG) | B(src)));
src_r = TMP_CALL_REG;
}
@@ -2241,12 +1937,13 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi
src_r = src;
#endif
} else if (src & SLJIT_IMM) {
+ /* These jumps are converted to jump/call instructions when possible. */
jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
FAIL_IF(!jump);
set_jump(jump, compiler, JUMP_ADDR);
jump->u.target = srcw;
#if (defined SLJIT_PASS_ENTRY_ADDR_TO_CALL && SLJIT_PASS_ENTRY_ADDR_TO_CALL)
- if (type >= SLJIT_CALL0)
+ if (type >= SLJIT_CALL)
jump->flags |= IS_CALL;
#endif
FAIL_IF(emit_const(compiler, TMP_CALL_REG, 0));
@@ -2263,6 +1960,31 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi
return push_inst(compiler, BCCTR | (20 << 21) | (type >= SLJIT_FAST_CALL ? 1 : 0));
}
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 arg_types,
+ sljit_s32 src, sljit_sw srcw)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw));
+
+#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
+ if (src & SLJIT_MEM) {
+ ADJUST_LOCAL_OFFSET(src, srcw);
+ FAIL_IF(emit_op(compiler, SLJIT_MOV, WORD_DATA, TMP_CALL_REG, 0, TMP_REG1, 0, src, srcw));
+ src = TMP_CALL_REG;
+ }
+
+ FAIL_IF(call_with_args(compiler, arg_types, &src));
+#endif
+
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
+ || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ compiler->skip_checks = 1;
+#endif
+
+ return sljit_emit_ijump(compiler, type, src, srcw);
+}
+
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 type)
@@ -2284,11 +2006,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
op = GET_OPCODE(op);
reg = (op < SLJIT_ADD && FAST_IS_REG(dst)) ? dst : TMP_REG2;
- compiler->cache_arg = 0;
- compiler->cache_argw = 0;
-
if (op >= SLJIT_ADD && (dst & SLJIT_MEM))
- FAIL_IF(emit_op_mem2(compiler, input_flags | LOAD_DATA, TMP_REG1, dst, dstw, dst, dstw));
+ FAIL_IF(emit_op_mem(compiler, input_flags | LOAD_DATA, TMP_REG1, dst, dstw, TMP_REG1));
invert = 0;
cr_bit = 0;
@@ -2384,7 +2103,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
if (op < SLJIT_ADD) {
if (!(dst & SLJIT_MEM))
return SLJIT_SUCCESS;
- return emit_op_mem2(compiler, input_flags, reg, dst, dstw, reg, 0);
+ return emit_op_mem(compiler, input_flags, reg, dst, dstw, TMP_REG1);
}
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
@@ -2406,6 +2125,139 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compil
return sljit_emit_cmov_generic(compiler, type, dst_reg, src, srcw);;
}
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 reg,
+ sljit_s32 mem, sljit_sw memw)
+{
+ sljit_s32 mem_flags;
+ sljit_ins inst;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw));
+
+ if (type & SLJIT_MEM_POST)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ switch (type & 0xff) {
+ case SLJIT_MOV:
+ case SLJIT_MOV_P:
+#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32)
+ case SLJIT_MOV_U32:
+ case SLJIT_MOV_S32:
+#endif
+ mem_flags = WORD_DATA;
+ break;
+
+#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
+ case SLJIT_MOV_U32:
+ mem_flags = INT_DATA;
+ break;
+
+ case SLJIT_MOV_S32:
+ mem_flags = INT_DATA;
+
+ if (!(type & SLJIT_MEM_STORE) && !(type & SLJIT_I32_OP)) {
+ if (mem & OFFS_REG_MASK)
+ mem_flags |= SIGNED_DATA;
+ else
+ return SLJIT_ERR_UNSUPPORTED;
+ }
+ break;
+#endif
+
+ case SLJIT_MOV_U8:
+ case SLJIT_MOV_S8:
+ mem_flags = BYTE_DATA;
+ break;
+
+ case SLJIT_MOV_U16:
+ mem_flags = HALF_DATA;
+ break;
+
+ case SLJIT_MOV_S16:
+ mem_flags = HALF_DATA | SIGNED_DATA;
+ break;
+
+ default:
+ SLJIT_UNREACHABLE();
+ mem_flags = WORD_DATA;
+ break;
+ }
+
+ if (!(type & SLJIT_MEM_STORE))
+ mem_flags |= LOAD_DATA;
+
+ if (SLJIT_UNLIKELY(mem & OFFS_REG_MASK)) {
+ if (memw != 0)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_MEM_SUPP)
+ return SLJIT_SUCCESS;
+
+ inst = updated_data_transfer_insts[mem_flags | INDEXED];
+ FAIL_IF(push_inst(compiler, INST_CODE_AND_DST(inst, 0, reg) | A(mem & REG_MASK) | B(OFFS_REG(mem))));
+ }
+ else {
+ if (memw > SIMM_MAX || memw < SIMM_MIN)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ inst = updated_data_transfer_insts[mem_flags];
+
+#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
+ if ((inst & INT_ALIGNED) && (memw & 0x3) != 0)
+ return SLJIT_ERR_UNSUPPORTED;
+#endif
+
+ if (type & SLJIT_MEM_SUPP)
+ return SLJIT_SUCCESS;
+
+ FAIL_IF(push_inst(compiler, INST_CODE_AND_DST(inst, 0, reg) | A(mem & REG_MASK) | IMM(memw)));
+ }
+
+ if ((mem_flags & LOAD_DATA) && (type & 0xff) == SLJIT_MOV_S8)
+ return push_inst(compiler, EXTSB | S(reg) | A(reg));
+ return SLJIT_SUCCESS;
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 mem, sljit_sw memw)
+{
+ sljit_s32 mem_flags;
+ sljit_ins inst;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fmem(compiler, type, freg, mem, memw));
+
+ if (type & SLJIT_MEM_POST)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (SLJIT_UNLIKELY(mem & OFFS_REG_MASK)) {
+ if (memw != 0)
+ return SLJIT_ERR_UNSUPPORTED;
+ }
+ else {
+ if (memw > SIMM_MAX || memw < SIMM_MIN)
+ return SLJIT_ERR_UNSUPPORTED;
+ }
+
+ if (type & SLJIT_MEM_SUPP)
+ return SLJIT_SUCCESS;
+
+ mem_flags = FLOAT_DATA(type);
+
+ if (!(type & SLJIT_MEM_STORE))
+ mem_flags |= LOAD_DATA;
+
+ if (SLJIT_UNLIKELY(mem & OFFS_REG_MASK)) {
+ inst = updated_data_transfer_insts[mem_flags | INDEXED];
+ return push_inst(compiler, INST_CODE_AND_DST(inst, DOUBLE_DATA, freg) | A(mem & REG_MASK) | B(OFFS_REG(mem)));
+ }
+
+ inst = updated_data_transfer_insts[mem_flags];
+ return push_inst(compiler, INST_CODE_AND_DST(inst, DOUBLE_DATA, freg) | A(mem & REG_MASK) | IMM(memw));
+}
+
SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw init_value)
{
struct sljit_const *const_;
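The PowerPC hunks above fold the old two-operand emit_op_mem2() helper and the cache_arg/cache_argw bookkeeping into a single emit_op_mem() that takes an explicit temporary register, replace the numbered SLJIT_CALL0..SLJIT_CALL3 jump types with SLJIT_CALL/SLJIT_CALL_CDECL plus an arg_types descriptor handled by the new sljit_emit_call() and sljit_emit_icall() entry points, and add sljit_emit_mem()/sljit_emit_fmem(), which expose the update-form (pre-indexed) loads and stores while reporting post-indexing as unsupported. A minimal caller of the new call interface is sketched below; it is not part of the patch, it assumes the SLJIT_RET/SLJIT_ARGn signature macros and SLJIT_FUNC_OFFSET from sljitLir.h, and the callee and wrapper names are hypothetical.

#include "sljitLir.h"

/* Hypothetical C helper the generated code will call. */
static sljit_sw my_helper(sljit_sw a, sljit_sw b)
{
    return a + b;
}

/* Sketch: emit an indirect call through the new arg_types-based API.
   SLJIT_R0/SLJIT_R1 are assumed to already hold the two arguments. */
static sljit_s32 emit_helper_call(struct sljit_compiler *compiler)
{
    sljit_s32 arg_types = SLJIT_RET(SW) | SLJIT_ARG1(SW) | SLJIT_ARG2(SW);

    return sljit_emit_icall(compiler, SLJIT_CALL, arg_types,
        SLJIT_IMM, SLJIT_FUNC_OFFSET(my_helper));
}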
diff --git a/src/3rdparty/pcre2/src/sljit/sljitNativeSPARC_32.c b/src/3rdparty/pcre2/src/sljit/sljitNativeSPARC_32.c
index ee42130e87..0671b130cc 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitNativeSPARC_32.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitNativeSPARC_32.c
@@ -138,6 +138,125 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
return SLJIT_SUCCESS;
}
+static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *src)
+{
+ sljit_s32 reg_index = 8;
+ sljit_s32 word_reg_index = 8;
+ sljit_s32 float_arg_index = 1;
+ sljit_s32 double_arg_count = 0;
+ sljit_s32 float_offset = (16 + 6) * sizeof(sljit_sw);
+ sljit_s32 types = 0;
+ sljit_s32 reg = 0;
+ sljit_s32 move_to_tmp2 = 0;
+
+ if (src)
+ reg = reg_map[*src & REG_MASK];
+
+ arg_types >>= SLJIT_DEF_SHIFT;
+
+ while (arg_types) {
+ types = (types << SLJIT_DEF_SHIFT) | (arg_types & SLJIT_DEF_MASK);
+
+ switch (arg_types & SLJIT_DEF_MASK) {
+ case SLJIT_ARG_TYPE_F32:
+ float_arg_index++;
+ if (reg_index == reg)
+ move_to_tmp2 = 1;
+ reg_index++;
+ break;
+ case SLJIT_ARG_TYPE_F64:
+ float_arg_index++;
+ double_arg_count++;
+ if (reg_index == reg || reg_index + 1 == reg)
+ move_to_tmp2 = 1;
+ reg_index += 2;
+ break;
+ default:
+ if (reg_index != word_reg_index && reg_index < 14 && reg_index == reg)
+ move_to_tmp2 = 1;
+ reg_index++;
+ word_reg_index++;
+ break;
+ }
+
+ if (move_to_tmp2) {
+ move_to_tmp2 = 0;
+ if (reg < 14)
+ FAIL_IF(push_inst(compiler, OR | D(TMP_REG1) | S1(0) | S2A(reg), DR(TMP_REG1)));
+ *src = TMP_REG1;
+ }
+
+ arg_types >>= SLJIT_DEF_SHIFT;
+ }
+
+ arg_types = types;
+
+ while (arg_types) {
+ switch (arg_types & SLJIT_DEF_MASK) {
+ case SLJIT_ARG_TYPE_F32:
+ float_arg_index--;
+ FAIL_IF(push_inst(compiler, STF | FD(float_arg_index) | S1(SLJIT_SP) | IMM(float_offset), MOVABLE_INS));
+ float_offset -= sizeof(sljit_f64);
+ break;
+ case SLJIT_ARG_TYPE_F64:
+ float_arg_index--;
+ if (float_arg_index == 4 && double_arg_count == 4) {
+ FAIL_IF(push_inst(compiler, STF | FD(float_arg_index) | S1(SLJIT_SP) | IMM((16 + 7) * sizeof(sljit_sw)), MOVABLE_INS));
+ FAIL_IF(push_inst(compiler, STF | FD(float_arg_index) | (1 << 25) | S1(SLJIT_SP) | IMM((16 + 8) * sizeof(sljit_sw)), MOVABLE_INS));
+ }
+ else
+ FAIL_IF(push_inst(compiler, STDF | FD(float_arg_index) | S1(SLJIT_SP) | IMM(float_offset), MOVABLE_INS));
+ float_offset -= sizeof(sljit_f64);
+ break;
+ default:
+ break;
+ }
+
+ arg_types >>= SLJIT_DEF_SHIFT;
+ }
+
+ float_offset = (16 + 6) * sizeof(sljit_sw);
+
+ while (types) {
+ switch (types & SLJIT_DEF_MASK) {
+ case SLJIT_ARG_TYPE_F32:
+ reg_index--;
+ if (reg_index < 14)
+ FAIL_IF(push_inst(compiler, LDUW | DA(reg_index) | S1(SLJIT_SP) | IMM(float_offset), reg_index));
+ float_offset -= sizeof(sljit_f64);
+ break;
+ case SLJIT_ARG_TYPE_F64:
+ reg_index -= 2;
+ if (reg_index < 14) {
+ if ((reg_index & 0x1) != 0) {
+ FAIL_IF(push_inst(compiler, LDUW | DA(reg_index) | S1(SLJIT_SP) | IMM(float_offset), reg_index));
+ if (reg_index < 13)
+ FAIL_IF(push_inst(compiler, LDUW | DA(reg_index + 1) | S1(SLJIT_SP) | IMM(float_offset + sizeof(sljit_sw)), reg_index + 1));
+ }
+ else
+ FAIL_IF(push_inst(compiler, LDD | DA(reg_index) | S1(SLJIT_SP) | IMM(float_offset), reg_index));
+ }
+ float_offset -= sizeof(sljit_f64);
+ break;
+ default:
+ reg_index--;
+ word_reg_index--;
+
+ if (reg_index != word_reg_index) {
+ if (reg_index < 14)
+ FAIL_IF(push_inst(compiler, OR | DA(reg_index) | S1(0) | S2A(word_reg_index), reg_index));
+ else
+ FAIL_IF(push_inst(compiler, STW | DA(word_reg_index) | S1(SLJIT_SP) | IMM(92), word_reg_index));
+ }
+ break;
+ }
+
+ types >>= SLJIT_DEF_SHIFT;
+ }
+
+ return SLJIT_SUCCESS;
+}
+
static SLJIT_INLINE sljit_s32 emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw init_value)
{
FAIL_IF(push_inst(compiler, SETHI | D(dst) | ((init_value >> 10) & 0x3fffff), DR(dst)));
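The call_with_args() routine added above marshals outgoing arguments for the 32-bit SPARC calling convention: it first walks the arg_types descriptor to assign output-register slots and to detect a clash with the register holding the call target, then stores floating-point arguments to a stack scratch area and reloads them into the %o registers (leaving an overflowing word argument in its stack slot). The fragment below only illustrates the descriptor-walking pattern it relies on; it is not taken from the patch and assumes the SLJIT_DEF_SHIFT/SLJIT_DEF_MASK and SLJIT_ARG_TYPE_* constants from sljitLir.h.

#include "sljitLir.h"

/* Count the integer/pointer arguments encoded in an arg_types word,
   skipping the return-type slot first, exactly as the loops above do. */
static int count_word_args(sljit_s32 arg_types)
{
    int count = 0;

    arg_types >>= SLJIT_DEF_SHIFT;

    while (arg_types) {
        switch (arg_types & SLJIT_DEF_MASK) {
        case SLJIT_ARG_TYPE_F32:
        case SLJIT_ARG_TYPE_F64:
            break;          /* floating-point argument */
        default:
            count++;        /* machine-word argument */
            break;
        }
        arg_types >>= SLJIT_DEF_SHIFT;
    }

    return count;
}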
diff --git a/src/3rdparty/pcre2/src/sljit/sljitNativeSPARC_common.c b/src/3rdparty/pcre2/src/sljit/sljitNativeSPARC_common.c
index 9831bd83d7..669ecd8152 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitNativeSPARC_common.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitNativeSPARC_common.c
@@ -90,13 +90,19 @@ static void sparc_cache_flush(sljit_ins *from, sljit_ins *to)
#define TMP_REG1 (SLJIT_NUMBER_OF_REGISTERS + 2)
#define TMP_REG2 (SLJIT_NUMBER_OF_REGISTERS + 3)
#define TMP_REG3 (SLJIT_NUMBER_OF_REGISTERS + 4)
+/* This register is modified by calls, which affects the instruction
+ in the delay slot if it is used as a source register. */
#define TMP_LINK (SLJIT_NUMBER_OF_REGISTERS + 5)
-#define TMP_FREG1 (0)
-#define TMP_FREG2 ((SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1) << 1)
+#define TMP_FREG1 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1)
+#define TMP_FREG2 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 2)
static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 6] = {
- 0, 8, 9, 10, 13, 29, 28, 27, 23, 22, 21, 20, 19, 18, 17, 16, 26, 25, 24, 14, 1, 11, 12, 15
+ 0, 8, 9, 10, 11, 29, 28, 27, 23, 22, 21, 20, 19, 18, 17, 16, 26, 25, 24, 14, 1, 12, 13, 15
+};
+
+static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
+ 0, 0, 2, 4, 6, 8, 10, 12, 14
};
/* --------------------------------------------------------------------- */
@@ -104,10 +110,15 @@ static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 6] = {
/* --------------------------------------------------------------------- */
#define D(d) (reg_map[d] << 25)
+#define FD(d) (freg_map[d] << 25)
+#define FDN(d) ((freg_map[d] | 0x1) << 25)
#define DA(d) ((d) << 25)
#define S1(s1) (reg_map[s1] << 14)
-#define S2(s2) (reg_map[s2])
+#define FS1(s1) (freg_map[s1] << 14)
#define S1A(s1) ((s1) << 14)
+#define S2(s2) (reg_map[s2])
+#define FS2(s2) (freg_map[s2])
+#define FS2N(s2) (freg_map[s2] | 0x1)
#define S2A(s2) (s2)
#define IMM_ARG 0x2000
#define DOP(op) ((op) << 5)
@@ -144,6 +155,8 @@ static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 6] = {
#define FSUBD (OPC1(0x2) | OPC3(0x34) | DOP(0x46))
#define FSUBS (OPC1(0x2) | OPC3(0x34) | DOP(0x45))
#define JMPL (OPC1(0x2) | OPC3(0x38))
+#define LDD (OPC1(0x3) | OPC3(0x03))
+#define LDUW (OPC1(0x3) | OPC3(0x00))
#define NOP (OPC1(0x0) | OPC2(0x04))
#define OR (OPC1(0x2) | OPC3(0x02))
#define ORN (OPC1(0x2) | OPC3(0x06))
@@ -157,6 +170,9 @@ static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 6] = {
#define SRAX (OPC1(0x2) | OPC3(0x27) | (1 << 12))
#define SRL (OPC1(0x2) | OPC3(0x26))
#define SRLX (OPC1(0x2) | OPC3(0x26) | (1 << 12))
+#define STDF (OPC1(0x3) | OPC3(0x27))
+#define STF (OPC1(0x3) | OPC3(0x24))
+#define STW (OPC1(0x3) | OPC3(0x04))
#define SUB (OPC1(0x2) | OPC3(0x04))
#define SUBC (OPC1(0x2) | OPC3(0x0c))
#define TA (OPC1(0x2) | OPC3(0x3a) | (8 << 25))
@@ -433,18 +449,17 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
#define MEM_MASK 0x1f
-#define WRITE_BACK 0x00020
-#define ARG_TEST 0x00040
-#define ALT_KEEP_CACHE 0x00080
-#define CUMULATIVE_OP 0x00100
-#define IMM_OP 0x00200
-#define SRC2_IMM 0x00400
+#define ARG_TEST 0x00020
+#define ALT_KEEP_CACHE 0x00040
+#define CUMULATIVE_OP 0x00080
+#define IMM_OP 0x00100
+#define SRC2_IMM 0x00200
-#define REG_DEST 0x00800
-#define REG2_SOURCE 0x01000
-#define SLOW_SRC1 0x02000
-#define SLOW_SRC2 0x04000
-#define SLOW_DEST 0x08000
+#define REG_DEST 0x00400
+#define REG2_SOURCE 0x00800
+#define SLOW_SRC1 0x01000
+#define SLOW_SRC2 0x02000
+#define SLOW_DEST 0x04000
/* SET_FLAGS (0x10 << 19) also belong here! */
@@ -455,12 +470,12 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
#endif
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler,
- sljit_s32 options, sljit_s32 args, sljit_s32 scratches, sljit_s32 saveds,
+ sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
CHECK_ERROR();
- CHECK(check_sljit_emit_enter(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size));
- set_emit_enter(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size);
+ CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
+ set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
local_size = (local_size + SLJIT_LOCALS_OFFSET + 7) & ~0x7;
compiler->local_size = local_size;
@@ -479,12 +494,12 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler,
- sljit_s32 options, sljit_s32 args, sljit_s32 scratches, sljit_s32 saveds,
+ sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
CHECK_ERROR();
- CHECK(check_sljit_set_context(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size));
- set_set_context(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size);
+ CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
+ set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
compiler->local_size = (local_size + SLJIT_LOCALS_OFFSET + 7) & ~0x7;
return SLJIT_SUCCESS;
@@ -546,18 +561,16 @@ static sljit_s32 getput_arg_fast(struct sljit_compiler *compiler, sljit_s32 flag
{
SLJIT_ASSERT(arg & SLJIT_MEM);
- if (!(flags & WRITE_BACK) || !(arg & REG_MASK)) {
- if ((!(arg & OFFS_REG_MASK) && argw <= SIMM_MAX && argw >= SIMM_MIN)
- || ((arg & OFFS_REG_MASK) && (argw & 0x3) == 0)) {
- /* Works for both absoulte and relative addresses (immediate case). */
- if (SLJIT_UNLIKELY(flags & ARG_TEST))
- return 1;
- FAIL_IF(push_inst(compiler, data_transfer_insts[flags & MEM_MASK]
- | ((flags & MEM_MASK) <= GPR_REG ? D(reg) : DA(reg))
- | S1(arg & REG_MASK) | ((arg & OFFS_REG_MASK) ? S2(OFFS_REG(arg)) : IMM(argw)),
- ((flags & MEM_MASK) <= GPR_REG && (flags & LOAD_DATA)) ? DR(reg) : MOVABLE_INS));
- return -1;
- }
+ if ((!(arg & OFFS_REG_MASK) && argw <= SIMM_MAX && argw >= SIMM_MIN)
+ || ((arg & OFFS_REG_MASK) && (argw & 0x3) == 0)) {
+ /* Works for both absolute and relative addresses (immediate case). */
+ if (SLJIT_UNLIKELY(flags & ARG_TEST))
+ return 1;
+ FAIL_IF(push_inst(compiler, data_transfer_insts[flags & MEM_MASK]
+ | ((flags & MEM_MASK) <= GPR_REG ? D(reg) : FD(reg))
+ | S1(arg & REG_MASK) | ((arg & OFFS_REG_MASK) ? S2(OFFS_REG(arg)) : IMM(argw)),
+ ((flags & MEM_MASK) <= GPR_REG && (flags & LOAD_DATA)) ? DR(reg) : MOVABLE_INS));
+ return -1;
}
return 0;
}
@@ -638,14 +651,11 @@ static sljit_s32 getput_arg(struct sljit_compiler *compiler, sljit_s32 flags, sl
}
}
- dest = ((flags & MEM_MASK) <= GPR_REG ? D(reg) : DA(reg));
+ dest = ((flags & MEM_MASK) <= GPR_REG ? D(reg) : FD(reg));
delay_slot = ((flags & MEM_MASK) <= GPR_REG && (flags & LOAD_DATA)) ? DR(reg) : MOVABLE_INS;
if (!base)
return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | dest | S1(arg2) | IMM(0), delay_slot);
- if (!(flags & WRITE_BACK))
- return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | dest | S1(base) | S2(arg2), delay_slot);
- FAIL_IF(push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | dest | S1(base) | S2(arg2), delay_slot));
- return push_inst(compiler, ADD | D(base) | S1(base) | S2(arg2), DR(base));
+ return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | dest | S1(base) | S2(arg2), delay_slot);
}
static SLJIT_INLINE sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw)
@@ -687,7 +697,7 @@ static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s3
if (FAST_IS_REG(dst)) {
dst_r = dst;
flags |= REG_DEST;
- if (op >= SLJIT_MOV && op <= SLJIT_MOVU_S32)
+ if (op >= SLJIT_MOV && op <= SLJIT_MOV_P)
sugg_src2_r = dst_r;
}
else if ((dst & SLJIT_MEM) && !getput_arg_fast(compiler, flags | ARG_TEST, TMP_REG1, dst, dstw))
@@ -738,7 +748,7 @@ static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s3
if (FAST_IS_REG(src2)) {
src2_r = src2;
flags |= REG2_SOURCE;
- if (!(flags & REG_DEST) && op >= SLJIT_MOV && op <= SLJIT_MOVU_S32)
+ if (!(flags & REG_DEST) && op >= SLJIT_MOV && op <= SLJIT_MOV_P)
dst_r = src2_r;
}
else if (src2 & SLJIT_IMM) {
@@ -749,7 +759,7 @@ static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s3
}
else {
src2_r = 0;
- if ((op >= SLJIT_MOV && op <= SLJIT_MOVU_S32) && (dst & SLJIT_MEM))
+ if ((op >= SLJIT_MOV && op <= SLJIT_MOV_P) && (dst & SLJIT_MEM))
dst_r = 0;
}
}
@@ -875,28 +885,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
case SLJIT_MOV_S16:
return emit_op(compiler, SLJIT_MOV_S16, flags | HALF_DATA | SIGNED_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s16)srcw : srcw);
- case SLJIT_MOVU:
- case SLJIT_MOVU_P:
- return emit_op(compiler, SLJIT_MOV, flags | WORD_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, srcw);
-
- case SLJIT_MOVU_U32:
- return emit_op(compiler, SLJIT_MOV_U32, flags | INT_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, srcw);
-
- case SLJIT_MOVU_S32:
- return emit_op(compiler, SLJIT_MOV_S32, flags | INT_DATA | SIGNED_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, srcw);
-
- case SLJIT_MOVU_U8:
- return emit_op(compiler, SLJIT_MOV_U8, flags | BYTE_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u8)srcw : srcw);
-
- case SLJIT_MOVU_S8:
- return emit_op(compiler, SLJIT_MOV_S8, flags | BYTE_DATA | SIGNED_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s8)srcw : srcw);
-
- case SLJIT_MOVU_U16:
- return emit_op(compiler, SLJIT_MOV_U16, flags | HALF_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u16)srcw : srcw);
-
- case SLJIT_MOVU_S16:
- return emit_op(compiler, SLJIT_MOV_S16, flags | HALF_DATA | SIGNED_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s16)srcw : srcw);
-
case SLJIT_NOT:
case SLJIT_CLZ:
return emit_op(compiler, op, flags, dst, dstw, TMP_REG1, 0, src, srcw);
@@ -962,7 +950,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg)
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg)
{
CHECK_REG_INDEX(check_sljit_get_float_register_index(reg));
- return reg << 1;
+ return freg_map[reg];
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler,
@@ -990,10 +978,8 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_comp
FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src, srcw, dst, dstw));
src = TMP_FREG1;
}
- else
- src <<= 1;
- FAIL_IF(push_inst(compiler, SELECT_FOP(op, FSTOI, FDTOI) | DA(TMP_FREG1) | S2A(src), MOVABLE_INS));
+ FAIL_IF(push_inst(compiler, SELECT_FOP(op, FSTOI, FDTOI) | FD(TMP_FREG1) | FS2(src), MOVABLE_INS));
if (FAST_IS_REG(dst)) {
FAIL_IF(emit_op_mem2(compiler, SINGLE_DATA, TMP_FREG1, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET));
@@ -1008,7 +994,7 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_comp
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
{
- sljit_s32 dst_r = FAST_IS_REG(dst) ? (dst << 1) : TMP_FREG1;
+ sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
if (src & SLJIT_IMM) {
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
@@ -1027,7 +1013,7 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_comp
}
FAIL_IF(emit_op_mem2(compiler, SINGLE_DATA | LOAD_DATA, TMP_FREG1, src, srcw, dst, dstw));
- FAIL_IF(push_inst(compiler, SELECT_FOP(op, FITOS, FITOD) | DA(dst_r) | S2A(TMP_FREG1), MOVABLE_INS));
+ FAIL_IF(push_inst(compiler, SELECT_FOP(op, FITOS, FITOD) | FD(dst_r) | FS2(TMP_FREG1), MOVABLE_INS));
if (dst & SLJIT_MEM)
return emit_op_mem2(compiler, FLOAT_DATA(op), TMP_FREG1, dst, dstw, 0, 0);
@@ -1042,17 +1028,13 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compile
FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, src2, src2w));
src1 = TMP_FREG1;
}
- else
- src1 <<= 1;
if (src2 & SLJIT_MEM) {
FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, 0, 0));
src2 = TMP_FREG2;
}
- else
- src2 <<= 1;
- return push_inst(compiler, SELECT_FOP(op, FCMPS, FCMPD) | S1A(src1) | S2A(src2), FCC_IS_SET | MOVABLE_INS);
+ return push_inst(compiler, SELECT_FOP(op, FCMPS, FCMPD) | FS1(src1) | FS2(src2), FCC_IS_SET | MOVABLE_INS);
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op,
@@ -1071,39 +1053,37 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32)
op ^= SLJIT_F32_OP;
- dst_r = FAST_IS_REG(dst) ? (dst << 1) : TMP_FREG1;
+ dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
if (src & SLJIT_MEM) {
FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, dst_r, src, srcw, dst, dstw));
src = dst_r;
}
- else
- src <<= 1;
switch (GET_OPCODE(op)) {
case SLJIT_MOV_F64:
if (src != dst_r) {
if (dst_r != TMP_FREG1) {
- FAIL_IF(push_inst(compiler, FMOVS | DA(dst_r) | S2A(src), MOVABLE_INS));
+ FAIL_IF(push_inst(compiler, FMOVS | FD(dst_r) | FS2(src), MOVABLE_INS));
if (!(op & SLJIT_F32_OP))
- FAIL_IF(push_inst(compiler, FMOVS | DA(dst_r | 1) | S2A(src | 1), MOVABLE_INS));
+ FAIL_IF(push_inst(compiler, FMOVS | FDN(dst_r) | FS2N(src), MOVABLE_INS));
}
else
dst_r = src;
}
break;
case SLJIT_NEG_F64:
- FAIL_IF(push_inst(compiler, FNEGS | DA(dst_r) | S2A(src), MOVABLE_INS));
+ FAIL_IF(push_inst(compiler, FNEGS | FD(dst_r) | FS2(src), MOVABLE_INS));
if (dst_r != src && !(op & SLJIT_F32_OP))
- FAIL_IF(push_inst(compiler, FMOVS | DA(dst_r | 1) | S2A(src | 1), MOVABLE_INS));
+ FAIL_IF(push_inst(compiler, FMOVS | FDN(dst_r) | FS2N(src), MOVABLE_INS));
break;
case SLJIT_ABS_F64:
- FAIL_IF(push_inst(compiler, FABSS | DA(dst_r) | S2A(src), MOVABLE_INS));
+ FAIL_IF(push_inst(compiler, FABSS | FD(dst_r) | FS2(src), MOVABLE_INS));
if (dst_r != src && !(op & SLJIT_F32_OP))
- FAIL_IF(push_inst(compiler, FMOVS | DA(dst_r | 1) | S2A(src | 1), MOVABLE_INS));
+ FAIL_IF(push_inst(compiler, FMOVS | FDN(dst_r) | FS2N(src), MOVABLE_INS));
break;
case SLJIT_CONV_F64_FROM_F32:
- FAIL_IF(push_inst(compiler, SELECT_FOP(op, FSTOD, FDTOS) | DA(dst_r) | S2A(src), MOVABLE_INS));
+ FAIL_IF(push_inst(compiler, SELECT_FOP(op, FSTOD, FDTOS) | FD(dst_r) | FS2(src), MOVABLE_INS));
op ^= SLJIT_F32_OP;
break;
}
@@ -1129,7 +1109,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compil
compiler->cache_arg = 0;
compiler->cache_argw = 0;
- dst_r = FAST_IS_REG(dst) ? (dst << 1) : TMP_FREG2;
+ dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG2;
if (src1 & SLJIT_MEM) {
if (getput_arg_fast(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w)) {
@@ -1138,8 +1118,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compil
} else
flags |= SLOW_SRC1;
}
- else
- src1 <<= 1;
if (src2 & SLJIT_MEM) {
if (getput_arg_fast(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w)) {
@@ -1148,8 +1126,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compil
} else
flags |= SLOW_SRC2;
}
- else
- src2 <<= 1;
if ((flags & (SLOW_SRC1 | SLOW_SRC2)) == (SLOW_SRC1 | SLOW_SRC2)) {
if (!can_cache(src1, src1w, src2, src2w) && can_cache(src1, src1w, dst, dstw)) {
@@ -1173,19 +1149,19 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compil
switch (GET_OPCODE(op)) {
case SLJIT_ADD_F64:
- FAIL_IF(push_inst(compiler, SELECT_FOP(op, FADDS, FADDD) | DA(dst_r) | S1A(src1) | S2A(src2), MOVABLE_INS));
+ FAIL_IF(push_inst(compiler, SELECT_FOP(op, FADDS, FADDD) | FD(dst_r) | FS1(src1) | FS2(src2), MOVABLE_INS));
break;
case SLJIT_SUB_F64:
- FAIL_IF(push_inst(compiler, SELECT_FOP(op, FSUBS, FSUBD) | DA(dst_r) | S1A(src1) | S2A(src2), MOVABLE_INS));
+ FAIL_IF(push_inst(compiler, SELECT_FOP(op, FSUBS, FSUBD) | FD(dst_r) | FS1(src1) | FS2(src2), MOVABLE_INS));
break;
case SLJIT_MUL_F64:
- FAIL_IF(push_inst(compiler, SELECT_FOP(op, FMULS, FMULD) | DA(dst_r) | S1A(src1) | S2A(src2), MOVABLE_INS));
+ FAIL_IF(push_inst(compiler, SELECT_FOP(op, FMULS, FMULD) | FD(dst_r) | FS1(src1) | FS2(src2), MOVABLE_INS));
break;
case SLJIT_DIV_F64:
- FAIL_IF(push_inst(compiler, SELECT_FOP(op, FDIVS, FDIVD) | DA(dst_r) | S1A(src1) | S2A(src2), MOVABLE_INS));
+ FAIL_IF(push_inst(compiler, SELECT_FOP(op, FDIVS, FDIVD) | FD(dst_r) | FS1(src1) | FS2(src2), MOVABLE_INS));
break;
}
@@ -1223,10 +1199,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_return(struct sljit_compiler
if (FAST_IS_REG(src))
FAIL_IF(push_inst(compiler, OR | D(TMP_LINK) | S1(0) | S2(src), DR(TMP_LINK)));
- else if (src & SLJIT_MEM)
+ else
FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_LINK, src, srcw));
- else if (src & SLJIT_IMM)
- FAIL_IF(load_immediate(compiler, TMP_LINK, srcw));
FAIL_IF(push_inst(compiler, JMPL | D(0) | S1(TMP_LINK) | IMM(8), UNMOVABLE_INS));
return push_inst(compiler, NOP, UNMOVABLE_INS);
@@ -1339,21 +1313,38 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile
#else
#error "Implementation required"
#endif
- } else {
+ }
+ else {
if ((compiler->delay_slot & DST_INS_MASK) != UNMOVABLE_INS)
jump->flags |= IS_MOVABLE;
if (type >= SLJIT_FAST_CALL)
jump->flags |= IS_CALL;
}
- PTR_FAIL_IF(emit_const(compiler, TMP_REG2, 0));
- PTR_FAIL_IF(push_inst(compiler, JMPL | D(type >= SLJIT_FAST_CALL ? TMP_LINK : 0) | S1(TMP_REG2) | IMM(0), UNMOVABLE_INS));
+ PTR_FAIL_IF(emit_const(compiler, TMP_REG1, 0));
+ PTR_FAIL_IF(push_inst(compiler, JMPL | D(type >= SLJIT_FAST_CALL ? TMP_LINK : 0) | S1(TMP_REG1) | IMM(0), UNMOVABLE_INS));
jump->addr = compiler->size;
PTR_FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS));
return jump;
}
+SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 arg_types)
+{
+ CHECK_ERROR_PTR();
+ CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types));
+
+ PTR_FAIL_IF(call_with_args(compiler, arg_types, NULL));
+
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
+ || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ compiler->skip_checks = 1;
+#endif
+
+ return sljit_emit_jump(compiler, type);
+}
+
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw)
{
struct sljit_jump *jump = NULL;
@@ -1370,17 +1361,18 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi
FAIL_IF(!jump);
set_jump(jump, compiler, JUMP_ADDR);
jump->u.target = srcw;
+
if ((compiler->delay_slot & DST_INS_MASK) != UNMOVABLE_INS)
jump->flags |= IS_MOVABLE;
if (type >= SLJIT_FAST_CALL)
jump->flags |= IS_CALL;
- FAIL_IF(emit_const(compiler, TMP_REG2, 0));
- src_r = TMP_REG2;
+ FAIL_IF(emit_const(compiler, TMP_REG1, 0));
+ src_r = TMP_REG1;
}
else {
- FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_REG2, src, srcw));
- src_r = TMP_REG2;
+ FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_REG1, src, srcw));
+ src_r = TMP_REG1;
}
FAIL_IF(push_inst(compiler, JMPL | D(type >= SLJIT_FAST_CALL ? TMP_LINK : 0) | S1(src_r) | IMM(0), UNMOVABLE_INS));
@@ -1389,6 +1381,29 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi
return push_inst(compiler, NOP, UNMOVABLE_INS);
}
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 arg_types,
+ sljit_s32 src, sljit_sw srcw)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw));
+
+ if (src & SLJIT_MEM) {
+ ADJUST_LOCAL_OFFSET(src, srcw);
+ FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_REG1, src, srcw));
+ src = TMP_REG1;
+ }
+
+ FAIL_IF(call_with_args(compiler, arg_types, &src));
+
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
+ || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ compiler->skip_checks = 1;
+#endif
+
+ return sljit_emit_ijump(compiler, type, src, srcw);
+}
+
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 type)
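In sljitNativeSPARC_common.c the floating-point side gets its own register map: TMP_FREG1/TMP_FREG2 become ordinary virtual registers, freg_map translates every virtual float register to an even-numbered hardware register, and the new FD/FDN/FS1/FS2/FS2N macros replace the old convention of shifting the register index left by one inside the fop1/fop2 emitters. The write-back (MOVU) addressing modes are dropped, jump emission now uses TMP_REG1, and sljit_emit_call()/sljit_emit_icall() route argument setup through call_with_args(). As a rough illustration of the new encoding style only (assuming it sits in this file, where FADDD and the FD/FS1/FS2 macros are visible), a double-precision add between virtual float registers would be put together like this:

/* dst, src1 and src2 are SLJIT virtual float registers (SLJIT_FR0...,
   TMP_FREG1, TMP_FREG2); freg_map picks the even-numbered %f register. */
static sljit_ins encode_faddd(sljit_s32 dst, sljit_s32 src1, sljit_s32 src2)
{
    return FADDD | FD(dst) | FS1(src1) | FS2(src2);
}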
diff --git a/src/3rdparty/pcre2/src/sljit/sljitNativeX86_32.c b/src/3rdparty/pcre2/src/sljit/sljitNativeX86_32.c
index f5cf8834b0..8a83e273a4 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitNativeX86_32.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitNativeX86_32.c
@@ -64,29 +64,28 @@ static sljit_u8* generate_far_jump_code(struct sljit_jump *jump, sljit_u8 *code_
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler,
- sljit_s32 options, sljit_s32 args, sljit_s32 scratches, sljit_s32 saveds,
+ sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
- sljit_s32 size;
+ sljit_s32 args, size;
sljit_u8 *inst;
CHECK_ERROR();
- CHECK(check_sljit_emit_enter(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size));
- set_emit_enter(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size);
+ CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
+ set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
+ args = get_arg_count(arg_types);
compiler->args = args;
-#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
- /* [esp+0] for saving temporaries and third argument for calls. */
- compiler->saveds_offset = 1 * sizeof(sljit_sw);
-#else
- /* [esp+0] for saving temporaries and space for maximum three arguments. */
- if (scratches <= 1)
- compiler->saveds_offset = 1 * sizeof(sljit_sw);
- else
- compiler->saveds_offset = ((scratches == 2) ? 2 : 3) * sizeof(sljit_sw);
+ /* [esp+0] for saving temporaries and function calls. */
+ compiler->stack_tmp_size = 2 * sizeof(sljit_sw);
+
+#if !(defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
+ if (scratches > 3)
+ compiler->stack_tmp_size = 3 * sizeof(sljit_sw);
#endif
+ compiler->saveds_offset = compiler->stack_tmp_size;
if (scratches > 3)
compiler->saveds_offset += ((scratches > (3 + 6)) ? 6 : (scratches - 3)) * sizeof(sljit_sw);
@@ -178,10 +177,10 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
/* Space for a single argument. This amount is excluded when the stack is allocated below. */
local_size -= sizeof(sljit_sw);
FAIL_IF(emit_do_imm(compiler, MOV_r_i32 + reg_map[SLJIT_R0], local_size));
- FAIL_IF(emit_non_cum_binary(compiler, SUB_r_rm, SUB_rm_r, SUB, SUB_EAX_i32,
+ FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, sizeof(sljit_sw)));
#endif
- FAIL_IF(sljit_emit_ijump(compiler, SLJIT_CALL1, SLJIT_IMM, SLJIT_FUNC_OFFSET(sljit_grow_stack)));
+ FAIL_IF(sljit_emit_icall(compiler, SLJIT_CALL, SLJIT_ARG1(SW), SLJIT_IMM, SLJIT_FUNC_OFFSET(sljit_grow_stack)));
}
#endif
@@ -192,12 +191,12 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_SP, 0);
/* Some space might allocated during sljit_grow_stack() above on WIN32. */
- FAIL_IF(emit_non_cum_binary(compiler, SUB_r_rm, SUB_rm_r, SUB, SUB_EAX_i32,
+ FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, local_size + sizeof(sljit_sw)));
#if defined _WIN32 && !(defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
if (compiler->local_size > 1024)
- FAIL_IF(emit_cum_binary(compiler, ADD_r_rm, ADD_rm_r, ADD, ADD_EAX_i32,
+ FAIL_IF(emit_cum_binary(compiler, BINARY_OPCODE(ADD),
TMP_REG1, 0, TMP_REG1, 0, SLJIT_IMM, sizeof(sljit_sw)));
#endif
@@ -213,31 +212,29 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
return emit_mov(compiler, SLJIT_MEM1(SLJIT_SP), compiler->local_size, TMP_REG1, 0);
}
#endif
- return emit_non_cum_binary(compiler, SUB_r_rm, SUB_rm_r, SUB, SUB_EAX_i32,
+ return emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, local_size);
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler,
- sljit_s32 options, sljit_s32 args, sljit_s32 scratches, sljit_s32 saveds,
+ sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
CHECK_ERROR();
- CHECK(check_sljit_set_context(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size));
- set_set_context(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size);
+ CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
+ set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
- compiler->args = args;
+ compiler->args = get_arg_count(arg_types);
-#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
- /* [esp+0] for saving temporaries and third argument for calls. */
- compiler->saveds_offset = 1 * sizeof(sljit_sw);
-#else
- /* [esp+0] for saving temporaries and space for maximum three arguments. */
- if (scratches <= 1)
- compiler->saveds_offset = 1 * sizeof(sljit_sw);
- else
- compiler->saveds_offset = ((scratches == 2) ? 2 : 3) * sizeof(sljit_sw);
+ /* [esp+0] for saving temporaries and function calls. */
+ compiler->stack_tmp_size = 2 * sizeof(sljit_sw);
+
+#if !(defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
+ if (scratches > 3)
+ compiler->stack_tmp_size = 3 * sizeof(sljit_sw);
#endif
+ compiler->saveds_offset = compiler->stack_tmp_size;
if (scratches > 3)
compiler->saveds_offset += ((scratches > (3 + 6)) ? 6 : (scratches - 3)) * sizeof(sljit_sw);
@@ -278,10 +275,10 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *comp
if (compiler->options & SLJIT_F64_ALIGNMENT)
EMIT_MOV(compiler, SLJIT_SP, 0, SLJIT_MEM1(SLJIT_SP), compiler->local_size)
else
- FAIL_IF(emit_cum_binary(compiler, ADD_r_rm, ADD_rm_r, ADD, ADD_EAX_i32,
+ FAIL_IF(emit_cum_binary(compiler, BINARY_OPCODE(ADD),
SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, compiler->local_size));
#else
- FAIL_IF(emit_cum_binary(compiler, ADD_r_rm, ADD_rm_r, ADD, ADD_EAX_i32,
+ FAIL_IF(emit_cum_binary(compiler, BINARY_OPCODE(ADD),
SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, compiler->local_size));
#endif
@@ -418,7 +415,7 @@ static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_s32
if ((flags & EX86_BIN_INS) && (a & SLJIT_IMM))
*inst = (flags & EX86_BYTE_ARG) ? GROUP_BINARY_83 : GROUP_BINARY_81;
- if ((a & SLJIT_IMM) || (a == 0))
+ if (a & SLJIT_IMM)
*buf_ptr = 0;
else if (!(flags & EX86_SSE2_OP1))
*buf_ptr = reg_map[a] << 3;
@@ -490,42 +487,324 @@ static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_s32
/* Call / return instructions */
/* --------------------------------------------------------------------- */
-static SLJIT_INLINE sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 type)
+#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
+
+static sljit_s32 c_fast_call_get_stack_size(sljit_s32 arg_types, sljit_s32 *word_arg_count_ptr)
{
- sljit_u8 *inst;
+ sljit_s32 stack_size = 0;
+ sljit_s32 word_arg_count = 0;
+
+ arg_types >>= SLJIT_DEF_SHIFT;
+
+ while (arg_types) {
+ switch (arg_types & SLJIT_DEF_MASK) {
+ case SLJIT_ARG_TYPE_F32:
+ stack_size += sizeof(sljit_f32);
+ break;
+ case SLJIT_ARG_TYPE_F64:
+ stack_size += sizeof(sljit_f64);
+ break;
+ default:
+ word_arg_count++;
+ if (word_arg_count > 2)
+ stack_size += sizeof(sljit_sw);
+ break;
+ }
-#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
- inst = (sljit_u8*)ensure_buf(compiler, type >= SLJIT_CALL3 ? 1 + 2 + 1 : 1 + 2);
- FAIL_IF(!inst);
- INC_SIZE(type >= SLJIT_CALL3 ? 2 + 1 : 2);
+ arg_types >>= SLJIT_DEF_SHIFT;
+ }
+
+ if (word_arg_count_ptr)
+ *word_arg_count_ptr = word_arg_count;
+
+ return stack_size;
+}
- if (type >= SLJIT_CALL3)
+static sljit_s32 c_fast_call_with_args(struct sljit_compiler *compiler,
+ sljit_s32 arg_types, sljit_s32 stack_size, sljit_s32 word_arg_count, sljit_s32 swap_args)
+{
+ sljit_u8 *inst;
+ sljit_s32 float_arg_count;
+
+ if (stack_size == sizeof(sljit_sw) && word_arg_count == 3) {
+ inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
+ FAIL_IF(!inst);
+ INC_SIZE(1);
PUSH_REG(reg_map[SLJIT_R2]);
- *inst++ = MOV_r_rm;
- *inst++ = MOD_REG | (reg_map[SLJIT_R2] << 3) | reg_map[SLJIT_R0];
+ }
+ else if (stack_size > 0) {
+ if (word_arg_count >= 4)
+ EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), compiler->saveds_offset - sizeof(sljit_sw));
+
+ FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
+ SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, stack_size));
+
+ stack_size = 0;
+ arg_types >>= SLJIT_DEF_SHIFT;
+ word_arg_count = 0;
+ float_arg_count = 0;
+ while (arg_types) {
+ switch (arg_types & SLJIT_DEF_MASK) {
+ case SLJIT_ARG_TYPE_F32:
+ float_arg_count++;
+ FAIL_IF(emit_sse2_store(compiler, 1, SLJIT_MEM1(SLJIT_SP), stack_size, float_arg_count));
+ stack_size += sizeof(sljit_f32);
+ break;
+ case SLJIT_ARG_TYPE_F64:
+ float_arg_count++;
+ FAIL_IF(emit_sse2_store(compiler, 0, SLJIT_MEM1(SLJIT_SP), stack_size, float_arg_count));
+ stack_size += sizeof(sljit_f64);
+ break;
+ default:
+ word_arg_count++;
+ if (word_arg_count == 3) {
+ EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), stack_size, SLJIT_R2, 0);
+ stack_size += sizeof(sljit_sw);
+ }
+ else if (word_arg_count == 4) {
+ EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), stack_size, TMP_REG1, 0);
+ stack_size += sizeof(sljit_sw);
+ }
+ break;
+ }
+
+ arg_types >>= SLJIT_DEF_SHIFT;
+ }
+ }
+
+ if (word_arg_count > 0) {
+ if (swap_args) {
+ inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
+ FAIL_IF(!inst);
+ INC_SIZE(1);
+
+ *inst++ = XCHG_EAX_r | reg_map[SLJIT_R2];
+ }
+ else {
+ inst = (sljit_u8*)ensure_buf(compiler, 1 + 2);
+ FAIL_IF(!inst);
+ INC_SIZE(2);
+
+ *inst++ = MOV_r_rm;
+ *inst++ = MOD_REG | (reg_map[SLJIT_R2] << 3) | reg_map[SLJIT_R0];
+ }
+ }
+
+ return SLJIT_SUCCESS;
+}
+
+#endif
+
+static sljit_s32 cdecl_call_get_stack_size(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *word_arg_count_ptr)
+{
+ sljit_s32 stack_size = 0;
+ sljit_s32 word_arg_count = 0;
+
+ arg_types >>= SLJIT_DEF_SHIFT;
+
+ while (arg_types) {
+ switch (arg_types & SLJIT_DEF_MASK) {
+ case SLJIT_ARG_TYPE_F32:
+ stack_size += sizeof(sljit_f32);
+ break;
+ case SLJIT_ARG_TYPE_F64:
+ stack_size += sizeof(sljit_f64);
+ break;
+ default:
+ word_arg_count++;
+ stack_size += sizeof(sljit_sw);
+ break;
+ }
+
+ arg_types >>= SLJIT_DEF_SHIFT;
+ }
+
+ if (word_arg_count_ptr)
+ *word_arg_count_ptr = word_arg_count;
+
+ if (stack_size <= compiler->stack_tmp_size)
+ return 0;
+
+#if defined(__APPLE__)
+ return ((stack_size - compiler->stack_tmp_size + 15) & ~15);
#else
- inst = (sljit_u8*)ensure_buf(compiler, 1 + 4 * (type - SLJIT_CALL0));
+ return stack_size - compiler->stack_tmp_size;
+#endif
+}
+
+static sljit_s32 cdecl_call_with_args(struct sljit_compiler *compiler,
+ sljit_s32 arg_types, sljit_s32 stack_size, sljit_s32 word_arg_count)
+{
+ sljit_s32 float_arg_count = 0;
+
+ if (word_arg_count >= 4)
+ EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), compiler->saveds_offset - sizeof(sljit_sw));
+
+ if (stack_size > 0)
+ FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
+ SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, stack_size));
+
+ stack_size = 0;
+ word_arg_count = 0;
+ arg_types >>= SLJIT_DEF_SHIFT;
+
+ while (arg_types) {
+ switch (arg_types & SLJIT_DEF_MASK) {
+ case SLJIT_ARG_TYPE_F32:
+ float_arg_count++;
+ FAIL_IF(emit_sse2_store(compiler, 1, SLJIT_MEM1(SLJIT_SP), stack_size, float_arg_count));
+ stack_size += sizeof(sljit_f32);
+ break;
+ case SLJIT_ARG_TYPE_F64:
+ float_arg_count++;
+ FAIL_IF(emit_sse2_store(compiler, 0, SLJIT_MEM1(SLJIT_SP), stack_size, float_arg_count));
+ stack_size += sizeof(sljit_f64);
+ break;
+ default:
+ word_arg_count++;
+ EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), stack_size, (word_arg_count >= 4) ? TMP_REG1 : word_arg_count, 0);
+ stack_size += sizeof(sljit_sw);
+ break;
+ }
+
+ arg_types >>= SLJIT_DEF_SHIFT;
+ }
+
+ return SLJIT_SUCCESS;
+}
+
+static sljit_s32 post_call_with_args(struct sljit_compiler *compiler,
+ sljit_s32 arg_types, sljit_s32 stack_size)
+{
+ sljit_u8 *inst;
+ sljit_s32 single;
+
+ if (stack_size > 0)
+ FAIL_IF(emit_cum_binary(compiler, BINARY_OPCODE(ADD),
+ SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, stack_size));
+
+ if ((arg_types & SLJIT_DEF_MASK) < SLJIT_ARG_TYPE_F32)
+ return SLJIT_SUCCESS;
+
+ single = ((arg_types & SLJIT_DEF_MASK) == SLJIT_ARG_TYPE_F32);
+
+ inst = (sljit_u8*)ensure_buf(compiler, 1 + 3);
FAIL_IF(!inst);
- INC_SIZE(4 * (type - SLJIT_CALL0));
-
- *inst++ = MOV_rm_r;
- *inst++ = MOD_DISP8 | (reg_map[SLJIT_R0] << 3) | 0x4 /* SIB */;
- *inst++ = (0x4 /* none*/ << 3) | reg_map[SLJIT_SP];
- *inst++ = 0;
- if (type >= SLJIT_CALL2) {
- *inst++ = MOV_rm_r;
- *inst++ = MOD_DISP8 | (reg_map[SLJIT_R1] << 3) | 0x4 /* SIB */;
- *inst++ = (0x4 /* none*/ << 3) | reg_map[SLJIT_SP];
- *inst++ = sizeof(sljit_sw);
+ INC_SIZE(3);
+ inst[0] = single ? FSTPS : FSTPD;
+ inst[1] = (0x03 << 3) | 0x04;
+ inst[2] = (0x04 << 3) | reg_map[SLJIT_SP];
+
+ return emit_sse2_load(compiler, single, SLJIT_FR0, SLJIT_MEM1(SLJIT_SP), 0);
+}
+
+SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 arg_types)
+{
+ struct sljit_jump *jump;
+ sljit_s32 stack_size = 0;
+ sljit_s32 word_arg_count;
+
+ CHECK_ERROR_PTR();
+ CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types));
+
+#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
+ if ((type & 0xff) == SLJIT_CALL) {
+ stack_size = c_fast_call_get_stack_size(arg_types, &word_arg_count);
+ PTR_FAIL_IF(c_fast_call_with_args(compiler, arg_types, stack_size, word_arg_count, 0));
+
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
+ || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ compiler->skip_checks = 1;
+#endif
+
+ jump = sljit_emit_jump(compiler, type);
+ PTR_FAIL_IF(jump == NULL);
+
+ PTR_FAIL_IF(post_call_with_args(compiler, arg_types, 0));
+ return jump;
}
- if (type >= SLJIT_CALL3) {
- *inst++ = MOV_rm_r;
- *inst++ = MOD_DISP8 | (reg_map[SLJIT_R2] << 3) | 0x4 /* SIB */;
- *inst++ = (0x4 /* none*/ << 3) | reg_map[SLJIT_SP];
- *inst++ = 2 * sizeof(sljit_sw);
+#endif
+
+ stack_size = cdecl_call_get_stack_size(compiler, arg_types, &word_arg_count);
+ PTR_FAIL_IF(cdecl_call_with_args(compiler, arg_types, stack_size, word_arg_count));
+
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
+ || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ compiler->skip_checks = 1;
+#endif
+
+ jump = sljit_emit_jump(compiler, type);
+ PTR_FAIL_IF(jump == NULL);
+
+ PTR_FAIL_IF(post_call_with_args(compiler, arg_types, stack_size));
+ return jump;
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 arg_types,
+ sljit_s32 src, sljit_sw srcw)
+{
+ sljit_s32 stack_size = 0;
+ sljit_s32 word_arg_count;
+#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
+ sljit_s32 swap_args;
+#endif
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw));
+
+#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
+ SLJIT_ASSERT(reg_map[SLJIT_R0] == 0 && reg_map[SLJIT_R2] == 1 && SLJIT_R0 == 1 && SLJIT_R2 == 3);
+
+ if ((type & 0xff) == SLJIT_CALL) {
+ stack_size = c_fast_call_get_stack_size(arg_types, &word_arg_count);
+ swap_args = 0;
+
+ if (word_arg_count > 0) {
+ if ((src & REG_MASK) == SLJIT_R2 || OFFS_REG(src) == SLJIT_R2) {
+ swap_args = 1;
+ if (((src & REG_MASK) | 0x2) == SLJIT_R2)
+ src ^= 0x2;
+ if ((OFFS_REG(src) | 0x2) == SLJIT_R2)
+ src ^= TO_OFFS_REG(0x2);
+ }
+ }
+
+ FAIL_IF(c_fast_call_with_args(compiler, arg_types, stack_size, word_arg_count, swap_args));
+
+ compiler->saveds_offset += stack_size;
+ compiler->locals_offset += stack_size;
+
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
+ || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ compiler->skip_checks = 1;
+#endif
+ FAIL_IF(sljit_emit_ijump(compiler, type, src, srcw));
+
+ compiler->saveds_offset -= stack_size;
+ compiler->locals_offset -= stack_size;
+
+ return post_call_with_args(compiler, arg_types, 0);
}
#endif
- return SLJIT_SUCCESS;
+
+ stack_size = cdecl_call_get_stack_size(compiler, arg_types, &word_arg_count);
+ FAIL_IF(cdecl_call_with_args(compiler, arg_types, stack_size, word_arg_count));
+
+ compiler->saveds_offset += stack_size;
+ compiler->locals_offset += stack_size;
+
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
+ || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ compiler->skip_checks = 1;
+#endif
+ FAIL_IF(sljit_emit_ijump(compiler, type, src, srcw));
+
+ compiler->saveds_offset -= stack_size;
+ compiler->locals_offset -= stack_size;
+
+ return post_call_with_args(compiler, arg_types, stack_size);
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
@@ -576,7 +855,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_return(struct sljit_compiler
INC_SIZE(1 + 1);
PUSH_REG(reg_map[src]);
}
- else if (src & SLJIT_MEM) {
+ else {
inst = emit_x86_instruction(compiler, 1, 0, 0, src, srcw);
FAIL_IF(!inst);
*inst++ = GROUP_FF;
@@ -586,16 +865,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_return(struct sljit_compiler
FAIL_IF(!inst);
INC_SIZE(1);
}
- else {
- /* SLJIT_IMM. */
- inst = (sljit_u8*)ensure_buf(compiler, 1 + 5 + 1);
- FAIL_IF(!inst);
-
- INC_SIZE(5 + 1);
- *inst++ = PUSH_i32;
- sljit_unaligned_store_sw(inst, srcw);
- inst += sizeof(sljit_sw);
- }
RET();
return SLJIT_SUCCESS;
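For 32-bit x86 the fixed SLJIT_CALL1..SLJIT_CALL3 argument shuffling is replaced by convention-aware helpers: c_fast_call_get_stack_size()/c_fast_call_with_args() handle the fastcall path, cdecl_call_get_stack_size()/cdecl_call_with_args() handle cdecl (rounding the argument area to 16 bytes on Apple targets), and post_call_with_args() releases the argument area and moves an x87 floating-point return value through the stack into an SSE2 register. sljit_emit_call() and sljit_emit_icall() drive these helpers, and sljit_emit_fast_return() no longer accepts immediate operands. A hedged usage sketch follows; it assumes the SLJIT_RET/SLJIT_ARGn macros from sljitLir.h and that the surrounding generated code already placed the arguments in SLJIT_R0 and SLJIT_FR0.

/* Sketch: a cdecl call taking (sljit_sw, sljit_f64) and returning
   sljit_f64.  On x86-32 the double argument is passed on the stack and
   the result comes back on the x87 stack, which is what the helpers
   above account for. */
static struct sljit_jump* emit_float_call(struct sljit_compiler *compiler)
{
    sljit_s32 arg_types = SLJIT_RET(F64) | SLJIT_ARG1(SW) | SLJIT_ARG2(F64);

    return sljit_emit_call(compiler, SLJIT_CALL_CDECL, arg_types);
}

The returned jump would then be bound to the callee with sljit_set_target(), just like any other sljit_emit_jump() result.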
diff --git a/src/3rdparty/pcre2/src/sljit/sljitNativeX86_64.c b/src/3rdparty/pcre2/src/sljit/sljitNativeX86_64.c
index 039b68c45a..635ebd087c 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitNativeX86_64.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitNativeX86_64.c
@@ -41,24 +41,31 @@ static sljit_s32 emit_load_imm64(struct sljit_compiler *compiler, sljit_s32 reg,
static sljit_u8* generate_far_jump_code(struct sljit_jump *jump, sljit_u8 *code_ptr, sljit_s32 type)
{
+ int short_addr = !(jump->flags & SLJIT_REWRITABLE_JUMP) && !(jump->flags & JUMP_LABEL) && (jump->u.target <= 0xffffffff);
+
+ /* The relative jump below is specialized for this case. */
+ SLJIT_ASSERT(reg_map[TMP_REG2] >= 8);
+
if (type < SLJIT_JUMP) {
/* Invert type. */
*code_ptr++ = get_jump_code(type ^ 0x1) - 0x10;
- *code_ptr++ = 10 + 3;
+ *code_ptr++ = short_addr ? (6 + 3) : (10 + 3);
}
- *code_ptr++ = REX_W | ((reg_map[TMP_REG2] <= 7) ? 0 : REX_B);
+ *code_ptr++ = short_addr ? REX_B : (REX_W | REX_B);
*code_ptr++ = MOV_r_i32 | reg_lmap[TMP_REG2];
jump->addr = (sljit_uw)code_ptr;
if (jump->flags & JUMP_LABEL)
jump->flags |= PATCH_MD;
+ else if (short_addr)
+ sljit_unaligned_store_s32(code_ptr, (sljit_s32)jump->u.target);
else
sljit_unaligned_store_sw(code_ptr, jump->u.target);
- code_ptr += sizeof(sljit_sw);
- if (reg_map[TMP_REG2] >= 8)
- *code_ptr++ = REX_B;
+ code_ptr += short_addr ? sizeof(sljit_s32) : sizeof(sljit_sw);
+
+ *code_ptr++ = REX_B;
*code_ptr++ = GROUP_FF;
*code_ptr++ = MOD_REG | (type >= SLJIT_FAST_CALL ? CALL_rm : JMP_rm) | reg_lmap[TMP_REG2];
@@ -66,15 +73,15 @@ static sljit_u8* generate_far_jump_code(struct sljit_jump *jump, sljit_u8 *code_
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler,
- sljit_s32 options, sljit_s32 args, sljit_s32 scratches, sljit_s32 saveds,
+ sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
- sljit_s32 i, tmp, size, saved_register_size;
+ sljit_s32 args, i, tmp, size, saved_register_size;
sljit_u8 *inst;
CHECK_ERROR();
- CHECK(check_sljit_emit_enter(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size));
- set_emit_enter(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size);
+ CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
+ set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
#ifdef _WIN64
/* Two/four register slots for parameters plus space for xmm6 register if needed. */
@@ -108,6 +115,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
PUSH_REG(reg_lmap[i]);
}
+ args = get_arg_count(arg_types);
+
if (args > 0) {
size = args * 3;
inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
@@ -182,7 +191,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
compiler->skip_checks = 1;
#endif
- FAIL_IF(sljit_emit_ijump(compiler, SLJIT_CALL1, SLJIT_IMM, SLJIT_FUNC_OFFSET(sljit_grow_stack)));
+ FAIL_IF(sljit_emit_icall(compiler, SLJIT_CALL, SLJIT_ARG1(SW), SLJIT_IMM, SLJIT_FUNC_OFFSET(sljit_grow_stack)));
}
#endif
@@ -223,14 +232,14 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler,
- sljit_s32 options, sljit_s32 args, sljit_s32 scratches, sljit_s32 saveds,
+ sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
sljit_s32 saved_register_size;
CHECK_ERROR();
- CHECK(check_sljit_set_context(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size));
- set_set_context(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size);
+ CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
+ set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
#ifdef _WIN64
/* Two/four register slots for parameters plus space for xmm6 register if needed. */
@@ -414,7 +423,11 @@ static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_s32
}
}
}
- else if (!(flags & EX86_SSE2_OP2) && reg_map[b] >= 8)
+ else if (!(flags & EX86_SSE2_OP2)) {
+ if (reg_map[b] >= 8)
+ rex |= REX_B;
+ }
+ else if (freg_map[b] >= 8)
rex |= REX_B;
if (a & SLJIT_IMM) {
@@ -441,7 +454,11 @@ static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_s32
else {
SLJIT_ASSERT(!(flags & EX86_SHIFT_INS) || a == SLJIT_PREF_SHIFT_REG);
/* reg_map[SLJIT_PREF_SHIFT_REG] is less than 8. */
- if (!(flags & EX86_SSE2_OP1) && reg_map[a] >= 8)
+ if (!(flags & EX86_SSE2_OP1)) {
+ if (reg_map[a] >= 8)
+ rex |= REX_R;
+ }
+ else if (freg_map[a] >= 8)
rex |= REX_R;
}
@@ -468,12 +485,12 @@ static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_s32
if ((flags & EX86_BIN_INS) && (a & SLJIT_IMM))
*inst = (flags & EX86_BYTE_ARG) ? GROUP_BINARY_83 : GROUP_BINARY_81;
- if ((a & SLJIT_IMM) || (a == 0))
+ if (a & SLJIT_IMM)
*buf_ptr = 0;
else if (!(flags & EX86_SSE2_OP1))
*buf_ptr = reg_lmap[a] << 3;
else
- *buf_ptr = a << 3;
+ *buf_ptr = freg_lmap[a] << 3;
}
else {
if (a & SLJIT_IMM) {
@@ -487,7 +504,7 @@ static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_s32
}
if (!(b & SLJIT_MEM))
- *buf_ptr++ |= MOD_REG + ((!(flags & EX86_SSE2_OP2)) ? reg_lmap[b] : b);
+ *buf_ptr++ |= MOD_REG + ((!(flags & EX86_SSE2_OP2)) ? reg_lmap[b] : freg_lmap[b]);
else if ((b & REG_MASK) != SLJIT_UNUSED) {
if ((b & OFFS_REG_MASK) == SLJIT_UNUSED || (b & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_SP)) {
if (immb != 0 || reg_lmap[b & REG_MASK] == 5) {
@@ -545,45 +562,161 @@ static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_s32
/* Call / return instructions */
/* --------------------------------------------------------------------- */
-static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 type)
+#ifndef _WIN64
+
+static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *src_ptr, sljit_sw srcw)
{
- sljit_u8 *inst;
+ sljit_s32 src = src_ptr ? (*src_ptr) : 0;
+ sljit_s32 word_arg_count = 0;
- /* After any change update IS_REG_CHANGED_BY_CALL as well. */
-#ifndef _WIN64
- SLJIT_ASSERT(reg_map[SLJIT_R1] == 6 && reg_map[SLJIT_R0] < 8 && reg_map[SLJIT_R2] < 8 && reg_map[TMP_REG1] == 2);
+ SLJIT_ASSERT(reg_map[SLJIT_R1] == 6 && reg_map[SLJIT_R3] == 1 && reg_map[TMP_REG1] == 2);
- inst = (sljit_u8*)ensure_buf(compiler, 1 + ((type < SLJIT_CALL3) ? 3 : 6));
- FAIL_IF(!inst);
- INC_SIZE((type < SLJIT_CALL3) ? 3 : 6);
- if (type >= SLJIT_CALL3) {
- /* Move third argument to TMP_REG1. */
- *inst++ = REX_W;
- *inst++ = MOV_r_rm;
- *inst++ = MOD_REG | (0x2 /* rdx */ << 3) | reg_lmap[SLJIT_R2];
+ compiler->mode32 = 0;
+
+ /* Remove return value. */
+ arg_types >>= SLJIT_DEF_SHIFT;
+
+ while (arg_types) {
+ if ((arg_types & SLJIT_DEF_MASK) < SLJIT_ARG_TYPE_F32)
+ word_arg_count++;
+ arg_types >>= SLJIT_DEF_SHIFT;
}
- *inst++ = REX_W;
- *inst++ = MOV_r_rm;
- *inst++ = MOD_REG | (0x7 /* rdi */ << 3) | reg_lmap[SLJIT_R0];
+
+ if (word_arg_count == 0)
+ return SLJIT_SUCCESS;
+
+ if (src & SLJIT_MEM) {
+ ADJUST_LOCAL_OFFSET(src, srcw);
+ EMIT_MOV(compiler, TMP_REG2, 0, src, srcw);
+ *src_ptr = TMP_REG2;
+ }
+ else if (src == SLJIT_R2 && word_arg_count >= SLJIT_R2)
+ *src_ptr = TMP_REG1;
+
+ if (word_arg_count >= 3)
+ EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_R2, 0);
+ return emit_mov(compiler, SLJIT_R2, 0, SLJIT_R0, 0);
+}
+
#else
- SLJIT_ASSERT(reg_map[SLJIT_R1] == 2 && reg_map[SLJIT_R0] < 8 && reg_map[SLJIT_R2] < 8 && reg_map[TMP_REG1] == 8);
- inst = (sljit_u8*)ensure_buf(compiler, 1 + ((type < SLJIT_CALL3) ? 3 : 6));
- FAIL_IF(!inst);
- INC_SIZE((type < SLJIT_CALL3) ? 3 : 6);
- if (type >= SLJIT_CALL3) {
- /* Move third argument to TMP_REG1. */
- *inst++ = REX_W | REX_R;
- *inst++ = MOV_r_rm;
- *inst++ = MOD_REG | (0x0 /* r8 */ << 3) | reg_lmap[SLJIT_R2];
+static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *src_ptr, sljit_sw srcw)
+{
+ sljit_s32 src = src_ptr ? (*src_ptr) : 0;
+ sljit_s32 arg_count = 0;
+ sljit_s32 word_arg_count = 0;
+ sljit_s32 float_arg_count = 0;
+ sljit_s32 types = 0;
+ sljit_s32 data_transfer = 0;
+ static sljit_u8 word_arg_regs[5] = { 0, SLJIT_R3, SLJIT_R1, SLJIT_R2, TMP_REG1 };
+
+ SLJIT_ASSERT(reg_map[SLJIT_R3] == 1 && reg_map[SLJIT_R1] == 2 && reg_map[SLJIT_R2] == 8 && reg_map[TMP_REG1] == 9);
+
+ compiler->mode32 = 0;
+ arg_types >>= SLJIT_DEF_SHIFT;
+
+ while (arg_types) {
+ types = (types << SLJIT_DEF_SHIFT) | (arg_types & SLJIT_DEF_MASK);
+
+ switch (arg_types & SLJIT_DEF_MASK) {
+ case SLJIT_ARG_TYPE_F32:
+ case SLJIT_ARG_TYPE_F64:
+ arg_count++;
+ float_arg_count++;
+
+ if (arg_count != float_arg_count)
+ data_transfer = 1;
+ break;
+ default:
+ arg_count++;
+ word_arg_count++;
+
+ if (arg_count != word_arg_count || arg_count != word_arg_regs[arg_count]) {
+ data_transfer = 1;
+
+ if (src == word_arg_regs[arg_count]) {
+ EMIT_MOV(compiler, TMP_REG2, 0, src, 0);
+ *src_ptr = TMP_REG2;
+ }
+ }
+ break;
+ }
+
+ arg_types >>= SLJIT_DEF_SHIFT;
}
- *inst++ = REX_W;
- *inst++ = MOV_r_rm;
- *inst++ = MOD_REG | (0x1 /* rcx */ << 3) | reg_lmap[SLJIT_R0];
-#endif
+
+ if (!data_transfer)
+ return SLJIT_SUCCESS;
+
+ if (src & SLJIT_MEM) {
+ ADJUST_LOCAL_OFFSET(src, srcw);
+ EMIT_MOV(compiler, TMP_REG2, 0, src, srcw);
+ *src_ptr = TMP_REG2;
+ }
+
+ while (types) {
+ switch (types & SLJIT_DEF_MASK) {
+ case SLJIT_ARG_TYPE_F32:
+ if (arg_count != float_arg_count)
+ FAIL_IF(emit_sse2_load(compiler, 1, arg_count, float_arg_count, 0));
+ arg_count--;
+ float_arg_count--;
+ break;
+ case SLJIT_ARG_TYPE_F64:
+ if (arg_count != float_arg_count)
+ FAIL_IF(emit_sse2_load(compiler, 0, arg_count, float_arg_count, 0));
+ arg_count--;
+ float_arg_count--;
+ break;
+ default:
+ if (arg_count != word_arg_count || arg_count != word_arg_regs[arg_count])
+ EMIT_MOV(compiler, word_arg_regs[arg_count], 0, word_arg_count, 0);
+ arg_count--;
+ word_arg_count--;
+ break;
+ }
+
+ types >>= SLJIT_DEF_SHIFT;
+ }
+
return SLJIT_SUCCESS;
}
+#endif
+
+SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 arg_types)
+{
+ CHECK_ERROR_PTR();
+ CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types));
+
+ PTR_FAIL_IF(call_with_args(compiler, arg_types, NULL, 0));
+
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
+ || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ compiler->skip_checks = 1;
+#endif
+
+ return sljit_emit_jump(compiler, type);
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 arg_types,
+ sljit_s32 src, sljit_sw srcw)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw));
+
+ FAIL_IF(call_with_args(compiler, arg_types, &src, srcw));
+
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
+ || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ compiler->skip_checks = 1;
+#endif
+
+ return sljit_emit_ijump(compiler, type, src, srcw);
+}
+
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
{
sljit_u8 *inst;
@@ -629,11 +762,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_return(struct sljit_compiler
CHECK(check_sljit_emit_fast_return(compiler, src, srcw));
ADJUST_LOCAL_OFFSET(src, srcw);
- if ((src & SLJIT_IMM) && NOT_HALFWORD(srcw)) {
- FAIL_IF(emit_load_imm64(compiler, TMP_REG1, srcw));
- src = TMP_REG1;
- }
-
if (FAST_IS_REG(src)) {
if (reg_map[src] < 8) {
inst = (sljit_u8*)ensure_buf(compiler, 1 + 1 + 1);
@@ -651,7 +779,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_return(struct sljit_compiler
PUSH_REG(reg_lmap[src]);
}
}
- else if (src & SLJIT_MEM) {
+ else {
/* REX_W is not necessary (src is not immediate). */
compiler->mode32 = 1;
inst = emit_x86_instruction(compiler, 1, 0, 0, src, srcw);
@@ -663,23 +791,11 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_return(struct sljit_compiler
FAIL_IF(!inst);
INC_SIZE(1);
}
- else {
- SLJIT_ASSERT(IS_HALFWORD(srcw));
- /* SLJIT_IMM. */
- inst = (sljit_u8*)ensure_buf(compiler, 1 + 5 + 1);
- FAIL_IF(!inst);
-
- INC_SIZE(5 + 1);
- *inst++ = PUSH_i32;
- sljit_unaligned_store_s32(inst, srcw);
- inst += sizeof(sljit_s32);
- }
RET();
return SLJIT_SUCCESS;
}
-
/* --------------------------------------------------------------------- */
/* Extend input */
/* --------------------------------------------------------------------- */
diff --git a/src/3rdparty/pcre2/src/sljit/sljitNativeX86_common.c b/src/3rdparty/pcre2/src/sljit/sljitNativeX86_common.c
index eb0886d671..ab7b36adb2 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitNativeX86_common.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitNativeX86_common.c
@@ -26,7 +26,11 @@
SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void)
{
+#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
+ return "x86" SLJIT_CPUINFO " ABI:fastcall";
+#else
return "x86" SLJIT_CPUINFO;
+#endif
}
/*
@@ -35,7 +39,7 @@ SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void)
1 - ECX
2 - EDX
3 - EBX
- 4 - none
+ 4 - ESP
5 - EBP
6 - ESI
7 - EDI
@@ -47,7 +51,7 @@ SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void)
1 - RCX
2 - RDX
3 - RBX
- 4 - none
+ 4 - RSP
5 - RBP
6 - RSI
7 - RDI
@@ -92,23 +96,32 @@ static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 3] = {
#ifndef _WIN64
/* Args: rdi(=7), rsi(=6), rdx(=2), rcx(=1), r8, r9. Scratches: rax(=0), r10, r11 */
static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 4] = {
- 0, 0, 6, 1, 7, 8, 11, 10, 12, 5, 13, 14, 15, 3, 4, 2, 9
+ 0, 0, 6, 7, 1, 8, 11, 10, 12, 5, 13, 14, 15, 3, 4, 2, 9
};
/* low-map. reg_map & 0x7. */
static const sljit_u8 reg_lmap[SLJIT_NUMBER_OF_REGISTERS + 4] = {
- 0, 0, 6, 1, 7, 0, 3, 2, 4, 5, 5, 6, 7, 3, 4, 2, 1
+ 0, 0, 6, 7, 1, 0, 3, 2, 4, 5, 5, 6, 7, 3, 4, 2, 1
};
#else
/* Args: rcx(=1), rdx(=2), r8, r9. Scratches: rax(=0), r10, r11 */
static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 4] = {
- 0, 0, 2, 1, 10, 11, 12, 5, 13, 14, 15, 7, 6, 3, 4, 8, 9
+ 0, 0, 2, 8, 1, 11, 12, 5, 13, 14, 15, 7, 6, 3, 4, 9, 10
};
/* low-map. reg_map & 0x7. */
static const sljit_u8 reg_lmap[SLJIT_NUMBER_OF_REGISTERS + 4] = {
- 0, 0, 2, 1, 2, 3, 4, 5, 5, 6, 7, 7, 6, 3, 4, 0, 1
+ 0, 0, 2, 0, 1, 3, 4, 5, 5, 6, 7, 7, 6, 3, 4, 1, 2
};
#endif
+/* Args: xmm0-xmm3 */
+static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = {
+ 4, 0, 1, 2, 3, 5, 6
+};
+/* low-map. freg_map & 0x7. */
+static const sljit_u8 freg_lmap[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = {
+ 4, 0, 1, 2, 3, 5, 6
+};
+
#define REX_W 0x48
#define REX_R 0x44
#define REX_X 0x42
@@ -178,6 +191,8 @@ static const sljit_u8 reg_lmap[SLJIT_NUMBER_OF_REGISTERS + 4] = {
#define CVTTSD2SI_r_xm 0x2c
#define DIV (/* GROUP_F7 */ 6 << 3)
#define DIVSD_x_xm 0x5e
+#define FSTPS 0xd9
+#define FSTPD 0xdd
#define INT3 0xcc
#define IDIV (/* GROUP_F7 */ 7 << 3)
#define IMUL (/* GROUP_F7 */ 5 << 3)
@@ -462,11 +477,7 @@ static sljit_u8* generate_near_jump_code(struct sljit_jump *jump, sljit_u8 *code
code_ptr += sizeof(sljit_s8);
} else {
jump->flags |= PATCH_MW;
-#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
- code_ptr += sizeof(sljit_sw);
-#else
code_ptr += sizeof(sljit_s32);
-#endif
}
return code_ptr;
@@ -613,9 +624,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
get_cpu_features();
return cpu_has_cmov;
- case SLJIT_HAS_PREF_SHIFT_REG:
- return 1;
-
case SLJIT_HAS_SSE2:
#if (defined SLJIT_DETECT_SSE2 && SLJIT_DETECT_SSE2)
if (cpu_has_sse2 == -1)
@@ -634,14 +642,16 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
/* Operators */
/* --------------------------------------------------------------------- */
+#define BINARY_OPCODE(opcode) (((opcode ## _EAX_i32) << 24) | ((opcode ## _r_rm) << 16) | ((opcode ## _rm_r) << 8) | (opcode))
+
static sljit_s32 emit_cum_binary(struct sljit_compiler *compiler,
- sljit_u8 op_rm, sljit_u8 op_mr, sljit_u8 op_imm, sljit_u8 op_eax_imm,
+ sljit_u32 op_types,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w);
static sljit_s32 emit_non_cum_binary(struct sljit_compiler *compiler,
- sljit_u8 op_rm, sljit_u8 op_mr, sljit_u8 op_imm, sljit_u8 op_eax_imm,
+ sljit_u32 op_types,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w);
@@ -653,10 +663,16 @@ static sljit_s32 emit_mov(struct sljit_compiler *compiler,
#define EMIT_MOV(compiler, dst, dstw, src, srcw) \
FAIL_IF(emit_mov(compiler, dst, dstw, src, srcw));
+static SLJIT_INLINE sljit_s32 emit_sse2_store(struct sljit_compiler *compiler,
+ sljit_s32 single, sljit_s32 dst, sljit_sw dstw, sljit_s32 src);
+
+static SLJIT_INLINE sljit_s32 emit_sse2_load(struct sljit_compiler *compiler,
+ sljit_s32 single, sljit_s32 dst, sljit_s32 src, sljit_sw srcw);
+
#ifdef _WIN32
#include <malloc.h>
-static void SLJIT_CALL sljit_grow_stack(sljit_sw local_size)
+static void SLJIT_FUNC sljit_grow_stack(sljit_sw local_size)
{
/* Workaround for calling the internal _chkstk() function on Windows.
This function touches all 4k pages belonging to the requested stack space,
@@ -1115,7 +1131,7 @@ static sljit_s32 emit_unary(struct sljit_compiler *compiler, sljit_u8 opcode,
return SLJIT_SUCCESS;
}
- if (dst == SLJIT_UNUSED)
+ if (SLJIT_UNLIKELY(dst == SLJIT_UNUSED))
dst = TMP_REG1;
if (FAST_IS_REG(dst)) {
@@ -1182,12 +1198,6 @@ static sljit_s32 emit_clz(struct sljit_compiler *compiler, sljit_s32 op_flags,
SLJIT_UNUSED_ARG(op_flags);
- if (SLJIT_UNLIKELY(src & SLJIT_IMM)) {
- EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, srcw);
- src = TMP_REG1;
- srcw = 0;
- }
-
if (cpu_has_cmov == -1)
get_cpu_features();
@@ -1242,13 +1252,9 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
{
- sljit_s32 update = 0;
sljit_s32 op_flags = GET_ALL_FLAGS(op);
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
sljit_s32 dst_is_ereg = 0;
- sljit_s32 src_is_ereg = 0;
-#else
-# define src_is_ereg 0
#endif
CHECK_ERROR();
@@ -1257,7 +1263,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
ADJUST_LOCAL_OFFSET(src, srcw);
CHECK_EXTRA_REGS(dst, dstw, dst_is_ereg = 1);
- CHECK_EXTRA_REGS(src, srcw, src_is_ereg = 1);
+ CHECK_EXTRA_REGS(src, srcw, (void)0);
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
compiler->mode32 = op_flags & SLJIT_I32_OP;
#endif
@@ -1270,34 +1276,29 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
op = GET_OPCODE(op);
- if (op >= SLJIT_MOV && op <= SLJIT_MOVU_P) {
+ if (op >= SLJIT_MOV && op <= SLJIT_MOV_P) {
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
compiler->mode32 = 0;
#endif
+ if (FAST_IS_REG(src) && src == dst) {
+ if (!TYPE_CAST_NEEDED(op))
+ return SLJIT_SUCCESS;
+ }
+
if (op_flags & SLJIT_I32_OP) {
- if (FAST_IS_REG(src) && src == dst) {
- if (!TYPE_CAST_NEEDED(op))
- return SLJIT_SUCCESS;
- }
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
- if (op == SLJIT_MOV_S32 && (src & SLJIT_MEM))
- op = SLJIT_MOV_U32;
- if (op == SLJIT_MOVU_S32 && (src & SLJIT_MEM))
- op = SLJIT_MOVU_U32;
- if (op == SLJIT_MOV_U32 && (src & SLJIT_IMM))
- op = SLJIT_MOV_S32;
- if (op == SLJIT_MOVU_U32 && (src & SLJIT_IMM))
- op = SLJIT_MOVU_S32;
+ if (src & SLJIT_MEM) {
+ if (op == SLJIT_MOV_S32)
+ op = SLJIT_MOV_U32;
+ }
+ else if (src & SLJIT_IMM) {
+ if (op == SLJIT_MOV_U32)
+ op = SLJIT_MOV_S32;
+ }
#endif
}
- SLJIT_COMPILE_ASSERT(SLJIT_MOV + 8 == SLJIT_MOVU, movu_offset);
- if (op >= SLJIT_MOVU) {
- update = 1;
- op -= 8;
- }
-
if (src & SLJIT_IMM) {
switch (op) {
case SLJIT_MOV_U8:
@@ -1369,28 +1370,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
if (SLJIT_UNLIKELY(dst_is_ereg) && dst == TMP_REG1)
return emit_mov(compiler, SLJIT_MEM1(SLJIT_SP), dstw, TMP_REG1, 0);
#endif
-
- if (SLJIT_UNLIKELY(update) && (src & SLJIT_MEM) && !src_is_ereg && (src & REG_MASK)) {
- if ((src & OFFS_REG_MASK) != 0) {
- FAIL_IF(emit_cum_binary(compiler, ADD_r_rm, ADD_rm_r, ADD, ADD_EAX_i32,
- (src & REG_MASK), 0, (src & REG_MASK), 0, OFFS_REG(dst), 0));
- }
- else if (srcw != 0) {
- FAIL_IF(emit_cum_binary(compiler, ADD_r_rm, ADD_rm_r, ADD, ADD_EAX_i32,
- (src & REG_MASK), 0, (src & REG_MASK), 0, SLJIT_IMM, srcw));
- }
- }
-
- if (SLJIT_UNLIKELY(update) && (dst & SLJIT_MEM) && (dst & REG_MASK)) {
- if ((dst & OFFS_REG_MASK) != 0) {
- FAIL_IF(emit_cum_binary(compiler, ADD_r_rm, ADD_rm_r, ADD, ADD_EAX_i32,
- (dst & REG_MASK), 0, (dst & REG_MASK), 0, OFFS_REG(dst), 0));
- }
- else if (dstw != 0) {
- FAIL_IF(emit_cum_binary(compiler, ADD_r_rm, ADD_rm_r, ADD, ADD_EAX_i32,
- (dst & REG_MASK), 0, (dst & REG_MASK), 0, SLJIT_IMM, dstw));
- }
- }
return SLJIT_SUCCESS;
}
@@ -1408,10 +1387,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
}
return SLJIT_SUCCESS;
-
-#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
-# undef src_is_ereg
-#endif
}
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
@@ -1445,12 +1420,16 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
#endif
static sljit_s32 emit_cum_binary(struct sljit_compiler *compiler,
- sljit_u8 op_rm, sljit_u8 op_mr, sljit_u8 op_imm, sljit_u8 op_eax_imm,
+ sljit_u32 op_types,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w)
{
sljit_u8* inst;
+ sljit_u8 op_eax_imm = (op_types >> 24);
+ sljit_u8 op_rm = (op_types >> 16) & 0xff;
+ sljit_u8 op_mr = (op_types >> 8) & 0xff;
+ sljit_u8 op_imm = op_types & 0xff;
if (dst == SLJIT_UNUSED) {
EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
@@ -1561,12 +1540,16 @@ static sljit_s32 emit_cum_binary(struct sljit_compiler *compiler,
}
static sljit_s32 emit_non_cum_binary(struct sljit_compiler *compiler,
- sljit_u8 op_rm, sljit_u8 op_mr, sljit_u8 op_imm, sljit_u8 op_eax_imm,
+ sljit_u32 op_types,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w)
{
sljit_u8* inst;
+ sljit_u8 op_eax_imm = (op_types >> 24);
+ sljit_u8 op_rm = (op_types >> 16) & 0xff;
+ sljit_u8 op_mr = (op_types >> 8) & 0xff;
+ sljit_u8 op_imm = op_types & 0xff;
if (dst == SLJIT_UNUSED) {
EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
@@ -2044,7 +2027,7 @@ static sljit_s32 emit_shift(struct sljit_compiler *compiler,
*inst |= mode;
EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
}
- else if (FAST_IS_REG(dst) && dst != src2 && !ADDRESSING_DEPENDS_ON(src2, dst)) {
+ else if (SLOW_IS_REG(dst) && dst != src2 && !ADDRESSING_DEPENDS_ON(src2, dst)) {
if (src1 != dst)
EMIT_MOV(compiler, dst, 0, src1, src1w);
EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_PREF_SHIFT_REG, 0);
@@ -2057,27 +2040,24 @@ static sljit_s32 emit_shift(struct sljit_compiler *compiler,
else {
/* This case is complex since ecx itself may be used for
addressing, and this case must be supported as well. */
-#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), 0, SLJIT_PREF_SHIFT_REG, 0);
EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w);
inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
FAIL_IF(!inst);
*inst |= mode;
EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, SLJIT_MEM1(SLJIT_SP), 0);
- EMIT_MOV(compiler, dst, dstw, TMP_REG1, 0);
#else
- EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
- EMIT_MOV(compiler, TMP_REG2, 0, src2, src2w);
- inst = emit_x86_instruction(compiler, 1, TMP_REG2, 0, SLJIT_PREF_SHIFT_REG, 0);
- FAIL_IF(!inst);
- *inst = XCHG_r_rm;
+ EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_PREF_SHIFT_REG, 0);
+ EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w);
inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
FAIL_IF(!inst);
*inst |= mode;
EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG2, 0);
- EMIT_MOV(compiler, dst, dstw, TMP_REG1, 0);
#endif
+ if (dst != SLJIT_UNUSED)
+ return emit_mov(compiler, dst, dstw, TMP_REG1, 0);
}
return SLJIT_SUCCESS;
@@ -2101,7 +2081,7 @@ static sljit_s32 emit_shift_with_flags(struct sljit_compiler *compiler,
if (!set_flags)
return emit_mov(compiler, dst, dstw, src1, src1w);
/* OR dst, src, 0 */
- return emit_cum_binary(compiler, OR_r_rm, OR_rm_r, OR, OR_EAX_i32,
+ return emit_cum_binary(compiler, BINARY_OPCODE(OR),
dst, dstw, src1, src1w, SLJIT_IMM, 0);
}
@@ -2111,10 +2091,10 @@ static sljit_s32 emit_shift_with_flags(struct sljit_compiler *compiler,
if (!FAST_IS_REG(dst))
FAIL_IF(emit_cmp_binary(compiler, src1, src1w, SLJIT_IMM, 0));
- FAIL_IF(emit_shift(compiler,mode, dst, dstw, src1, src1w, src2, src2w));
+ FAIL_IF(emit_shift(compiler, mode, dst, dstw, src1, src1w, src2, src2w));
if (FAST_IS_REG(dst))
- return emit_cmp_binary(compiler, dst, dstw, SLJIT_IMM, 0);
+ return emit_cmp_binary(compiler, (dst == SLJIT_UNUSED) ? TMP_REG1 : dst, dstw, SLJIT_IMM, 0);
return SLJIT_SUCCESS;
}
@@ -2145,10 +2125,10 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
if (emit_lea_binary(compiler, dst, dstw, src1, src1w, src2, src2w) != SLJIT_ERR_UNSUPPORTED)
return compiler->error;
}
- return emit_cum_binary(compiler, ADD_r_rm, ADD_rm_r, ADD, ADD_EAX_i32,
+ return emit_cum_binary(compiler, BINARY_OPCODE(ADD),
dst, dstw, src1, src1w, src2, src2w);
case SLJIT_ADDC:
- return emit_cum_binary(compiler, ADC_r_rm, ADC_rm_r, ADC, ADC_EAX_i32,
+ return emit_cum_binary(compiler, BINARY_OPCODE(ADC),
dst, dstw, src1, src1w, src2, src2w);
case SLJIT_SUB:
if (!HAS_FLAGS(op)) {
@@ -2158,23 +2138,23 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
if (dst == SLJIT_UNUSED)
return emit_cmp_binary(compiler, src1, src1w, src2, src2w);
- return emit_non_cum_binary(compiler, SUB_r_rm, SUB_rm_r, SUB, SUB_EAX_i32,
+ return emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
dst, dstw, src1, src1w, src2, src2w);
case SLJIT_SUBC:
- return emit_non_cum_binary(compiler, SBB_r_rm, SBB_rm_r, SBB, SBB_EAX_i32,
+ return emit_non_cum_binary(compiler, BINARY_OPCODE(SBB),
dst, dstw, src1, src1w, src2, src2w);
case SLJIT_MUL:
return emit_mul(compiler, dst, dstw, src1, src1w, src2, src2w);
case SLJIT_AND:
if (dst == SLJIT_UNUSED)
return emit_test_binary(compiler, src1, src1w, src2, src2w);
- return emit_cum_binary(compiler, AND_r_rm, AND_rm_r, AND, AND_EAX_i32,
+ return emit_cum_binary(compiler, BINARY_OPCODE(AND),
dst, dstw, src1, src1w, src2, src2w);
case SLJIT_OR:
- return emit_cum_binary(compiler, OR_r_rm, OR_rm_r, OR, OR_EAX_i32,
+ return emit_cum_binary(compiler, BINARY_OPCODE(OR),
dst, dstw, src1, src1w, src2, src2w);
case SLJIT_XOR:
- return emit_cum_binary(compiler, XOR_r_rm, XOR_rm_r, XOR, XOR_EAX_i32,
+ return emit_cum_binary(compiler, BINARY_OPCODE(XOR),
dst, dstw, src1, src1w, src2, src2w);
case SLJIT_SHL:
return emit_shift_with_flags(compiler, SHL, HAS_FLAGS(op),
@@ -2203,7 +2183,11 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg)
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg)
{
CHECK_REG_INDEX(check_sljit_get_float_register_index(reg));
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
return reg;
+#else
+ return freg_map[reg];
+#endif
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler,
@@ -2345,6 +2329,7 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compile
FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, TMP_FREG, src1, src1w));
src1 = TMP_FREG;
}
+
return emit_sse2_logic(compiler, UCOMISD_x_xm, !(op & SLJIT_F32_OP), src1, src2, src2w);
}
@@ -2516,9 +2501,6 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile
set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP);
type &= 0xff;
- if (type >= SLJIT_CALL1)
- PTR_FAIL_IF(call_with_args(compiler, type));
-
/* Worst case size. */
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
compiler->size += (type >= SLJIT_JUMP) ? 5 : 6;
@@ -2534,14 +2516,6 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile
return jump;
}
-#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
-#ifndef _WIN64
-#define IS_REG_CHANGED_BY_CALL(src, type) ((src) == SLJIT_R3)
-#else
-#define IS_REG_CHANGED_BY_CALL(src, type) ((src) == SLJIT_R2)
-#endif
-#endif
-
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw)
{
sljit_u8 *inst;
@@ -2553,25 +2527,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi
CHECK_EXTRA_REGS(src, srcw, (void)0);
- if (type >= SLJIT_CALL1) {
-#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
-#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
- if (src == SLJIT_R2) {
- EMIT_MOV(compiler, TMP_REG1, 0, src, 0);
- src = TMP_REG1;
- }
- if (src == SLJIT_MEM1(SLJIT_SP) && type >= SLJIT_CALL3)
- srcw += sizeof(sljit_sw);
-#endif
-#else
- if ((src & SLJIT_MEM) || IS_REG_CHANGED_BY_CALL(src, type)) {
- EMIT_MOV(compiler, TMP_REG2, 0, src, srcw);
- src = TMP_REG2;
- }
-#endif
- FAIL_IF(call_with_args(compiler, type));
- }
-
if (src == SLJIT_IMM) {
jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
FAIL_IF_NULL(jump);
diff --git a/src/3rdparty/pcre2/src/sljit/sljitUtils.c b/src/3rdparty/pcre2/src/sljit/sljitUtils.c
index 9029db292c..5c2a838932 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitUtils.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitUtils.c
@@ -48,12 +48,12 @@ static SLJIT_INLINE void allocator_release_lock(void)
#if (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK)
-SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void)
+SLJIT_API_FUNC_ATTRIBUTE void SLJIT_FUNC sljit_grab_lock(void)
{
/* Always successful. */
}
-SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void)
+SLJIT_API_FUNC_ATTRIBUTE void SLJIT_FUNC sljit_release_lock(void)
{
/* Always successful. */
}
@@ -88,7 +88,7 @@ static SLJIT_INLINE void allocator_release_lock(void)
static HANDLE global_mutex = 0;
-SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void)
+SLJIT_API_FUNC_ATTRIBUTE void SLJIT_FUNC sljit_grab_lock(void)
{
/* No idea what to do if an error occurs. Static mutexes should never fail... */
if (!global_mutex)
@@ -97,7 +97,7 @@ SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void)
WaitForSingleObject(global_mutex, INFINITE);
}
-SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void)
+SLJIT_API_FUNC_ATTRIBUTE void SLJIT_FUNC sljit_release_lock(void)
{
ReleaseMutex(global_mutex);
}
@@ -130,12 +130,12 @@ static SLJIT_INLINE void allocator_release_lock(void)
static pthread_mutex_t global_mutex = PTHREAD_MUTEX_INITIALIZER;
-SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void)
+SLJIT_API_FUNC_ATTRIBUTE void SLJIT_FUNC sljit_grab_lock(void)
{
pthread_mutex_lock(&global_mutex);
}
-SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void)
+SLJIT_API_FUNC_ATTRIBUTE void SLJIT_FUNC sljit_release_lock(void)
{
pthread_mutex_unlock(&global_mutex);
}
@@ -203,7 +203,7 @@ static SLJIT_INLINE sljit_s32 open_dev_zero(void)
/* Planning to make it even more clever in the future. */
static sljit_sw sljit_page_align = 0;
-SLJIT_API_FUNC_ATTRIBUTE struct sljit_stack* SLJIT_CALL sljit_allocate_stack(sljit_uw limit, sljit_uw max_limit, void *allocator_data)
+SLJIT_API_FUNC_ATTRIBUTE struct sljit_stack* SLJIT_FUNC sljit_allocate_stack(sljit_uw start_size, sljit_uw max_size, void *allocator_data)
{
struct sljit_stack *stack;
void *ptr;
@@ -212,7 +212,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_stack* SLJIT_CALL sljit_allocate_stack(slj
#endif
SLJIT_UNUSED_ARG(allocator_data);
- if (limit > max_limit || limit < 1)
+ if (start_size > max_size || start_size < 1)
return NULL;
#ifdef _WIN32
@@ -234,25 +234,27 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_stack* SLJIT_CALL sljit_allocate_stack(slj
if (!stack)
return NULL;
- /* Align max_limit. */
- max_limit = (max_limit + sljit_page_align) & ~sljit_page_align;
+ /* Align max_size. */
+ max_size = (max_size + sljit_page_align) & ~sljit_page_align;
#ifdef _WIN32
- ptr = VirtualAlloc(NULL, max_limit, MEM_RESERVE, PAGE_READWRITE);
+ ptr = VirtualAlloc(NULL, max_size, MEM_RESERVE, PAGE_READWRITE);
if (!ptr) {
SLJIT_FREE(stack, allocator_data);
return NULL;
}
- stack->max_limit = (sljit_u8 *)ptr;
- stack->base = stack->max_limit + max_limit;
- stack->limit = stack->base;
- if (sljit_stack_resize(stack, stack->base - limit)) {
+
+ stack->min_start = (sljit_u8 *)ptr;
+ stack->end = stack->min_start + max_size;
+ stack->start = stack->end;
+
+ if (sljit_stack_resize(stack, stack->end - start_size) == NULL) {
sljit_free_stack(stack, allocator_data);
return NULL;
}
#else
#ifdef MAP_ANON
- ptr = mmap(NULL, max_limit, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
+ ptr = mmap(NULL, max_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
#else
if (dev_zero < 0) {
if (open_dev_zero()) {
@@ -260,73 +262,70 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_stack* SLJIT_CALL sljit_allocate_stack(slj
return NULL;
}
}
- ptr = mmap(NULL, max_limit, PROT_READ | PROT_WRITE, MAP_PRIVATE, dev_zero, 0);
+ ptr = mmap(NULL, max_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, dev_zero, 0);
#endif
if (ptr == MAP_FAILED) {
SLJIT_FREE(stack, allocator_data);
return NULL;
}
- stack->max_limit = (sljit_u8 *)ptr;
- stack->base = stack->max_limit + max_limit;
- stack->limit = stack->base - limit;
+ stack->min_start = (sljit_u8 *)ptr;
+ stack->end = stack->min_start + max_size;
+ stack->start = stack->end - start_size;
#endif
- stack->top = stack->base;
+ stack->top = stack->end;
return stack;
}
#undef PAGE_ALIGN
-SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_free_stack(struct sljit_stack *stack, void *allocator_data)
+SLJIT_API_FUNC_ATTRIBUTE void SLJIT_FUNC sljit_free_stack(struct sljit_stack *stack, void *allocator_data)
{
SLJIT_UNUSED_ARG(allocator_data);
#ifdef _WIN32
- VirtualFree((void*)stack->max_limit, 0, MEM_RELEASE);
+ VirtualFree((void*)stack->min_start, 0, MEM_RELEASE);
#else
- munmap((void*)stack->max_limit, stack->base - stack->max_limit);
+ munmap((void*)stack->min_start, stack->end - stack->min_start);
#endif
SLJIT_FREE(stack, allocator_data);
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_sw SLJIT_CALL sljit_stack_resize(struct sljit_stack *stack, sljit_u8 *new_limit)
+SLJIT_API_FUNC_ATTRIBUTE sljit_u8 *SLJIT_FUNC sljit_stack_resize(struct sljit_stack *stack, sljit_u8 *new_start)
{
- sljit_uw aligned_old_limit;
- sljit_uw aligned_new_limit;
+ sljit_uw aligned_old_start;
+ sljit_uw aligned_new_start;
+
+ if ((new_start < stack->min_start) || (new_start >= stack->end))
+ return NULL;
- if ((new_limit < stack->max_limit) || (new_limit >= stack->base))
- return -1;
#ifdef _WIN32
- aligned_new_limit = (sljit_uw)new_limit & ~sljit_page_align;
- aligned_old_limit = ((sljit_uw)stack->limit) & ~sljit_page_align;
- if (aligned_new_limit != aligned_old_limit) {
- if (aligned_new_limit < aligned_old_limit) {
- if (!VirtualAlloc((void*)aligned_new_limit, aligned_old_limit - aligned_new_limit, MEM_COMMIT, PAGE_READWRITE))
- return -1;
+ aligned_new_start = (sljit_uw)new_start & ~sljit_page_align;
+ aligned_old_start = ((sljit_uw)stack->start) & ~sljit_page_align;
+ if (aligned_new_start != aligned_old_start) {
+ if (aligned_new_start < aligned_old_start) {
+ if (!VirtualAlloc((void*)aligned_new_start, aligned_old_start - aligned_new_start, MEM_COMMIT, PAGE_READWRITE))
+ return NULL;
}
else {
- if (!VirtualFree((void*)aligned_old_limit, aligned_new_limit - aligned_old_limit, MEM_DECOMMIT))
- return -1;
+ if (!VirtualFree((void*)aligned_old_start, aligned_new_start - aligned_old_start, MEM_DECOMMIT))
+ return NULL;
}
}
- stack->limit = new_limit;
- return 0;
#else
- if (new_limit <= stack->limit) {
- stack->limit = new_limit;
- return 0;
- }
- aligned_new_limit = (sljit_uw)new_limit & ~sljit_page_align;
- aligned_old_limit = ((sljit_uw)stack->limit) & ~sljit_page_align;
- /* If madvise is available, we release the unnecessary space. */
+ if (stack->start < new_start) {
+ aligned_new_start = (sljit_uw)new_start & ~sljit_page_align;
+ aligned_old_start = ((sljit_uw)stack->start) & ~sljit_page_align;
+ /* If madvise is available, we release the unnecessary space. */
#if defined(MADV_DONTNEED)
- if (aligned_new_limit > aligned_old_limit)
- madvise((void*)aligned_old_limit, aligned_new_limit - aligned_old_limit, MADV_DONTNEED);
+ if (aligned_new_start > aligned_old_start)
+ madvise((void*)aligned_old_start, aligned_new_start - aligned_old_start, MADV_DONTNEED);
#elif defined(POSIX_MADV_DONTNEED)
- if (aligned_new_limit > aligned_old_limit)
- posix_madvise((void*)aligned_old_limit, aligned_new_limit - aligned_old_limit, POSIX_MADV_DONTNEED);
+ if (aligned_new_start > aligned_old_start)
+ posix_madvise((void*)aligned_old_start, aligned_new_start - aligned_old_start, POSIX_MADV_DONTNEED);
#endif
- stack->limit = new_limit;
- return 0;
+ }
#endif
+ stack->start = new_start;
+ return new_start;
}
#endif /* SLJIT_UTIL_STACK */
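
Editor's note (not part of the patch): the resize function now returns the new start pointer, or NULL on failure, instead of the old 0 / -1 convention. A rough usage sketch follows; the field names (start, end, min_start) and semantics are taken only from this diff and are not verified against any other sljit release.

#include "sljitLir.h"

static void stack_demo(void)
{
    struct sljit_stack *stack = sljit_allocate_stack(8 * 1024, 64 * 1024, NULL);
    if (stack == NULL)
        return;
    /* The usable area is [start, end); it grows downwards towards min_start. */
    sljit_u8 *new_start = sljit_stack_resize(stack, stack->start - 4 * 1024);
    if (new_start == NULL) {
        /* The request fell outside [min_start, end); start is unchanged. */
    }
    sljit_free_stack(stack, NULL);
}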
diff --git a/src/corelib/doc/snippets/code/src_corelib_tools_qlistdata.cpp b/src/corelib/doc/snippets/code/src_corelib_tools_qlistdata.cpp
index 27565a7878..cc3f689710 100644
--- a/src/corelib/doc/snippets/code/src_corelib_tools_qlistdata.cpp
+++ b/src/corelib/doc/snippets/code/src_corelib_tools_qlistdata.cpp
@@ -131,7 +131,7 @@ list.removeAll("sun");
QList<QString> list;
list << "sun" << "cloud" << "sun" << "rain";
list.removeOne("sun");
-// list: ["cloud", ,"sun", "rain"]
+// list: ["cloud", "sun", "rain"]
//! [10]
diff --git a/src/corelib/io/qdir.cpp b/src/corelib/io/qdir.cpp
index 06eacf5455..7c0a48f8f2 100644
--- a/src/corelib/io/qdir.cpp
+++ b/src/corelib/io/qdir.cpp
@@ -715,6 +715,37 @@ QString QDir::dirName() const
return d->dirEntry.fileName();
}
+
+#ifdef Q_OS_WIN
+static int drivePrefixLength(const QString &path)
+{
+ // Used to extract path's drive for use as prefix for an "absolute except for drive" path
+ const int size = path.length();
+ int drive = 2; // length of drive prefix
+ if (size > 1 && path.at(1).unicode() == ':') {
+ if (Q_UNLIKELY(!path.at(0).isLetter()))
+ return 0;
+ } else if (path.startsWith(QLatin1String("//"))) {
+ // UNC path; use its //server/share part as "drive" - it's as sane a
+ // thing as we can do.
+ for (int i = 2; i-- > 0; ) { // Scan two "path fragments":
+ while (drive < size && path.at(drive).unicode() == '/')
+ drive++;
+ if (drive >= size) {
+ qWarning("Base directory starts with neither a drive nor a UNC share: %s",
+ qUtf8Printable(QDir::toNativeSeparators(path)));
+ return 0;
+ }
+ while (drive < size && path.at(drive).unicode() != '/')
+ drive++;
+ }
+ } else {
+ return 0;
+ }
+ return drive;
+}
+#endif // Q_OS_WIN
+
/*!
Returns the path name of a file in the directory. Does \e not
check if the file actually exists in the directory; but see
@@ -727,16 +758,27 @@ QString QDir::dirName() const
QString QDir::filePath(const QString &fileName) const
{
const QDirPrivate* d = d_ptr.constData();
- if (isAbsolutePath(fileName))
+ // Mistrust our own isAbsolutePath() for real files; Q_OS_WIN needs a drive.
+ if (fileName.startsWith(QLatin1Char(':')) // i.e. resource path
+ ? isAbsolutePath(fileName) : QFileSystemEntry(fileName).isAbsolute()) {
return fileName;
+ }
QString ret = d->dirEntry.filePath();
- if (!fileName.isEmpty()) {
- if (!ret.isEmpty() && ret[(int)ret.length()-1] != QLatin1Char('/') && fileName[0] != QLatin1Char('/'))
- ret += QLatin1Char('/');
- ret += fileName;
+ if (fileName.isEmpty())
+ return ret;
+
+#ifdef Q_OS_WIN
+ if (fileName.startsWith(QLatin1Char('/')) || fileName.startsWith(QLatin1Char('\\'))) {
+ // Handle the "absolute except for drive" case (i.e. \blah not c:\blah):
+ const int drive = drivePrefixLength(ret);
+ return drive > 0 ? ret.leftRef(drive) % fileName : fileName;
}
- return ret;
+#endif // Q_OS_WIN
+
+ if (ret.isEmpty() || ret.endsWith(QLatin1Char('/')))
+ return ret % fileName;
+ return ret % QLatin1Char('/') % fileName;
}
/*!
@@ -750,9 +792,11 @@ QString QDir::filePath(const QString &fileName) const
QString QDir::absoluteFilePath(const QString &fileName) const
{
const QDirPrivate* d = d_ptr.constData();
- // Don't trust our own isAbsolutePath(); Q_OS_WIN needs a drive.
- if (QFileSystemEntry(fileName).isAbsolute())
+ // Mistrust our own isAbsolutePath() for real files; Q_OS_WIN needs a drive.
+ if (fileName.startsWith(QLatin1Char(':')) // i.e. resource path
+ ? isAbsolutePath(fileName) : QFileSystemEntry(fileName).isAbsolute()) {
return fileName;
+ }
d->resolveAbsoluteEntry();
const QString absoluteDirPath = d->absoluteDirEntry.filePath();
@@ -760,35 +804,15 @@ QString QDir::absoluteFilePath(const QString &fileName) const
return absoluteDirPath;
#ifdef Q_OS_WIN
// Handle the "absolute except for drive" case (i.e. \blah not c:\blah):
- int size = absoluteDirPath.length();
- if ((fileName.startsWith(QLatin1Char('/'))
- || fileName.startsWith(QLatin1Char('\\')))
- && size > 1) {
+ if (fileName.startsWith(QLatin1Char('/')) || fileName.startsWith(QLatin1Char('\\'))) {
// Combine absoluteDirPath's drive with fileName
- int drive = 2; // length of drive prefix
- if (Q_UNLIKELY(absoluteDirPath.at(1).unicode() != ':')) {
- // Presumably, absoluteDirPath is an UNC path; use its //server/share
- // part as "drive" - it's as sane a thing as we can do.
- for (int i = 2; i-- > 0; ) { // Scan two "path fragments":
- while (drive < size && absoluteDirPath.at(drive).unicode() == '/')
- drive++;
- if (drive >= size) {
- qWarning("Base directory starts with neither a drive nor a UNC share: %s",
- qPrintable(QDir::toNativeSeparators(absoluteDirPath)));
- return QString();
- }
- while (drive < size && absoluteDirPath.at(drive).unicode() != '/')
- drive++;
- }
- // We'll append fileName, which starts with a slash; so omit trailing slash:
- if (absoluteDirPath.at(drive).unicode() == '/')
- drive--;
- } else if (!absoluteDirPath.at(0).isLetter()) {
- qWarning("Base directory's drive is not a letter: %s",
- qPrintable(QDir::toNativeSeparators(absoluteDirPath)));
- return QString();
- }
- return absoluteDirPath.leftRef(drive) % fileName;
+ const int drive = drivePrefixLength(absoluteDirPath);
+ if (Q_LIKELY(drive))
+ return absoluteDirPath.leftRef(drive) % fileName;
+
+ qWarning("Base directory's drive is not a letter: %s",
+ qUtf8Printable(QDir::toNativeSeparators(absoluteDirPath)));
+ return QString();
}
#endif // Q_OS_WIN
if (!absoluteDirPath.endsWith(QLatin1Char('/')))
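
Editor's note (not part of the patch): a small sketch of how the Windows-only "absolute except for drive" handling above is intended to behave; the paths are illustrative and the commented results reflect this change, not documented guarantees.

#include <QDir>
#include <QString>

void filePathDemo()
{
    QDir dir(QStringLiteral("C:/work/project"));
    QString a = dir.filePath(QStringLiteral("data.txt"));    // "C:/work/project/data.txt"
    QString b = dir.filePath(QStringLiteral("/tmp/x.txt"));  // "C:/tmp/x.txt" - drive borrowed from dir
    QString c = dir.filePath(QStringLiteral("D:/x.txt"));    // "D:/x.txt" - already absolute, returned as-is
    Q_UNUSED(a); Q_UNUSED(b); Q_UNUSED(c);
}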
diff --git a/src/corelib/io/qfsfileengine_unix.cpp b/src/corelib/io/qfsfileengine_unix.cpp
index bf648cdfe0..90ad0126d6 100644
--- a/src/corelib/io/qfsfileengine_unix.cpp
+++ b/src/corelib/io/qfsfileengine_unix.cpp
@@ -636,6 +636,7 @@ bool QFSFileEngine::setFileTime(const QDateTime &newDate, FileTime time)
uchar *QFSFileEnginePrivate::map(qint64 offset, qint64 size, QFile::MemoryMapFlags flags)
{
+ qint64 maxFileOffset = std::numeric_limits<QT_OFF_T>::max();
#if (defined(Q_OS_LINUX) || defined(Q_OS_ANDROID)) && Q_PROCESSOR_WORDSIZE == 4
// The Linux mmap2 system call on 32-bit takes a page-shifted 32-bit
// integer so the maximum offset is 1 << (32+12) (the shift is always 12,
@@ -644,9 +645,7 @@ uchar *QFSFileEnginePrivate::map(qint64 offset, qint64 size, QFile::MemoryMapFla
// and Bionic): all of them do the right shift, but don't confirm that the
// result fits into the 32-bit parameter to the kernel.
- static qint64 MaxFileOffset = (Q_INT64_C(1) << (32+12)) - 1;
-#else
- static qint64 MaxFileOffset = std::numeric_limits<QT_OFF_T>::max();
+ maxFileOffset = qMin((Q_INT64_C(1) << (32+12)) - 1, maxFileOffset);
#endif
Q_Q(QFSFileEngine);
@@ -655,7 +654,7 @@ uchar *QFSFileEnginePrivate::map(qint64 offset, qint64 size, QFile::MemoryMapFla
return 0;
}
- if (offset < 0 || offset > MaxFileOffset
+ if (offset < 0 || offset > maxFileOffset
|| size < 0 || quint64(size) > quint64(size_t(-1))) {
q->setError(QFile::UnspecifiedError, qt_error_string(int(EINVAL)));
return 0;
diff --git a/src/corelib/io/qstandardpaths_win.cpp b/src/corelib/io/qstandardpaths_win.cpp
index eeb02419c3..1809861fc6 100644
--- a/src/corelib/io/qstandardpaths_win.cpp
+++ b/src/corelib/io/qstandardpaths_win.cpp
@@ -129,7 +129,7 @@ static GUID writableSpecialFolderId(QStandardPaths::StandardLocation type)
}
// Convenience for SHGetKnownFolderPath().
-static QString sHGetKnownFolderPath(const GUID &clsid, QStandardPaths::StandardLocation type, bool warn = false)
+static QString sHGetKnownFolderPath(const GUID &clsid)
{
QString result;
typedef HRESULT (WINAPI *GetKnownFolderPath)(const GUID&, DWORD, HANDLE, LPWSTR*);
@@ -141,11 +141,6 @@ static QString sHGetKnownFolderPath(const GUID &clsid, QStandardPaths::StandardL
if (Q_LIKELY(sHGetKnownFolderPath && SUCCEEDED(sHGetKnownFolderPath(clsid, KF_FLAG_DONT_VERIFY, 0, &path)))) {
result = convertCharArray(path);
CoTaskMemFree(path);
- } else {
- if (warn) {
- qErrnoWarning("SHGetKnownFolderPath() failed for standard location \"%s\".",
- qPrintable(displayName(type)));
- }
}
return result;
}
@@ -155,7 +150,7 @@ QString QStandardPaths::writableLocation(StandardLocation type)
QString result;
switch (type) {
case DownloadLocation:
- result = sHGetKnownFolderPath(FOLDERID_Downloads, type);
+ result = sHGetKnownFolderPath(FOLDERID_Downloads);
if (result.isEmpty())
result = QStandardPaths::writableLocation(QStandardPaths::DocumentsLocation);
break;
@@ -164,7 +159,7 @@ QString QStandardPaths::writableLocation(StandardLocation type)
// Although Microsoft has a Cache key it is a pointer to IE's cache, not a cache
// location for everyone. Most applications seem to be using a
// cache directory located in their AppData directory
- result = sHGetKnownFolderPath(writableSpecialFolderId(AppLocalDataLocation), type, /* warn */ true);
+ result = sHGetKnownFolderPath(writableSpecialFolderId(AppLocalDataLocation));
if (!result.isEmpty()) {
appendTestMode(result);
appendOrganizationAndApp(result);
@@ -173,7 +168,7 @@ QString QStandardPaths::writableLocation(StandardLocation type)
break;
case GenericCacheLocation:
- result = sHGetKnownFolderPath(writableSpecialFolderId(GenericDataLocation), type, /* warn */ true);
+ result = sHGetKnownFolderPath(writableSpecialFolderId(GenericDataLocation));
if (!result.isEmpty()) {
appendTestMode(result);
result += QLatin1String("/cache");
@@ -190,7 +185,7 @@ QString QStandardPaths::writableLocation(StandardLocation type)
break;
default:
- result = sHGetKnownFolderPath(writableSpecialFolderId(type), type, /* warn */ isConfigLocation(type));
+ result = sHGetKnownFolderPath(writableSpecialFolderId(type));
if (!result.isEmpty() && isConfigLocation(type)) {
appendTestMode(result);
if (!isGenericConfigLocation(type))
@@ -214,7 +209,7 @@ QStringList QStandardPaths::standardLocations(StandardLocation type)
// type-specific handling goes here
if (isConfigLocation(type)) {
- QString programData = sHGetKnownFolderPath(FOLDERID_ProgramData, type);
+ QString programData = sHGetKnownFolderPath(FOLDERID_ProgramData);
if (!programData.isEmpty()) {
if (!isGenericConfigLocation(type))
appendOrganizationAndApp(programData);
diff --git a/src/corelib/io/qurl.cpp b/src/corelib/io/qurl.cpp
index 25881b9c62..399c3b8861 100644
--- a/src/corelib/io/qurl.cpp
+++ b/src/corelib/io/qurl.cpp
@@ -2545,7 +2545,7 @@ int QUrl::port(int defaultPort) const
The \a path data is interpreted according to \a mode: in StrictMode,
any '%' characters must be followed by exactly two hexadecimal characters
and some characters (including space) are not allowed in undecoded form. In
- TolerantMode (the default), all characters are accepted in undecoded form and the
+ TolerantMode, all characters are accepted in undecoded form and the
tolerant parser will correct stray '%' not followed by two hex characters.
In DecodedMode, '%' stand for themselves and encoded characters are not
possible.
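
Editor's note (not part of the patch): a brief sketch of the parsing modes the corrected sentence describes; the expected values in the comments are my reading of QUrl's behaviour, passing the mode explicitly rather than relying on any default.

#include <QUrl>

void setPathModes()
{
    QUrl url(QStringLiteral("http://example.com"));

    // TolerantMode: percent-encoding is honoured and a stray '%' is repaired.
    url.setPath(QStringLiteral("/docs/50%25 off"), QUrl::TolerantMode);
    // url.path() == "/docs/50% off"

    // DecodedMode: '%' stands for itself; the string is taken literally.
    url.setPath(QStringLiteral("/docs/50% off"), QUrl::DecodedMode);
    // url.toString(QUrl::FullyEncoded) ends with "/docs/50%25%20off"
}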
diff --git a/src/corelib/kernel/qcore_unix.cpp b/src/corelib/kernel/qcore_unix.cpp
index eb98cbef8f..18c031f137 100644
--- a/src/corelib/kernel/qcore_unix.cpp
+++ b/src/corelib/kernel/qcore_unix.cpp
@@ -44,6 +44,12 @@
#include <stdlib.h>
+#ifdef __GLIBC__
+# include <sys/syscall.h>
+# include <pthread.h>
+# include <unistd.h>
+#endif
+
#ifdef Q_OS_MAC
#include <mach/mach_time.h>
#endif
@@ -79,6 +85,20 @@ QByteArray qt_readlink(const char *path)
return buf;
}
+#if defined(Q_PROCESSOR_X86_32) && defined(__GLIBC__)
+# if !__GLIBC_PREREQ(2, 22)
+// glibc prior to release 2.22 had a bug that suppresses the third argument to
+// open() / open64() / openat(), causing file creation with O_TMPFILE to have
+// the wrong permissions. So we bypass the glibc implementation and go straight
+// for the syscall. See
+// https://sourceware.org/git/?p=glibc.git;a=commit;h=65f6f938cd562a614a68e15d0581a34b177ec29d
+int qt_open64(const char *pathname, int flags, mode_t mode)
+{
+ return syscall(SYS_open, pathname, flags | O_LARGEFILE, mode);
+}
+# endif
+#endif
+
#ifndef QT_BOOTSTRAPPED
#if QT_CONFIG(poll_pollts)
diff --git a/src/corelib/kernel/qcore_unix_p.h b/src/corelib/kernel/qcore_unix_p.h
index e538a7e22b..cb98bef347 100644
--- a/src/corelib/kernel/qcore_unix_p.h
+++ b/src/corelib/kernel/qcore_unix_p.h
@@ -176,6 +176,14 @@ inline void qt_ignore_sigpipe()
}
}
+#if defined(Q_PROCESSOR_X86_32) && defined(__GLIBC__)
+# if !__GLIBC_PREREQ(2, 22)
+int qt_open64(const char *pathname, int flags, mode_t);
+# undef QT_OPEN
+# define QT_OPEN qt_open64
+# endif
+#endif
+
// don't call QT_OPEN or ::open
// call qt_safe_open
static inline int qt_safe_open(const char *pathname, int flags, mode_t mode = 0777)
diff --git a/src/corelib/kernel/qcoreevent.cpp b/src/corelib/kernel/qcoreevent.cpp
index e34fe3f955..cacbb1e495 100644
--- a/src/corelib/kernel/qcoreevent.cpp
+++ b/src/corelib/kernel/qcoreevent.cpp
@@ -204,6 +204,12 @@ QT_BEGIN_NAMESPACE
\value Scroll The object needs to scroll to the supplied position (QScrollEvent).
\value Shortcut Key press in child for shortcut key handling (QShortcutEvent).
\value ShortcutOverride Key press in child, for overriding shortcut key handling (QKeyEvent).
+ When a shortcut is about to trigger, \c ShortcutOverride
+ is sent to the active window. This allows clients (e.g. widgets)
+ to signal that they will handle the shortcut themselves, by
+ accepting the event. If the shortcut override is accepted, the
+ event is delivered as a normal key press to the focus widget.
+ Otherwise, it triggers the shortcut action, if one exists.
\value Show Widget was shown on screen (QShowEvent).
\value ShowToParent A child widget has been shown.
\value SockAct Socket activated, used to implement QSocketNotifier.
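
Editor's note (not part of the patch): to illustrate the ShortcutOverride paragraph added above, a minimal widget that claims Ctrl+C for itself; the class name and key choice are hypothetical.

#include <QWidget>
#include <QKeyEvent>

class Editor : public QWidget
{
protected:
    bool event(QEvent *e) override
    {
        if (e->type() == QEvent::ShortcutOverride) {
            auto *ke = static_cast<QKeyEvent *>(e);
            if (ke->key() == Qt::Key_C && ke->modifiers() == Qt::ControlModifier) {
                // Accepting the override suppresses any matching shortcut;
                // the key arrives here later as a normal KeyPress event.
                ke->accept();
                return true;
            }
        }
        return QWidget::event(e);
    }
};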
diff --git a/src/corelib/plugin/qfactoryloader.cpp b/src/corelib/plugin/qfactoryloader.cpp
index ec6d98cc3c..0b35f41ca3 100644
--- a/src/corelib/plugin/qfactoryloader.cpp
+++ b/src/corelib/plugin/qfactoryloader.cpp
@@ -1,6 +1,7 @@
/****************************************************************************
**
** Copyright (C) 2016 The Qt Company Ltd.
+** Copyright (C) 2018 Intel Corporation.
** Contact: https://www.qt.io/licensing/
**
** This file is part of the QtCore module of the Qt Toolkit.
@@ -58,6 +59,29 @@
QT_BEGIN_NAMESPACE
+static inline int metaDataSignatureLength()
+{
+ return sizeof("QTMETADATA ") - 1;
+}
+
+QJsonDocument qJsonFromRawLibraryMetaData(const char *raw, qsizetype sectionSize)
+{
+ raw += metaDataSignatureLength();
+ sectionSize -= metaDataSignatureLength();
+
+ // the size of the embedded JSON object can be found 8 bytes into the data (see qjson_p.h)
+ uint size = qFromLittleEndian<uint>(raw + 8);
+ // but the maximum size of binary JSON is 128 MB
+ size = qMin(size, 128U * 1024 * 1024);
+ // and it doesn't include the size of the header (8 bytes)
+ size += 8;
+ // finally, it can't be bigger than the file or section size
+ size = qMin(sectionSize, qsizetype(size));
+
+ QByteArray json(raw, size);
+ return QJsonDocument::fromBinaryData(json);
+}
+
class QFactoryLoaderPrivate : public QObjectPrivate
{
Q_DECLARE_PUBLIC(QFactoryLoader)
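
Editor's note (not part of the patch): the new helper trusts neither the embedded length nor the caller, clamping the claimed object size to 128 MB and then to the section size. A standalone sketch of the same clamping arithmetic with made-up numbers follows; names and values are illustrative.

#include <QtEndian>
#include <QByteArray>

int clampDemo()
{
    // Pretend the plugin section is 600 bytes and the binary-JSON header
    // claims a 1024-byte object at offset 8.
    const qsizetype sectionSize = 600;
    QByteArray section(sectionSize, '\0');
    qToLittleEndian<quint32>(1024, section.data() + 8);

    quint32 size = qFromLittleEndian<quint32>(section.constData() + 8);
    size = qMin(size, 128U * 1024 * 1024);               // binary JSON is capped at 128 MB
    size += 8;                                           // include the 8-byte header
    size = quint32(qMin(sectionSize, qsizetype(size)));  // never read past the section
    return int(size);                                    // 600: the claimed length is not trusted
}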
diff --git a/src/corelib/plugin/qfactoryloader_p.h b/src/corelib/plugin/qfactoryloader_p.h
index 7be18942ae..fe722999ae 100644
--- a/src/corelib/plugin/qfactoryloader_p.h
+++ b/src/corelib/plugin/qfactoryloader_p.h
@@ -66,14 +66,7 @@
QT_BEGIN_NAMESPACE
-inline QJsonDocument qJsonFromRawLibraryMetaData(const char *raw)
-{
- raw += strlen("QTMETADATA ");
- // the size of the embedded JSON object can be found 8 bytes into the data (see qjson_p.h),
- // but doesn't include the size of the header (8 bytes)
- QByteArray json(raw, qFromLittleEndian<uint>(*(const uint *)(raw + 8)) + 8);
- return QJsonDocument::fromBinaryData(json);
-}
+QJsonDocument qJsonFromRawLibraryMetaData(const char *raw, qsizetype size);
class QFactoryLoaderPrivate;
class Q_CORE_EXPORT QFactoryLoader : public QObject
diff --git a/src/corelib/plugin/qlibrary.cpp b/src/corelib/plugin/qlibrary.cpp
index 4b55ead668..869ef6181f 100644
--- a/src/corelib/plugin/qlibrary.cpp
+++ b/src/corelib/plugin/qlibrary.cpp
@@ -317,7 +317,7 @@ static bool findPatternUnloaded(const QString &library, QLibraryPrivate *lib)
if (pos >= 0) {
if (hasMetaData) {
const char *data = filedata + pos;
- QJsonDocument doc = qJsonFromRawLibraryMetaData(data);
+ QJsonDocument doc = qJsonFromRawLibraryMetaData(data, qsizetype(fdlen));
lib->metaData = doc.object();
if (qt_debug_component())
qWarning("Found metadata in lib %s, metadata=\n%s\n",
@@ -691,7 +691,8 @@ static bool qt_get_metadata(QtPluginQueryVerificationDataFunction pfn, QLibraryP
if (!szData)
return false;
- QJsonDocument doc = qJsonFromRawLibraryMetaData(szData);
+ // the data is already loaded, so the size doesn't matter
+ QJsonDocument doc = qJsonFromRawLibraryMetaData(szData, INT_MAX);
if (doc.isNull())
return false;
priv->metaData = doc.object();
diff --git a/src/corelib/plugin/qpluginloader.cpp b/src/corelib/plugin/qpluginloader.cpp
index aab00cc7eb..83cbcd2b44 100644
--- a/src/corelib/plugin/qpluginloader.cpp
+++ b/src/corelib/plugin/qpluginloader.cpp
@@ -1,6 +1,7 @@
/****************************************************************************
**
** Copyright (C) 2016 The Qt Company Ltd.
+** Copyright (C) 2018 Intel Corporation.
** Contact: https://www.qt.io/licensing/
**
** This file is part of the QtCore module of the Qt Toolkit.
@@ -474,7 +475,10 @@ QVector<QStaticPlugin> QPluginLoader::staticPlugins()
*/
QJsonObject QStaticPlugin::metaData() const
{
- return qJsonFromRawLibraryMetaData(rawMetaData()).object();
+ // the data is already loaded, so this doesn't matter
+ qsizetype rawMetaDataSize = INT_MAX;
+
+ return qJsonFromRawLibraryMetaData(rawMetaData(), rawMetaDataSize).object();
}
QT_END_NAMESPACE
diff --git a/src/corelib/serialization/qdatastream.cpp b/src/corelib/serialization/qdatastream.cpp
index 54d1ae816b..60467b4824 100644
--- a/src/corelib/serialization/qdatastream.cpp
+++ b/src/corelib/serialization/qdatastream.cpp
@@ -1082,7 +1082,7 @@ QDataStream &QDataStream::readBytes(char *&s, uint &l)
Reads at most \a len bytes from the stream into \a s and returns the number of
bytes read. If an error occurs, this function returns -1.
- The buffer \a s must be preallocated. The data is \e not encoded.
+ The buffer \a s must be preallocated. The data is \e not decoded.
\sa readBytes(), QIODevice::read(), writeRawData()
*/
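
Editor's note (not part of the patch): a small, self-contained use of readRawData() matching the corrected sentence; the buffer contents are arbitrary.

#include <QByteArray>
#include <QDataStream>

int readRawDemo()
{
    const QByteArray blob("\x01\x02\x03\x04", 4);
    QDataStream in(blob);          // read-only stream over the byte array

    char buf[4];                   // preallocated by the caller
    int n = in.readRawData(buf, sizeof(buf));
    // n == 4 on success, -1 on error; the bytes are copied verbatim (not decoded)
    return n;
}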
diff --git a/src/corelib/serialization/qjsondocument.cpp b/src/corelib/serialization/qjsondocument.cpp
index ab27b45fda..5018f7c267 100644
--- a/src/corelib/serialization/qjsondocument.cpp
+++ b/src/corelib/serialization/qjsondocument.cpp
@@ -344,6 +344,7 @@ QByteArray QJsonDocument::toJson() const
/*!
\enum QJsonDocument::JsonFormat
+ \since 5.1
This value defines the format of the JSON byte array produced
when converting to a QJsonDocument using toJson().
@@ -368,6 +369,7 @@ QByteArray QJsonDocument::toJson() const
*/
/*!
+ \since 5.1
Converts the QJsonDocument to a UTF-8 encoded JSON document in the provided \a format.
\sa fromJson(), JsonFormat
diff --git a/src/corelib/serialization/qjsonvalue.cpp b/src/corelib/serialization/qjsonvalue.cpp
index 3c5b0a0e02..4469302e31 100644
--- a/src/corelib/serialization/qjsonvalue.cpp
+++ b/src/corelib/serialization/qjsonvalue.cpp
@@ -588,6 +588,7 @@ bool QJsonValue::toBool(bool defaultValue) const
}
/*!
+ \since 5.2
Converts the value to an int and returns it.
If type() is not Double or the value is not a whole number,
diff --git a/src/corelib/tools/qchar.h b/src/corelib/tools/qchar.h
index 84df8accc5..8590b91ba3 100644
--- a/src/corelib/tools/qchar.h
+++ b/src/corelib/tools/qchar.h
@@ -93,7 +93,9 @@ public:
Q_STATIC_ASSERT(sizeof(wchar_t) == sizeof(ushort));
#endif
#if defined(Q_OS_WIN) || defined(Q_CLANG_QDOC)
+# if !defined(_WCHAR_T_DEFINED) || defined(_NATIVE_WCHAR_T_DEFINED)
Q_DECL_CONSTEXPR QChar(wchar_t ch) Q_DECL_NOTHROW : ucs(ushort(ch)) {} // implicit
+# endif
#endif
#ifndef QT_NO_CAST_FROM_ASCII
diff --git a/src/corelib/tools/qcollator.cpp b/src/corelib/tools/qcollator.cpp
index f1e3d6652d..5155badcf8 100644
--- a/src/corelib/tools/qcollator.cpp
+++ b/src/corelib/tools/qcollator.cpp
@@ -89,7 +89,12 @@ QCollator::QCollator(const QLocale &locale)
QCollator::QCollator(const QCollator &other)
: d(other.d)
{
- d->ref.ref();
+ if (d) {
+ // Ensure clean, lest both copies try to init() at the same time:
+ if (d->dirty)
+ d->init();
+ d->ref.ref();
+ }
}
/*!
@@ -110,7 +115,12 @@ QCollator &QCollator::operator=(const QCollator &other)
if (d && !d->ref.deref())
delete d;
d = other.d;
- if (d) d->ref.ref();
+ if (d) {
+ // Ensure clean, lest both copies try to init() at the same time:
+ if (d->dirty)
+ d->init();
+ d->ref.ref();
+ }
}
return *this;
}
diff --git a/src/gui/image/qiconengine.cpp b/src/gui/image/qiconengine.cpp
index 1f8e5f321a..3033112df0 100644
--- a/src/gui/image/qiconengine.cpp
+++ b/src/gui/image/qiconengine.cpp
@@ -263,7 +263,7 @@ void QIconEngine::addFile(const QString &/*fileName*/, const QSize &/*size*/, QI
\variable QIconEngine::ScaledPixmapArgument::pixmap
\brief The pixmap that is the best match for the given \l size, \l mode, \l
- \state, and \l scale. This is an output parameter that is set after calling
+ state, and \l scale. This is an output parameter that is set after calling
\l virtual_hook().
*/
diff --git a/src/gui/kernel/qevent.cpp b/src/gui/kernel/qevent.cpp
index e8f3cdd504..2362b93bf0 100644
--- a/src/gui/kernel/qevent.cpp
+++ b/src/gui/kernel/qevent.cpp
@@ -5031,7 +5031,9 @@ void QTouchEvent::TouchPoint::setLastNormalizedPos(const QPointF &lastNormalized
}
// ### remove the following 3 setRect functions and their usages soon
-/*! \internal \obsolete */
+/*! \internal
+ \obsolete
+*/
void QTouchEvent::TouchPoint::setRect(const QRectF &rect)
{
if (d->ref.load() != 1)
@@ -5040,7 +5042,9 @@ void QTouchEvent::TouchPoint::setRect(const QRectF &rect)
d->ellipseDiameters = rect.size();
}
-/*! \internal \obsolete */
+/*! \internal
+ \obsolete
+*/
void QTouchEvent::TouchPoint::setSceneRect(const QRectF &sceneRect)
{
if (d->ref.load() != 1)
@@ -5049,7 +5053,9 @@ void QTouchEvent::TouchPoint::setSceneRect(const QRectF &sceneRect)
d->ellipseDiameters = sceneRect.size();
}
-/*! \internal \obsolete */
+/*! \internal
+ \obsolete
+*/
void QTouchEvent::TouchPoint::setScreenRect(const QRectF &screenRect)
{
if (d->ref.load() != 1)
diff --git a/src/gui/kernel/qinputdevicemanager_p.h b/src/gui/kernel/qinputdevicemanager_p.h
index d73c5526d0..74494d712b 100644
--- a/src/gui/kernel/qinputdevicemanager_p.h
+++ b/src/gui/kernel/qinputdevicemanager_p.h
@@ -72,7 +72,7 @@ public:
DeviceTypeTablet
};
- QInputDeviceManager(QObject *parent = 0);
+ QInputDeviceManager(QObject *parent = nullptr);
int deviceCount(DeviceType type) const;
diff --git a/src/gui/kernel/qplatforminputcontextplugin_p.h b/src/gui/kernel/qplatforminputcontextplugin_p.h
index 3f45df922e..2228d5c8d6 100644
--- a/src/gui/kernel/qplatforminputcontextplugin_p.h
+++ b/src/gui/kernel/qplatforminputcontextplugin_p.h
@@ -66,7 +66,7 @@ class Q_GUI_EXPORT QPlatformInputContextPlugin : public QObject
{
Q_OBJECT
public:
- explicit QPlatformInputContextPlugin(QObject *parent = 0);
+ explicit QPlatformInputContextPlugin(QObject *parent = nullptr);
~QPlatformInputContextPlugin();
virtual QPlatformInputContext *create(const QString &key, const QStringList &paramList) = 0;
diff --git a/src/gui/kernel/qplatformintegrationplugin.h b/src/gui/kernel/qplatformintegrationplugin.h
index f1136965b7..6a0b9dc9a7 100644
--- a/src/gui/kernel/qplatformintegrationplugin.h
+++ b/src/gui/kernel/qplatformintegrationplugin.h
@@ -64,7 +64,7 @@ class Q_GUI_EXPORT QPlatformIntegrationPlugin : public QObject
{
Q_OBJECT
public:
- explicit QPlatformIntegrationPlugin(QObject *parent = 0);
+ explicit QPlatformIntegrationPlugin(QObject *parent = nullptr);
~QPlatformIntegrationPlugin();
virtual QPlatformIntegration *create(const QString &key, const QStringList &paramList);
diff --git a/src/gui/kernel/qplatformsharedgraphicscache.h b/src/gui/kernel/qplatformsharedgraphicscache.h
index e11d2f41a9..b3b5488139 100644
--- a/src/gui/kernel/qplatformsharedgraphicscache.h
+++ b/src/gui/kernel/qplatformsharedgraphicscache.h
@@ -69,7 +69,7 @@ public:
OpenGLTexture
};
- explicit QPlatformSharedGraphicsCache(QObject *parent = 0) : QObject(parent) {}
+ explicit QPlatformSharedGraphicsCache(QObject *parent = nullptr) : QObject(parent) {}
virtual void beginRequestBatch() = 0;
virtual void ensureCacheInitialized(const QByteArray &cacheId, BufferType bufferType,
diff --git a/src/gui/kernel/qplatformthemeplugin.h b/src/gui/kernel/qplatformthemeplugin.h
index 7b56879940..0f88806b0b 100644
--- a/src/gui/kernel/qplatformthemeplugin.h
+++ b/src/gui/kernel/qplatformthemeplugin.h
@@ -63,7 +63,7 @@ class Q_GUI_EXPORT QPlatformThemePlugin : public QObject
{
Q_OBJECT
public:
- explicit QPlatformThemePlugin(QObject *parent = 0);
+ explicit QPlatformThemePlugin(QObject *parent = nullptr);
~QPlatformThemePlugin();
virtual QPlatformTheme *create(const QString &key, const QStringList &paramList) = 0;
diff --git a/src/gui/opengl/qopenglbuffer.cpp b/src/gui/opengl/qopenglbuffer.cpp
index 66cc2b5ce8..69c2baa8d9 100644
--- a/src/gui/opengl/qopenglbuffer.cpp
+++ b/src/gui/opengl/qopenglbuffer.cpp
@@ -371,7 +371,7 @@ void QOpenGLBuffer::write(int offset, const void *data, int count)
{
#ifndef QT_NO_DEBUG
if (!isCreated())
- qWarning("QOpenGLBuffer::allocate(): buffer not created");
+ qWarning("QOpenGLBuffer::write(): buffer not created");
#endif
Q_D(QOpenGLBuffer);
if (d->guard && d->guard->id())
diff --git a/src/network/access/qnetworkrequest.cpp b/src/network/access/qnetworkrequest.cpp
index c2b388bbce..57529761ee 100644
--- a/src/network/access/qnetworkrequest.cpp
+++ b/src/network/access/qnetworkrequest.cpp
@@ -664,10 +664,10 @@ void QNetworkRequest::setAttribute(Attribute code, const QVariant &value)
#ifndef QT_NO_SSL
/*!
- Returns this network request's SSL configuration. By default, no
- SSL settings are specified.
+ Returns this network request's SSL configuration. By default this is the same
+ as QSslConfiguration::defaultConfiguration().
- \sa setSslConfiguration()
+ \sa setSslConfiguration(), QSslConfiguration::defaultConfiguration()
*/
QSslConfiguration QNetworkRequest::sslConfiguration() const
{
@@ -683,9 +683,6 @@ QSslConfiguration QNetworkRequest::sslConfiguration() const
certificates and the ciphers that the SSL backend is allowed to
use.
- By default, no SSL configuration is set, which allows the backends
- to choose freely what configuration is best for them.
-
\sa sslConfiguration(), QSslConfiguration::defaultConfiguration()
*/
void QNetworkRequest::setSslConfiguration(const QSslConfiguration &config)
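
A small sketch of the default described in the corrected documentation (makePinnedRequest and its parameters are illustrative only): a freshly constructed request reports the application-wide default configuration, and setSslConfiguration() narrows it for that request alone:

    #include <QNetworkRequest>
    #include <QSslConfiguration>
    #include <QSslCertificate>
    #include <QUrl>
    #include <QList>

    QNetworkRequest makePinnedRequest(const QUrl &url, const QList<QSslCertificate> &caCerts)
    {
        QNetworkRequest request(url);
        // Without an explicit setSslConfiguration(), this now equals
        // QSslConfiguration::defaultConfiguration().
        QSslConfiguration ssl = request.sslConfiguration();
        ssl.setCaCertificates(caCerts); // trust only these CAs for this request
        request.setSslConfiguration(ssl);
        return request;
    }
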
diff --git a/src/plugins/platforms/windows/qwindowstabletsupport.cpp b/src/plugins/platforms/windows/qwindowstabletsupport.cpp
index dad3e9df9f..2ab59b11ca 100644
--- a/src/plugins/platforms/windows/qwindowstabletsupport.cpp
+++ b/src/plugins/platforms/windows/qwindowstabletsupport.cpp
@@ -486,11 +486,8 @@ bool QWindowsTabletSupport::translateTabletPacketEvent()
const int z = currentDevice == QTabletEvent::FourDMouse ? int(packet.pkZ) : 0;
- // This code is to delay the tablet data one cycle to sync with the mouse location.
- QPointF globalPosF = m_oldGlobalPosF;
- const QPointF currentGlobalPosF =
+ QPointF globalPosF =
m_devices.at(m_currentDevice).scaleCoordinates(packet.pkX, packet.pkY, virtualDesktopArea);
- m_oldGlobalPosF = currentGlobalPosF;
QWindow *target = QGuiApplicationPrivate::tabletDevicePoint(uniqueId).target; // Pass to window that grabbed it.
@@ -498,10 +495,10 @@ bool QWindowsTabletSupport::translateTabletPacketEvent()
const QPoint mouseLocation = QWindowsCursor::mousePosition();
if (m_state == PenProximity) {
m_state = PenDown;
- m_mode = (mouseLocation - currentGlobalPosF).manhattanLength() > m_absoluteRange
+ m_mode = (mouseLocation - globalPosF).manhattanLength() > m_absoluteRange
? MouseMode : PenMode;
qCDebug(lcQpaTablet) << __FUNCTION__ << "mode=" << m_mode << "pen:"
- << currentGlobalPosF << "mouse:" << mouseLocation;
+ << globalPosF << "mouse:" << mouseLocation;
}
if (m_mode == MouseMode)
globalPosF = mouseLocation;
diff --git a/src/plugins/platforms/windows/qwindowstabletsupport.h b/src/plugins/platforms/windows/qwindowstabletsupport.h
index d98dbd4de7..d91701d6a5 100644
--- a/src/plugins/platforms/windows/qwindowstabletsupport.h
+++ b/src/plugins/platforms/windows/qwindowstabletsupport.h
@@ -150,7 +150,6 @@ private:
bool m_tiltSupport = false;
QVector<QWindowsTabletDeviceData> m_devices;
int m_currentDevice = -1;
- QPointF m_oldGlobalPosF;
Mode m_mode = PenMode;
State m_state = PenUp;
};
diff --git a/src/plugins/platforms/windows/uiautomation/qwindowsuiamainprovider.cpp b/src/plugins/platforms/windows/uiautomation/qwindowsuiamainprovider.cpp
index de06077890..fad83fb165 100644
--- a/src/plugins/platforms/windows/uiautomation/qwindowsuiamainprovider.cpp
+++ b/src/plugins/platforms/windows/uiautomation/qwindowsuiamainprovider.cpp
@@ -364,7 +364,7 @@ HRESULT QWindowsUiaMainProvider::GetPropertyValue(PROPERTYID idProp, VARIANT *pR
setVariantBool(accessible->state().focusable, pRetVal);
break;
case UIA_IsOffscreenPropertyId:
- setVariantBool(false, pRetVal);
+ setVariantBool(accessible->state().offscreen, pRetVal);
break;
case UIA_IsContentElementPropertyId:
setVariantBool(true, pRetVal);
@@ -453,30 +453,53 @@ HRESULT QWindowsUiaMainProvider::Navigate(NavigateDirection direction, IRawEleme
QAccessibleInterface *targetacc = nullptr;
- switch (direction) {
- case NavigateDirection_Parent:
- targetacc = accessible->parent();
- if (targetacc && (targetacc->role() == QAccessible::Application)) {
- targetacc = nullptr; // The app's children are considered top level objects.
- }
- break;
- case NavigateDirection_FirstChild:
- targetacc = accessible->child(0);
- break;
- case NavigateDirection_LastChild:
- targetacc = accessible->child(accessible->childCount() - 1);
- break;
- case NavigateDirection_NextSibling:
- case NavigateDirection_PreviousSibling:
+ if (direction == NavigateDirection_Parent) {
if (QAccessibleInterface *parent = accessible->parent()) {
- if (parent->isValid()) {
- int index = parent->indexOfChild(accessible);
- index += (direction == NavigateDirection_NextSibling) ? 1 : -1;
- if (index >= 0 && index < parent->childCount())
- targetacc = parent->child(index);
+ // The Application's children are considered top level objects.
+ if (parent->isValid() && parent->role() != QAccessible::Application) {
+ targetacc = parent;
+ }
+ }
+ } else {
+ QAccessibleInterface *parent = nullptr;
+ int index = 0;
+ int incr = 1;
+ switch (direction) {
+ case NavigateDirection_FirstChild:
+ parent = accessible;
+ index = 0;
+ incr = 1;
+ break;
+ case NavigateDirection_LastChild:
+ parent = accessible;
+ index = accessible->childCount() - 1;
+ incr = -1;
+ break;
+ case NavigateDirection_NextSibling:
+ if ((parent = accessible->parent()))
+ index = parent->indexOfChild(accessible) + 1;
+ incr = 1;
+ break;
+ case NavigateDirection_PreviousSibling:
+ if ((parent = accessible->parent()))
+ index = parent->indexOfChild(accessible) - 1;
+ incr = -1;
+ break;
+ default:
+ Q_UNREACHABLE();
+ break;
+ }
+
+ if (parent && parent->isValid()) {
+ for (int count = parent->childCount(); index >= 0 && index < count; index += incr) {
+ if (QAccessibleInterface *child = parent->child(index)) {
+ if (child->isValid() && !child->state().invisible) {
+ targetacc = child;
+ break;
+ }
+ }
}
}
- break;
}
if (targetacc)
diff --git a/src/plugins/platforms/winrt/qwinrtscreen.cpp b/src/plugins/platforms/winrt/qwinrtscreen.cpp
index 2e79c3505b..7d6659f5e2 100644
--- a/src/plugins/platforms/winrt/qwinrtscreen.cpp
+++ b/src/plugins/platforms/winrt/qwinrtscreen.cpp
@@ -1253,10 +1253,11 @@ HRESULT QWinRTScreen::onPointerUpdated(ICoreWindow *, IPointerEventArgs *args)
boolean isPressed;
pointerPoint->get_IsInContact(&isPressed);
- // Devices like the Hololens set a static pressure of 0.5 independent
- // of the pressed state. In those cases we need to synthesize the
- // pressure value. To our knowledge this does not apply to pens
- if (pointerDeviceType == PointerDeviceType_Touch && pressure == 0.5f)
+ // Devices like the Hololens set a static pressure of 0.0 or 0.5
+ // (depending on the image) independent of the pressed state.
+ // In those cases we need to synthesize the pressure value. To our
+ // knowledge this does not apply to pens.
+ if (pointerDeviceType == PointerDeviceType_Touch && (pressure == 0.0f || pressure == 0.5f))
pressure = isPressed ? 1. : 0.;
const QRectF areaRect(area.X * d->scaleFactor, area.Y * d->scaleFactor,
diff --git a/src/plugins/platforms/xcb/qxcbconnection.cpp b/src/plugins/platforms/xcb/qxcbconnection.cpp
index b4ccb808a0..327811a7d2 100644
--- a/src/plugins/platforms/xcb/qxcbconnection.cpp
+++ b/src/plugins/platforms/xcb/qxcbconnection.cpp
@@ -591,6 +591,8 @@ QXcbConnection::QXcbConnection(QXcbNativeInterface *nativeInterface, bool canGra
m_setup = xcb_get_setup(xcb_connection());
+ m_xdgCurrentDesktop = qgetenv("XDG_CURRENT_DESKTOP").toLower();
+
initializeAllAtoms();
initializeXSync();
@@ -1691,8 +1693,10 @@ bool QXcbConnection::compressEvent(xcb_generic_event_t *event, int currentIndex,
continue;
if (isXIType(next, m_xiOpCode, XCB_INPUT_TOUCH_UPDATE)) {
auto *touchUpdateNextEvent = reinterpret_cast<xcb_input_touch_update_event_t *>(next);
- if (id == touchUpdateNextEvent->detail % INT_MAX)
+ if (id == touchUpdateNextEvent->detail % INT_MAX &&
+ touchUpdateNextEvent->deviceid == touchUpdateEvent->deviceid) {
return true;
+ }
}
}
return false;
diff --git a/src/plugins/platforms/xcb/qxcbconnection.h b/src/plugins/platforms/xcb/qxcbconnection.h
index fa39bf6874..c24bcf91f3 100644
--- a/src/plugins/platforms/xcb/qxcbconnection.h
+++ b/src/plugins/platforms/xcb/qxcbconnection.h
@@ -503,6 +503,9 @@ public:
void grabServer();
void ungrabServer();
+ bool isUnity() const { return m_xdgCurrentDesktop == "unity"; }
+ bool isGnome() const { return m_xdgCurrentDesktop == "gnome"; }
+
QXcbNativeInterface *nativeInterface() const { return m_nativeInterface; }
QXcbSystemTrayTracker *systemTrayTracker() const;
@@ -521,6 +524,7 @@ public:
Qt::MouseButton xiToQtMouseButton(uint32_t b);
void xi2UpdateScrollingDevices();
bool startSystemMoveResizeForTouchBegin(xcb_window_t window, const QPoint &point, int corner);
+ void abortSystemMoveResizeForTouch();
bool isTouchScreen(int id);
#endif
QXcbEventReader *eventReader() const { return m_reader; }
@@ -565,6 +569,7 @@ private:
bool m_xi2Enabled = false;
#if QT_CONFIG(xcb_xinput)
+ QVector<int> m_floatingSlaveDevices;
int m_xi2Minor = -1;
void initializeXInput2();
void xi2SetupDevice(void *info, bool removeExisting = true);
@@ -720,6 +725,8 @@ private:
bool m_peekerIndexCacheDirty = false;
QHash<qint32, qint32> m_peekerToCachedIndex;
friend class QXcbEventReader;
+
+ QByteArray m_xdgCurrentDesktop;
};
#if QT_CONFIG(xcb_xinput)
#if QT_CONFIG(tabletevent)
diff --git a/src/plugins/platforms/xcb/qxcbconnection_xi2.cpp b/src/plugins/platforms/xcb/qxcbconnection_xi2.cpp
index 7ed61bff4e..01b1b37bb8 100644
--- a/src/plugins/platforms/xcb/qxcbconnection_xi2.cpp
+++ b/src/plugins/platforms/xcb/qxcbconnection_xi2.cpp
@@ -113,7 +113,7 @@ void QXcbConnection::xi2SelectDeviceEvents(xcb_window_t window)
}
qt_xcb_input_event_mask_t mask;
- mask.header.deviceid = XCB_INPUT_DEVICE_ALL_MASTER;
+ mask.header.deviceid = XCB_INPUT_DEVICE_ALL;
mask.header.mask_len = 1;
mask.mask = bitMask;
xcb_void_cookie_t cookie =
@@ -309,6 +309,7 @@ void QXcbConnection::xi2SetupDevices()
m_scrollingDevices.clear();
m_touchDevices.clear();
m_xiMasterPointerIds.clear();
+ m_floatingSlaveDevices.clear();
auto reply = Q_XCB_REPLY(xcb_input_xi_query_device, m_connection, XCB_INPUT_DEVICE_ALL);
if (!reply) {
@@ -319,6 +320,10 @@ void QXcbConnection::xi2SetupDevices()
auto it = xcb_input_xi_query_device_infos_iterator(reply.get());
for (; it.rem; xcb_input_xi_device_info_next(&it)) {
xcb_input_xi_device_info_t *deviceInfo = it.data;
+ if (deviceInfo->type == XCB_INPUT_DEVICE_TYPE_FLOATING_SLAVE) {
+ m_floatingSlaveDevices.append(deviceInfo->deviceid);
+ continue;
+ }
if (deviceInfo->type == XCB_INPUT_DEVICE_TYPE_MASTER_POINTER) {
m_xiMasterPointerIds.append(deviceInfo->deviceid);
continue;
@@ -542,6 +547,72 @@ static inline qreal fixed1616ToReal(xcb_input_fp1616_t val)
}
#endif // QT_CONFIG(tabletevent)
+namespace {
+
+/*! \internal
+
+ Qt listens for XIAllDevices to avoid losing mouse events. This function
+ ensures that we don't process the same event twice: from a slave device and
+ then again from a master device.
+
+ In a normal use case (e.g. mouse press and release inside a window), we will
+ drop events from master devices as duplicates. Another advantage of processing
+ events from slave devices is that they don't share button state. All buttons
+ on a master device share the state.
+
+ Examples of special cases:
+
+ - During system move/resize, the window manager (_NET_WM_MOVERESIZE) grabs the
+ master pointer; in this case we process the matching release from the slave
+ device. A master device event is not sent by the server, hence no duplicate
+ event to drop. If we listened for XIAllMasterDevices instead, we would never
+ see a release event in this case.
+
+ - If we dismiss a context menu by clicking somewhere outside a Qt application,
+ we will process the mouse press from the master pointer as that is the
+ device we are grabbing. We are not grabbing slave devices (grabbing on the
+ slave device is buggy according to 19d289ab1b5bde3e136765e5432b5c7d004df3a4).
+ And since the event occurs outside our window, the slave device event is
+ not sent to us by the server, hence no duplicate event to drop.
+*/
+bool isDuplicateEvent(xcb_ge_event_t *event)
+{
+ struct qXIEvent {
+ bool isValid = false;
+ uint16_t sourceid;
+ uint8_t event_type;
+ uint32_t detail;
+ int32_t root_x;
+ int32_t root_y;
+ };
+ static qXIEvent lastSeenEvent;
+
+ bool isDuplicate = false;
+ auto xiDeviceEvent = reinterpret_cast<qt_xcb_input_device_event_t *>(event);
+ if (lastSeenEvent.isValid) {
+ isDuplicate = lastSeenEvent.sourceid == xiDeviceEvent->sourceid &&
+ lastSeenEvent.event_type == xiDeviceEvent->event_type &&
+ lastSeenEvent.detail == xiDeviceEvent->detail &&
+ lastSeenEvent.root_x == xiDeviceEvent->root_x &&
+ lastSeenEvent.root_y == xiDeviceEvent->root_y;
+ } else {
+ lastSeenEvent.isValid = true;
+ }
+ lastSeenEvent.sourceid = xiDeviceEvent->sourceid;
+ lastSeenEvent.event_type = xiDeviceEvent->event_type;
+ lastSeenEvent.detail = xiDeviceEvent->detail;
+ lastSeenEvent.root_x = xiDeviceEvent->root_x;
+ lastSeenEvent.root_y = xiDeviceEvent->root_y;
+
+ if (isDuplicate)
+ // This sanity check ensures that special cases like QTBUG-59277 keep working.
+ lastSeenEvent.isValid = false; // An event can be a duplicate only once.
+
+ return isDuplicate;
+}
+
+} // namespace
+
void QXcbConnection::xi2HandleEvent(xcb_ge_event_t *event)
{
auto *xiEvent = reinterpret_cast<qt_xcb_input_device_event_t *>(event);
@@ -550,15 +621,31 @@ void QXcbConnection::xi2HandleEvent(xcb_ge_event_t *event)
xcb_input_enter_event_t *xiEnterEvent = nullptr;
QXcbWindowEventListener *eventListener = 0;
+ bool isTouchEvent = true;
switch (xiEvent->event_type) {
case XCB_INPUT_BUTTON_PRESS:
case XCB_INPUT_BUTTON_RELEASE:
case XCB_INPUT_MOTION:
+ isTouchEvent = false;
+ if (!xi2MouseEventsDisabled() && isDuplicateEvent(event))
+ return;
case XCB_INPUT_TOUCH_BEGIN:
case XCB_INPUT_TOUCH_UPDATE:
case XCB_INPUT_TOUCH_END:
{
xiDeviceEvent = xiEvent;
+
+ if (m_floatingSlaveDevices.contains(xiDeviceEvent->sourceid))
+ return; // Not interested in floating slave device events, only in attached slaves.
+
+ bool isSlaveEvent = xiDeviceEvent->deviceid == xiDeviceEvent->sourceid;
+ if (!xi2MouseEventsDisabled() && isTouchEvent && isSlaveEvent) {
+ // For touch events we want events only from master devices; currently
+ // there is no apparent reason to consider events from slave devices.
+ return;
+ }
+
eventListener = windowEventListenerFromId(xiDeviceEvent->event);
sourceDeviceId = xiDeviceEvent->sourceid; // use the actual device id instead of the master
break;
@@ -830,6 +917,11 @@ bool QXcbConnection::startSystemMoveResizeForTouchBegin(xcb_window_t window, con
return false;
}
+void QXcbConnection::abortSystemMoveResizeForTouch()
+{
+ m_startSystemMoveResizeInfo.window = XCB_NONE;
+}
+
bool QXcbConnection::xi2SetMouseGrabEnabled(xcb_window_t w, bool grab)
{
bool ok = false;
diff --git a/src/plugins/platforms/xcb/qxcbwindow.cpp b/src/plugins/platforms/xcb/qxcbwindow.cpp
index 9e9058e6e9..ed37c6b1f8 100644
--- a/src/plugins/platforms/xcb/qxcbwindow.cpp
+++ b/src/plugins/platforms/xcb/qxcbwindow.cpp
@@ -2569,18 +2569,41 @@ bool QXcbWindow::startSystemMove(const QPoint &pos)
bool QXcbWindow::startSystemMoveResize(const QPoint &pos, int corner)
{
+#if QT_CONFIG(xcb_xinput)
const xcb_atom_t moveResize = connection()->atom(QXcbAtom::_NET_WM_MOVERESIZE);
if (!connection()->wmSupport()->isSupportedByWM(moveResize))
return false;
+
const QPoint globalPos = QHighDpi::toNativePixels(window()->mapToGlobal(pos), window()->screen());
-#if QT_CONFIG(xcb_xinput)
- if (connection()->startSystemMoveResizeForTouchBegin(m_window, globalPos, corner))
- return true;
-#endif
- return doStartSystemMoveResize(globalPos, corner);
+ // ### FIXME QTBUG-53389
+ bool startedByTouch = connection()->startSystemMoveResizeForTouchBegin(m_window, globalPos, corner);
+ if (startedByTouch) {
+ if (connection()->isUnity() || connection()->isGnome()) {
+ // These desktops fail to move/resize via _NET_WM_MOVERESIZE (WM bug?).
+ connection()->abortSystemMoveResizeForTouch();
+ return false;
+ }
+ // KWin, Openbox, AwesomeWM have been tested to work with _NET_WM_MOVERESIZE.
+ } else { // Started by mouse press.
+ if (!connection()->hasXInput2() || connection()->xi2MouseEventsDisabled()) {
+ // Without XI2 we can't get button press/move/release events.
+ return false;
+ }
+ if (connection()->isUnity())
+ return false; // _NET_WM_MOVERESIZE on this WM is bouncy (WM bug?).
+
+ doStartSystemMoveResize(globalPos, corner);
+ }
+
+ return true;
+#else
+ Q_UNUSED(pos);
+ Q_UNUSED(corner);
+ return false;
+#endif // xcb_xinput
}
-bool QXcbWindow::doStartSystemMoveResize(const QPoint &globalPos, int corner)
+void QXcbWindow::doStartSystemMoveResize(const QPoint &globalPos, int corner)
{
const xcb_atom_t moveResize = connection()->atom(QXcbAtom::_NET_WM_MOVERESIZE);
xcb_client_message_event_t xev;
@@ -2607,7 +2630,6 @@ bool QXcbWindow::doStartSystemMoveResize(const QPoint &globalPos, int corner)
xcb_send_event(connection()->xcb_connection(), false, xcbScreen()->root(),
XCB_EVENT_MASK_SUBSTRUCTURE_REDIRECT | XCB_EVENT_MASK_SUBSTRUCTURE_NOTIFY,
(const char *)&xev);
- return true;
}
// Sends an XEmbed message.
diff --git a/src/plugins/platforms/xcb/qxcbwindow.h b/src/plugins/platforms/xcb/qxcbwindow.h
index c6bc915b6a..047ee2eae9 100644
--- a/src/plugins/platforms/xcb/qxcbwindow.h
+++ b/src/plugins/platforms/xcb/qxcbwindow.h
@@ -171,7 +171,7 @@ public:
QXcbScreen *xcbScreen() const;
bool startSystemMoveResize(const QPoint &pos, int corner);
- bool doStartSystemMoveResize(const QPoint &globalPos, int corner);
+ void doStartSystemMoveResize(const QPoint &globalPos, int corner);
bool isTrayIconWindow() const { return m_trayIconWindow; }
diff --git a/src/plugins/styles/mac/qmacstyle_mac.mm b/src/plugins/styles/mac/qmacstyle_mac.mm
index ae2765fc40..dd7959f3cf 100644
--- a/src/plugins/styles/mac/qmacstyle_mac.mm
+++ b/src/plugins/styles/mac/qmacstyle_mac.mm
@@ -1827,6 +1827,8 @@ NSView *QMacStylePrivate::cocoaControl(CocoaControl widget) const
auto *button = static_cast<NSButton *>(bv);
button.buttonType = buttonType;
button.bezelStyle = bezelStyle;
+ if (widget.type == Button_CheckBox)
+ button.allowsMixedState = YES;
}
return bv;
diff --git a/src/widgets/doc/src/widgets-and-layouts/styles.qdoc b/src/widgets/doc/src/widgets-and-layouts/styles.qdoc
index 6e697a97df..a9d95bc02c 100644
--- a/src/widgets/doc/src/widgets-and-layouts/styles.qdoc
+++ b/src/widgets/doc/src/widgets-and-layouts/styles.qdoc
@@ -37,10 +37,9 @@
drawing, ensuring that they look exactly like the equivalent
native widgets.
- Qt comes with a selection of built-in styles. Some styles are only
- available on specific platforms (such as the Mac and Windows
- Vista styles). Custom styles are made available as plugins or by
- creating an instance of a specific style class with
+ Qt comes with a selection of built-in styles. Certain styles are only
+ available on specific platforms. Custom styles are made available as
+ plugins or by creating an instance of a specific style class with
QStyleFactory::create() and setting it with QApplication::setStyle().
\section1 Customizing a Style
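
A minimal sketch of the mechanism the paragraph describes ("Fusion" is just an example key; the keys actually available depend on the platform and on installed style plugins):

    #include <QApplication>
    #include <QStyleFactory>
    #include <QStyle>
    #include <QWidget>

    int main(int argc, char *argv[])
    {
        QApplication app(argc, argv);
        // Create a built-in or plugin-provided style by key and install it application-wide.
        if (QStyle *style = QStyleFactory::create(QStringLiteral("Fusion")))
            QApplication::setStyle(style); // QApplication takes ownership of the style
        QWidget w;
        w.show();
        return app.exec();
    }
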
diff --git a/src/widgets/styles/qstylesheetstyle.cpp b/src/widgets/styles/qstylesheetstyle.cpp
index 538d6a6455..fccc4f33a2 100644
--- a/src/widgets/styles/qstylesheetstyle.cpp
+++ b/src/widgets/styles/qstylesheetstyle.cpp
@@ -2925,10 +2925,7 @@ void QStyleSheetStyle::unpolish(QWidget *w)
styleSheetCaches->renderRulesCache.remove(w);
styleSheetCaches->styleSheetCache.remove(w);
unsetPalette(w);
- w->setProperty("_q_stylesheet_minw", QVariant());
- w->setProperty("_q_stylesheet_minh", QVariant());
- w->setProperty("_q_stylesheet_maxw", QVariant());
- w->setProperty("_q_stylesheet_maxh", QVariant());
+ setGeometry(w);
w->setAttribute(Qt::WA_StyleSheetTarget, false);
w->setAttribute(Qt::WA_StyleSheet, false);
QObject::disconnect(w, 0, this, 0);
diff --git a/tests/auto/corelib/io/largefile/tst_largefile.cpp b/tests/auto/corelib/io/largefile/tst_largefile.cpp
index 2d13e6166d..dca7672b8e 100644
--- a/tests/auto/corelib/io/largefile/tst_largefile.cpp
+++ b/tests/auto/corelib/io/largefile/tst_largefile.cpp
@@ -510,7 +510,7 @@ void tst_LargeFile::mapFile()
//Mac: memory-mapping beyond EOF may succeed but it could generate bus error on access
//FreeBSD: same
//Linux: memory-mapping beyond EOF usually succeeds, but depends on the filesystem
-// 32-bit: limited to 44-bit offsets
+// 32-bit: limited to 44-bit offsets (when sizeof(off_t) == 8)
//Windows: memory-mapping beyond EOF is not allowed
void tst_LargeFile::mapOffsetOverflow()
{
@@ -521,9 +521,9 @@ void tst_LargeFile::mapOffsetOverflow()
#else
Succeeds = true,
# if (defined(Q_OS_LINUX) || defined(Q_OS_ANDROID)) && Q_PROCESSOR_WORDSIZE == 4
- MaxOffset = 43
+ MaxOffset = sizeof(QT_OFF_T) > 4 ? 43 : 30
# else
- MaxOffset = 63
+ MaxOffset = 8 * sizeof(QT_OFF_T) - 1
# endif
#endif
};
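
The 44-bit figure in the comment can be reproduced from the 32-bit Linux mmap2 ABI (an assumption about the syscall interface, not something the test states): the file offset is passed as a 32-bit count of 4096-byte pages, so the largest addressable offset is 2^32 pages * 2^12 bytes = 2^44 bytes, hence a maximum tested offset bit of 43:

    // Sketch of the arithmetic only; pageBits assumes 4 KiB pages.
    constexpr long long pageBits   = 12;              // 4096-byte pages
    constexpr long long offsetBits = 32 + pageBits;   // 44-bit offsets via mmap2
    static_assert(offsetBits - 1 == 43, "matches MaxOffset above");
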
diff --git a/tests/auto/corelib/io/qdir/tst_qdir.cpp b/tests/auto/corelib/io/qdir/tst_qdir.cpp
index 83492188a9..afa15fe895 100644
--- a/tests/auto/corelib/io/qdir/tst_qdir.cpp
+++ b/tests/auto/corelib/io/qdir/tst_qdir.cpp
@@ -56,6 +56,12 @@
#define Q_NO_SYMLINKS
#endif
+#ifdef Q_OS_WIN
+#define DRIVE "Q:"
+#else
+#define DRIVE
+#endif
+
#ifdef QT_BUILD_INTERNAL
QT_BEGIN_NAMESPACE
@@ -1385,14 +1391,12 @@ void tst_QDir::absoluteFilePath_data()
QTest::addColumn<QString>("expectedFilePath");
#if defined(Q_OS_WIN) && !defined(Q_OS_WINRT)
- QTest::newRow("UNC") << "//machine" << "share" << "//machine/share";
- QTest::newRow("Drive") << "c:/side/town" << "/my/way/home" << "c:/my/way/home";
-#endif
-
-#ifdef Q_OS_WIN
-#define DRIVE "Q:"
-#else
-#define DRIVE
+ QTest::newRow("UNC-rel") << "//machine/share" << "dir" << "//machine/share/dir";
+ QTest::newRow("UNC-abs") << "//machine/share/path/to/blah" << "/dir" << "//machine/share/dir";
+ QTest::newRow("UNC-UNC") << "//machine/share/path/to/blah" << "//host/share/path" << "//host/share/path";
+ QTest::newRow("Drive-UNC") << "c:/side/town" << "//host/share/path" << "//host/share/path";
+ QTest::newRow("Drive-LTUNC") << "c:/side/town" << "\\/leaning\\toothpick/path" << "\\/leaning\\toothpick/path";
+ QTest::newRow("Drive-abs") << "c:/side/town" << "/my/way/home" << "c:/my/way/home";
#endif
QTest::newRow("0") << DRIVE "/etc" << "/passwd" << DRIVE "/passwd";
@@ -1401,8 +1405,10 @@ void tst_QDir::absoluteFilePath_data()
QTest::newRow("3") << "relative" << "path" << QDir::currentPath() + "/relative/path";
QTest::newRow("4") << "" << "" << QDir::currentPath();
- QTest::newRow("resource") << ":/prefix" << "foo.bar" << ":/prefix/foo.bar";
-#undef DRIVE
+ // Resource paths are absolute:
+ QTest::newRow("resource-rel") << ":/prefix" << "foo.bar" << ":/prefix/foo.bar";
+ QTest::newRow("abs-res-res") << ":/prefix" << ":/abc.txt" << ":/abc.txt";
+ QTest::newRow("abs-res-path") << DRIVE "/etc" << ":/abc.txt" << ":/abc.txt";
}
void tst_QDir::absoluteFilePath()
@@ -1517,12 +1523,17 @@ void tst_QDir::filePath_data()
QTest::addColumn<QString>("fileName");
QTest::addColumn<QString>("expectedFilePath");
- QTest::newRow("0") << "/etc" << "/passwd" << "/passwd";
- QTest::newRow("1") << "/etc" << "passwd" << "/etc/passwd";
- QTest::newRow("2") << "/" << "passwd" << "/passwd";
- QTest::newRow("3") << "relative" << "path" << "relative/path";
- QTest::newRow("4") << "" << "" << ".";
+ QTest::newRow("abs-abs") << DRIVE "/etc" << DRIVE "/passwd" << DRIVE "/passwd";
+ QTest::newRow("abs-rel") << DRIVE "/etc" << "passwd" << DRIVE "/etc/passwd";
+ QTest::newRow("root-rel") << DRIVE "/" << "passwd" << DRIVE "/passwd";
+ QTest::newRow("rel-rel") << "relative" << "path" << "relative/path";
+ QTest::newRow("empty-empty") << "" << "" << ".";
QTest::newRow("resource") << ":/prefix" << "foo.bar" << ":/prefix/foo.bar";
+#ifdef Q_OS_WIN
+ QTest::newRow("abs-LTUNC") << "Q:/path" << "\\/leaning\\tooth/pick" << "\\/leaning\\tooth/pick";
+ QTest::newRow("LTUNC-slash") << "\\/leaning\\tooth/pick" << "/path" << "//leaning/tooth/path";
+ QTest::newRow("LTUNC-abs") << "\\/leaning\\tooth/pick" << "Q:/path" << "Q:/path";
+#endif
}
void tst_QDir::filePath()
@@ -1588,6 +1599,9 @@ void tst_QDir::exists2_data()
QTest::newRow("2") << "" << false;
QTest::newRow("3") << "testData" << true;
QTest::newRow("4") << "/testData" << false;
+#ifdef Q_OS_WIN
+ QTest::newRow("abs") << "Q:/testData" << false;
+#endif
QTest::newRow("5") << "tst_qdir.cpp" << true;
QTest::newRow("6") << "/resources.cpp" << false;
QTest::newRow("resource0") << ":/prefix/foo.bar" << false;
diff --git a/tests/auto/corelib/itemmodels/qsortfilterproxymodel/tst_qsortfilterproxymodel.cpp b/tests/auto/corelib/itemmodels/qsortfilterproxymodel/tst_qsortfilterproxymodel.cpp
index a9138a6505..b3431bcc7a 100644
--- a/tests/auto/corelib/itemmodels/qsortfilterproxymodel/tst_qsortfilterproxymodel.cpp
+++ b/tests/auto/corelib/itemmodels/qsortfilterproxymodel/tst_qsortfilterproxymodel.cpp
@@ -1904,6 +1904,8 @@ void tst_QSortFilterProxyModel::changeSourceData_data()
QTest::addColumn<QString>("newValue");
QTest::addColumn<IntPairList>("removeIntervals");
QTest::addColumn<IntPairList>("insertIntervals");
+ QTest::addColumn<int>("expectedDataChangedRow"); // -1 if no dataChanged signal expected
+ QTest::addColumn<bool>("expectedLayoutChanged");
QTest::addColumn<QStringList>("proxyItems");
QTest::newRow("move_to_end_ascending")
@@ -1916,6 +1918,8 @@ void tst_QSortFilterProxyModel::changeSourceData_data()
<< "z" // newValue
<< IntPairList() // removeIntervals
<< IntPairList() // insertIntervals
+ << 2 // dataChanged(row 2) is emitted, see comment "Make sure we also emit dataChanged for the rows" in the source code (unclear why, though)
+ << true // layoutChanged
<< (QStringList() << "b" << "c" << "z") // proxyItems
;
@@ -1929,6 +1933,8 @@ void tst_QSortFilterProxyModel::changeSourceData_data()
<< "a" // newValue
<< IntPairList() // removeIntervals
<< IntPairList() // insertIntervals
+ << 2 // dataChanged(row 2) is emitted, see comment "Make sure we also emit dataChanged for the rows" in the source code (unclear why, though)
+ << true // layoutChanged
<< (QStringList() << "z" << "b" << "a") // proxyItems
;
@@ -1942,9 +1948,26 @@ void tst_QSortFilterProxyModel::changeSourceData_data()
<< "a" // newValue
<< IntPairList() // removeIntervals
<< IntPairList() // insertIntervals
+ << -1 // no dataChanged signal
+ << false // layoutChanged
<< (QStringList() << "b" << "a") // proxyItems
;
+ QTest::newRow("no_effect_on_filtering")
+ << (QStringList() << "a" << "b") // sourceItems
+ << static_cast<int>(Qt::AscendingOrder) // sortOrder
+ << "" // filter
+ << (QStringList() << "a" << "b") // expectedInitialProxyItems
+ << true // dynamic
+ << 1 // row
+ << "z" // newValue
+ << IntPairList() // removeIntervals
+ << IntPairList() // insertIntervals
+ << 1 // expectedDataChangedRow
+ << false // layoutChanged
+ << (QStringList() << "a" << "z") // proxyItems
+ ;
+
QTest::newRow("filtered_out_value_stays_out")
<< (QStringList() << "a" << "b" << "c" << "d") // sourceItems
<< static_cast<int>(Qt::AscendingOrder) // sortOrder
@@ -1955,6 +1978,8 @@ void tst_QSortFilterProxyModel::changeSourceData_data()
<< "x" // newValue
<< IntPairList() // removeIntervals
<< IntPairList() // insertIntervals
+ << -1 // no dataChanged signal
+ << false // layoutChanged
<< (QStringList() << "a" << "c") // proxyItems
;
@@ -1968,6 +1993,8 @@ void tst_QSortFilterProxyModel::changeSourceData_data()
<< "x" // newValue
<< IntPairList() // removeIntervals
<< (IntPairList() << IntPair(2, 2)) // insertIntervals
+ << -1 // no dataChanged signal
+ << false // layoutChanged
<< (QStringList() << "a" << "c" << "x") // proxyItems
;
@@ -1981,6 +2008,8 @@ void tst_QSortFilterProxyModel::changeSourceData_data()
<< "x" // newValue
<< (IntPairList() << IntPair(1, 1)) // removeIntervals
<< IntPairList() // insertIntervals
+ << -1 // no dataChanged signal
+ << false // layoutChanged
<< (QStringList() << "a") // proxyItems
;
@@ -1994,6 +2023,8 @@ void tst_QSortFilterProxyModel::changeSourceData_data()
<< "x" // newValue
<< IntPairList() // removeIntervals
<< IntPairList() // insertIntervals
+ << 0 // expectedDataChangedRow
+ << false // layoutChanged
<< (QStringList() << "x" << "b" << "c") // proxyItems
;
}
@@ -2009,6 +2040,8 @@ void tst_QSortFilterProxyModel::changeSourceData()
QFETCH(QString, newValue);
QFETCH(IntPairList, removeIntervals);
QFETCH(IntPairList, insertIntervals);
+ QFETCH(int, expectedDataChangedRow);
+ QFETCH(bool, expectedLayoutChanged);
QFETCH(QStringList, proxyItems);
QStandardItemModel model;
@@ -2037,9 +2070,13 @@ void tst_QSortFilterProxyModel::changeSourceData()
QSignalSpy removeSpy(&proxy, &QSortFilterProxyModel::rowsRemoved);
QSignalSpy insertSpy(&proxy, &QSortFilterProxyModel::rowsInserted);
+ QSignalSpy dataChangedSpy(&proxy, &QSortFilterProxyModel::dataChanged);
+ QSignalSpy layoutChangedSpy(&proxy, &QSortFilterProxyModel::layoutChanged);
QVERIFY(removeSpy.isValid());
QVERIFY(insertSpy.isValid());
+ QVERIFY(dataChangedSpy.isValid());
+ QVERIFY(layoutChangedSpy.isValid());
{
QModelIndex index = model.index(row, 0, QModelIndex());
@@ -2069,6 +2106,17 @@ void tst_QSortFilterProxyModel::changeSourceData()
QModelIndex index = proxy.index(i, 0, QModelIndex());
QCOMPARE(proxy.data(index, Qt::DisplayRole).toString(), proxyItems.at(i));
}
+
+ if (expectedDataChangedRow == -1) {
+ QCOMPARE(dataChangedSpy.count(), 0);
+ } else {
+ QCOMPARE(dataChangedSpy.count(), 1);
+ const QModelIndex idx = dataChangedSpy.at(0).at(0).value<QModelIndex>();
+ QCOMPARE(idx.row(), expectedDataChangedRow);
+ QCOMPARE(idx.column(), 0);
+ }
+
+ QCOMPARE(layoutChangedSpy.count(), expectedLayoutChanged ? 1 : 0);
}
// Checks that the model is a table, and that each and every row is like this:
diff --git a/tests/auto/corelib/plugin/plugin.pro b/tests/auto/corelib/plugin/plugin.pro
index 774edc655a..b094c24e55 100644
--- a/tests/auto/corelib/plugin/plugin.pro
+++ b/tests/auto/corelib/plugin/plugin.pro
@@ -11,5 +11,6 @@ qtConfig(library): SUBDIRS += \
contains(CONFIG, static) {
message(Disabling tests requiring shared build of Qt)
SUBDIRS -= qfactoryloader \
+ qplugin \
qpluginloader
}
diff --git a/tests/auto/corelib/plugin/qplugin/invalidplugin/invalidplugin.pro b/tests/auto/corelib/plugin/qplugin/invalidplugin/invalidplugin.pro
new file mode 100644
index 0000000000..d953c6d367
--- /dev/null
+++ b/tests/auto/corelib/plugin/qplugin/invalidplugin/invalidplugin.pro
@@ -0,0 +1,5 @@
+QT = core
+TEMPLATE = lib
+CONFIG += plugin
+SOURCES = main.cpp
+DESTDIR = ../plugins
diff --git a/tests/auto/corelib/plugin/qplugin/invalidplugin/main.cpp b/tests/auto/corelib/plugin/qplugin/invalidplugin/main.cpp
new file mode 100644
index 0000000000..e6603ec89f
--- /dev/null
+++ b/tests/auto/corelib/plugin/qplugin/invalidplugin/main.cpp
@@ -0,0 +1,49 @@
+/****************************************************************************
+**
+** Copyright (C) 2018 Intel Corporation.
+** Contact: https://www.qt.io/licensing/
+**
+** This file is part of the test suite of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:GPL-EXCEPT$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and The Qt Company. For licensing terms
+** and conditions see https://www.qt.io/terms-conditions. For further
+** information use the contact form at https://www.qt.io/contact-us.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3 as published by the Free Software
+** Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
+** included in the packaging of this file. Please review the following
+** information to ensure the GNU General Public License requirements will
+** be met: https://www.gnu.org/licenses/gpl-3.0.html.
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include <qplugin.h>
+
+QT_PLUGIN_METADATA_SECTION
+static const char pluginMetaData[512] = {
+ 'q', 'p', 'l', 'u', 'g', 'i', 'n', ' ',
+ 't', 'e', 's', 't', 'f', 'i', 'l', 'e'
+};
+
+extern "C" {
+
+const void *qt_plugin_query_metadata()
+{
+ return pluginMetaData;
+}
+
+Q_DECL_EXPORT void *qt_plugin_instance()
+{
+ return nullptr;
+}
+
+}
diff --git a/tests/auto/corelib/plugin/qplugin/qplugin.pro b/tests/auto/corelib/plugin/qplugin/qplugin.pro
index 5283c2d52b..96fc704c07 100644
--- a/tests/auto/corelib/plugin/qplugin/qplugin.pro
+++ b/tests/auto/corelib/plugin/qplugin/qplugin.pro
@@ -1,5 +1,5 @@
TEMPLATE = subdirs
-TESTPLUGINS =
+TESTPLUGINS = invalidplugin
win32 {
contains(QT_CONFIG, debug): TESTPLUGINS += debugplugin
@@ -8,7 +8,7 @@ win32 {
CONFIG(debug, debug|release): TESTPLUGINS += debugplugin
CONFIG(release, debug|release): TESTPLUGINS += releaseplugin
} else {
- TESTPLUGINS = debugplugin releaseplugin
+ TESTPLUGINS += debugplugin releaseplugin
}
SUBDIRS += main $$TESTPLUGINS
diff --git a/tests/auto/corelib/plugin/qplugin/tst_qplugin.cpp b/tests/auto/corelib/plugin/qplugin/tst_qplugin.cpp
index ee7cf7ded8..d285ed79c0 100644
--- a/tests/auto/corelib/plugin/qplugin/tst_qplugin.cpp
+++ b/tests/auto/corelib/plugin/qplugin/tst_qplugin.cpp
@@ -37,6 +37,7 @@ class tst_QPlugin : public QObject
Q_OBJECT
QDir dir;
+ QString invalidPluginName;
public:
tst_QPlugin();
@@ -45,6 +46,8 @@ private slots:
void initTestCase();
void loadDebugPlugin();
void loadReleasePlugin();
+ void scanInvalidPlugin_data();
+ void scanInvalidPlugin();
};
tst_QPlugin::tst_QPlugin()
@@ -57,6 +60,10 @@ void tst_QPlugin::initTestCase()
QVERIFY2(dir.exists(),
qPrintable(QString::fromLatin1("Cannot find the 'plugins' directory starting from '%1'").
arg(QDir::toNativeSeparators(QDir::currentPath()))));
+
+ const auto fileNames = dir.entryList({"*invalid*"}, QDir::Files);
+ if (!fileNames.isEmpty())
+ invalidPluginName = dir.absoluteFilePath(fileNames.first());
}
void tst_QPlugin::loadDebugPlugin()
@@ -90,6 +97,7 @@ void tst_QPlugin::loadReleasePlugin()
{
const auto fileNames = dir.entryList(QStringList() << "*release*", QDir::Files);
for (const QString &fileName : fileNames) {
+ if (!QLibrary::isLibrary(fileName))
continue;
QPluginLoader loader(dir.filePath(fileName));
#if defined(Q_OS_UNIX) && !defined(Q_OS_MAC)
@@ -112,5 +120,105 @@ void tst_QPlugin::loadReleasePlugin()
}
}
+void tst_QPlugin::scanInvalidPlugin_data()
+{
+ QTest::addColumn<QByteArray>("metadata");
+ QTest::addColumn<bool>("loads");
+
+ QByteArray prefix = "QTMETADATA ";
+
+ {
+ QJsonObject obj;
+ obj.insert("IID", "org.qt-project.tst_qplugin");
+ obj.insert("className", "tst");
+ obj.insert("version", int(QT_VERSION));
+#ifdef QT_NO_DEBUG
+ obj.insert("debug", false);
+#else
+ obj.insert("debug", true);
+#endif
+ obj.insert("MetaData", QJsonObject());
+ QTest::newRow("control") << (prefix + QJsonDocument(obj).toBinaryData()) << true;
+ }
+
+ QTest::newRow("zeroes") << prefix << false;
+
+ prefix += "qbjs";
+ QTest::newRow("bad-json-version0") << prefix << false;
+ QTest::newRow("bad-json-version2") << (prefix + QByteArray("\2\0\0\0", 4)) << false;
+
+ // valid qbjs version 1
+ prefix += QByteArray("\1\0\0\0");
+
+ // too large for the file (100 MB)
+ QTest::newRow("bad-json-size-large1") << (prefix + QByteArray("\0\0\x40\x06")) << false;
+
+ // too large for binary JSON (512 MB)
+ QTest::newRow("bad-json-size-large2") << (prefix + QByteArray("\0\0\0\x20")) << false;
+
+ // could overflow
+ QTest::newRow("bad-json-size-large3") << (prefix + "\xff\xff\xff\x7f") << false;
+
+}
+
+static const char invalidPluginSignature[] = "qplugin testfile";
+static qsizetype locateMetadata(const uchar *data, qsizetype len)
+{
+ const uchar *dataend = data + len - strlen(invalidPluginSignature);
+
+ for (const uchar *ptr = data; ptr < dataend; ++ptr) {
+ if (*ptr != invalidPluginSignature[0])
+ continue;
+
+ int r = memcmp(ptr, invalidPluginSignature, strlen(invalidPluginSignature));
+ if (r)
+ continue;
+
+ return ptr - data;
+ }
+
+ return -1;
+}
+
+void tst_QPlugin::scanInvalidPlugin()
+{
+ QVERIFY(!invalidPluginName.isEmpty());
+
+ // copy the file
+ QFileInfo fn(invalidPluginName);
+ QTemporaryDir tmpdir;
+ QVERIFY(tmpdir.isValid());
+
+ QString newName = tmpdir.path() + '/' + fn.fileName();
+ QVERIFY(QFile::copy(invalidPluginName, newName));
+
+ {
+ QFile f(newName);
+ QVERIFY(f.open(QIODevice::ReadWrite | QIODevice::Unbuffered));
+ QVERIFY(f.size() > qint64(strlen(invalidPluginSignature)));
+ uchar *data = f.map(0, f.size());
+ QVERIFY(data);
+
+ static const qsizetype offset = locateMetadata(data, f.size());
+ QVERIFY(offset > 0);
+
+ QFETCH(QByteArray, metadata);
+
+ // sanity check
+ QVERIFY(metadata.size() < 512);
+
+ // replace the data
+ memcpy(data + offset, metadata.constData(), metadata.size());
+ memset(data + offset + metadata.size(), 0, 512 - metadata.size());
+ }
+
+ // now try to load this
+ QFETCH(bool, loads);
+ QPluginLoader loader(newName);
+ QCOMPARE(loader.load(), loads);
+ if (loads)
+ loader.unload();
+}
+
QTEST_MAIN(tst_QPlugin)
#include "tst_qplugin.moc"
diff --git a/tests/auto/corelib/plugin/qpluginloader/tst_qpluginloader.cpp b/tests/auto/corelib/plugin/qpluginloader/tst_qpluginloader.cpp
index a496ed318b..f34741281c 100644
--- a/tests/auto/corelib/plugin/qpluginloader/tst_qpluginloader.cpp
+++ b/tests/auto/corelib/plugin/qpluginloader/tst_qpluginloader.cpp
@@ -186,7 +186,7 @@ void tst_QPluginLoader::errorString()
QVERIFY(!unloaded);
}
-#if !defined Q_OS_WIN && !defined Q_OS_MAC && !defined Q_OS_HPUX
+#if !defined(Q_OS_WIN) && !defined(Q_OS_MAC) && !defined(Q_OS_HPUX)
{
QPluginLoader loader( sys_qualifiedLibraryName("almostplugin")); //a plugin with unresolved symbols
loader.setLoadHints(QLibrary::ResolveAllSymbolsHint);
diff --git a/tests/auto/corelib/tools/qcollator/tst_qcollator.cpp b/tests/auto/corelib/tools/qcollator/tst_qcollator.cpp
index 480e723f44..00b22dab6c 100644
--- a/tests/auto/corelib/tools/qcollator/tst_qcollator.cpp
+++ b/tests/auto/corelib/tools/qcollator/tst_qcollator.cpp
@@ -72,6 +72,9 @@ void tst_QCollator::moveSemantics()
QCOMPARE(c2.locale(), de_AT);
QVERIFY(dpointer_is_null(c1));
+ QCollator c3(c1);
+ QVERIFY(dpointer_is_null(c3));
+
c1 = std::move(c2);
QCOMPARE(c1.locale(), de_AT);
QVERIFY(dpointer_is_null(c2));
diff --git a/tests/auto/widgets/styles/qstylesheetstyle/tst_qstylesheetstyle.cpp b/tests/auto/widgets/styles/qstylesheetstyle/tst_qstylesheetstyle.cpp
index 6a4d972baf..43aec651fe 100644
--- a/tests/auto/widgets/styles/qstylesheetstyle/tst_qstylesheetstyle.cpp
+++ b/tests/auto/widgets/styles/qstylesheetstyle/tst_qstylesheetstyle.cpp
@@ -99,6 +99,7 @@ private slots:
void appStyle();
void QTBUG11658_cachecrash();
void styleSheetTargetAttribute();
+ void unpolish();
private:
QColor COLOR(const QWidget& w) {
@@ -2054,6 +2055,17 @@ void tst_QStyleSheetStyle::styleSheetTargetAttribute()
QCOMPARE(pb.testAttribute(Qt::WA_StyleSheetTarget), false);
}
+void tst_QStyleSheetStyle::unpolish()
+{
+ QWidget w;
+ QCOMPARE(w.minimumWidth(), 0);
+ w.setStyleSheet("QWidget { min-width: 100; }");
+ w.ensurePolished();
+ QCOMPARE(w.minimumWidth(), 100);
+ w.setStyleSheet("");
+ QCOMPARE(w.minimumWidth(), 0);
+}
+
QTEST_MAIN(tst_QStyleSheetStyle)
#include "tst_qstylesheetstyle.moc"