author    Mikhail R. Gadelha <mikhail.ramalho@gmail.com>    2019-02-09 00:46:12 +0000
committer Mikhail R. Gadelha <mikhail.ramalho@gmail.com>    2019-02-09 00:46:12 +0000
commit    7b7f1a3de9580d40d78b2e9e2d21f08adcab1362 (patch)
tree      005918a5703ead59866db4e4f47c9eaacc235d8f
parent    e1200565f6a11929c952254b039606440231db46 (diff)
This reverts commit 1440a848a635849b97f7a5cfa0ecc40d37451f5b
and commit a1853e834c65751f92521f7481b15cf0365e796b. They broke arm and aarch64.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@353590 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r--  CMakeLists.txt                                                            27
-rw-r--r--  cmake/modules/FindZ3.cmake                                                51
-rw-r--r--  include/clang/Config/config.h.cmake                                        3
-rw-r--r--  include/clang/StaticAnalyzer/Core/PathSensitive/SMTAPI.h                404
-rw-r--r--  include/clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h   25
-rw-r--r--  include/clang/StaticAnalyzer/Core/PathSensitive/SMTConv.h               227
-rw-r--r--  lib/StaticAnalyzer/Core/BugReporterVisitors.cpp                            4
-rw-r--r--  lib/StaticAnalyzer/Core/CMakeLists.txt                                    16
-rw-r--r--  lib/StaticAnalyzer/Core/SMTConstraintManager.cpp                          18
-rw-r--r--  lib/StaticAnalyzer/Core/Z3ConstraintManager.cpp                          835
-rw-r--r--  test/CMakeLists.txt                                                        2
-rw-r--r--  test/lit.site.cfg.py.in                                                    2
12 files changed, 1457 insertions, 157 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 2c5ea58610..c2016a45ca 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -411,9 +411,34 @@ option(CLANG_BUILD_TOOLS
option(CLANG_ENABLE_ARCMT "Build ARCMT." ON)
option(CLANG_ENABLE_STATIC_ANALYZER "Build static analyzer." ON)
+set(CLANG_ANALYZER_Z3_INSTALL_DIR "" CACHE STRING "Install directory of the Z3 solver.")
+
+find_package(Z3 4.7.1)
+
+if (CLANG_ANALYZER_Z3_INSTALL_DIR)
+ if (NOT Z3_FOUND)
+ message(FATAL_ERROR "Z3 4.7.1 has not been found in CLANG_ANALYZER_Z3_INSTALL_DIR: ${CLANG_ANALYZER_Z3_INSTALL_DIR}.")
+ endif()
+endif()
+
+set(CLANG_ANALYZER_ENABLE_Z3_SOLVER_DEFAULT "${Z3_FOUND}")
+
+option(CLANG_ANALYZER_ENABLE_Z3_SOLVER
+ "Enable Support for the Z3 constraint solver in the Clang Static Analyzer."
+ ${CLANG_ANALYZER_ENABLE_Z3_SOLVER_DEFAULT}
+)
+
+if (CLANG_ANALYZER_ENABLE_Z3_SOLVER)
+ if (NOT Z3_FOUND)
+ message(FATAL_ERROR "CLANG_ANALYZER_ENABLE_Z3_SOLVER cannot be enabled when Z3 is not available.")
+ endif()
+
+ set(CLANG_ANALYZER_WITH_Z3 1)
+endif()
+
option(CLANG_ENABLE_PROTO_FUZZER "Build Clang protobuf fuzzer." OFF)
-if(NOT CLANG_ENABLE_STATIC_ANALYZER AND CLANG_ENABLE_ARCMT)
+if(NOT CLANG_ENABLE_STATIC_ANALYZER AND (CLANG_ENABLE_ARCMT OR CLANG_ANALYZER_ENABLE_Z3_SOLVER))
message(FATAL_ERROR "Cannot disable static analyzer while enabling ARCMT or Z3")
endif()
diff --git a/cmake/modules/FindZ3.cmake b/cmake/modules/FindZ3.cmake
new file mode 100644
index 0000000000..7a224f789e
--- /dev/null
+++ b/cmake/modules/FindZ3.cmake
@@ -0,0 +1,51 @@
+# Looking for Z3 in CLANG_ANALYZER_Z3_INSTALL_DIR
+find_path(Z3_INCLUDE_DIR NAMES z3.h
+ NO_DEFAULT_PATH
+ PATHS ${CLANG_ANALYZER_Z3_INSTALL_DIR}/include
+ PATH_SUFFIXES libz3 z3
+ )
+
+find_library(Z3_LIBRARIES NAMES z3 libz3
+ NO_DEFAULT_PATH
+ PATHS ${CLANG_ANALYZER_Z3_INSTALL_DIR}
+ PATH_SUFFIXES lib bin
+ )
+
+find_program(Z3_EXECUTABLE z3
+ NO_DEFAULT_PATH
+ PATHS ${CLANG_ANALYZER_Z3_INSTALL_DIR}
+ PATH_SUFFIXES bin
+ )
+
+# If Z3 has not been found in CLANG_ANALYZER_Z3_INSTALL_DIR look in the default directories
+find_path(Z3_INCLUDE_DIR NAMES z3.h
+ PATH_SUFFIXES libz3 z3
+ )
+
+find_library(Z3_LIBRARIES NAMES z3 libz3
+ PATH_SUFFIXES lib bin
+ )
+
+find_program(Z3_EXECUTABLE z3
+ PATH_SUFFIXES bin
+ )
+
+if(Z3_INCLUDE_DIR AND Z3_LIBRARIES AND Z3_EXECUTABLE)
+ execute_process (COMMAND ${Z3_EXECUTABLE} -version
+ OUTPUT_VARIABLE libz3_version_str
+ ERROR_QUIET
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+
+ string(REGEX REPLACE "^Z3 version ([0-9.]+)" "\\1"
+ Z3_VERSION_STRING "${libz3_version_str}")
+ unset(libz3_version_str)
+endif()
+
+# handle the QUIETLY and REQUIRED arguments and set Z3_FOUND to TRUE if
+# all listed variables are TRUE
+include(FindPackageHandleStandardArgs)
+FIND_PACKAGE_HANDLE_STANDARD_ARGS(Z3
+ REQUIRED_VARS Z3_LIBRARIES Z3_INCLUDE_DIR
+ VERSION_VAR Z3_VERSION_STRING)
+
+mark_as_advanced(Z3_INCLUDE_DIR Z3_LIBRARIES)
diff --git a/include/clang/Config/config.h.cmake b/include/clang/Config/config.h.cmake
index a737d6afac..1d624450b9 100644
--- a/include/clang/Config/config.h.cmake
+++ b/include/clang/Config/config.h.cmake
@@ -54,6 +54,9 @@
/* Define if we have libxml2 */
#cmakedefine CLANG_HAVE_LIBXML ${CLANG_HAVE_LIBXML}
+/* Define if we have Z3 and want to build with it */
+#cmakedefine CLANG_ANALYZER_WITH_Z3 ${CLANG_ANALYZER_WITH_Z3}
+
/* Define if we have sys/resource.h (rlimits) */
#cmakedefine CLANG_HAVE_RLIMITS ${CLANG_HAVE_RLIMITS}
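The CLANG_ANALYZER_WITH_Z3 define added here is what gates every Z3-specific code path; Z3ConstraintManager.cpp later in this patch compiles its Z3 wrappers only when it is set. A minimal sketch of that pattern (illustration only, assuming config.h has been generated by CMake):

  #include "clang/Config/config.h"

  #if CLANG_ANALYZER_WITH_Z3
  #include <z3.h>
  // Z3-backed wrappers (Z3Sort, Z3Expr, Z3Solver, ...) are only compiled here.
  #endif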
diff --git a/include/clang/StaticAnalyzer/Core/PathSensitive/SMTAPI.h b/include/clang/StaticAnalyzer/Core/PathSensitive/SMTAPI.h
new file mode 100644
index 0000000000..9bc3a5e7e9
--- /dev/null
+++ b/include/clang/StaticAnalyzer/Core/PathSensitive/SMTAPI.h
@@ -0,0 +1,404 @@
+//===- SMTAPI.h -------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a generic SMT solver API, which will be the base class
+// for every solver-specific SMT class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_STATICANALYZER_CORE_PATHSENSITIVE_SMTSOLVER_H
+#define LLVM_CLANG_STATICANALYZER_CORE_PATHSENSITIVE_SMTSOLVER_H
+
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/APSInt.h"
+
+namespace clang {
+namespace ento {
+
+/// Generic base class for SMT sorts
+class SMTSort {
+public:
+ SMTSort() = default;
+ virtual ~SMTSort() = default;
+
+ /// Returns true if the sort is a bitvector, calls isBitvectorSortImpl().
+ virtual bool isBitvectorSort() const { return isBitvectorSortImpl(); }
+
+ /// Returns true if the sort is a floating-point, calls isFloatSortImpl().
+ virtual bool isFloatSort() const { return isFloatSortImpl(); }
+
+ /// Returns true if the sort is a boolean, calls isBooleanSortImpl().
+ virtual bool isBooleanSort() const { return isBooleanSortImpl(); }
+
+ /// Returns the bitvector size, fails if the sort is not a bitvector
+ /// Calls getBitvectorSortSizeImpl().
+ virtual unsigned getBitvectorSortSize() const {
+ assert(isBitvectorSort() && "Not a bitvector sort!");
+ unsigned Size = getBitvectorSortSizeImpl();
+ assert(Size && "Size is zero!");
+ return Size;
+ };
+
+ /// Returns the floating-point size, fails if the sort is not a floating-point
+ /// Calls getFloatSortSizeImpl().
+ virtual unsigned getFloatSortSize() const {
+ assert(isFloatSort() && "Not a floating-point sort!");
+ unsigned Size = getFloatSortSizeImpl();
+ assert(Size && "Size is zero!");
+ return Size;
+ };
+
+ virtual void Profile(llvm::FoldingSetNodeID &ID) const = 0;
+
+ bool operator<(const SMTSort &Other) const {
+ llvm::FoldingSetNodeID ID1, ID2;
+ Profile(ID1);
+ Other.Profile(ID2);
+ return ID1 < ID2;
+ }
+
+ friend bool operator==(SMTSort const &LHS, SMTSort const &RHS) {
+ return LHS.equal_to(RHS);
+ }
+
+ virtual void print(raw_ostream &OS) const = 0;
+
+ LLVM_DUMP_METHOD void dump() const { print(llvm::errs()); }
+
+protected:
+ /// Query the SMT solver and returns true if two sorts are equal (same kind
+ /// and bit width). This does not check if the two sorts are the same objects.
+ virtual bool equal_to(SMTSort const &other) const = 0;
+
+ /// Query the SMT solver and checks if a sort is bitvector.
+ virtual bool isBitvectorSortImpl() const = 0;
+
+ /// Query the SMT solver and checks if a sort is floating-point.
+ virtual bool isFloatSortImpl() const = 0;
+
+ /// Query the SMT solver and checks if a sort is boolean.
+ virtual bool isBooleanSortImpl() const = 0;
+
+ /// Query the SMT solver and returns the sort bit width.
+ virtual unsigned getBitvectorSortSizeImpl() const = 0;
+
+ /// Query the SMT solver and returns the sort bit width.
+ virtual unsigned getFloatSortSizeImpl() const = 0;
+};
+
+/// Pointer to an SMTSort, used by the SMTSolver API.
+using SMTSortRef = const SMTSort *;
+
+/// Generic base class for SMT exprs
+class SMTExpr {
+public:
+ SMTExpr() = default;
+ virtual ~SMTExpr() = default;
+
+ bool operator<(const SMTExpr &Other) const {
+ llvm::FoldingSetNodeID ID1, ID2;
+ Profile(ID1);
+ Other.Profile(ID2);
+ return ID1 < ID2;
+ }
+
+ virtual void Profile(llvm::FoldingSetNodeID &ID) const = 0;
+
+ friend bool operator==(SMTExpr const &LHS, SMTExpr const &RHS) {
+ return LHS.equal_to(RHS);
+ }
+
+ virtual void print(raw_ostream &OS) const = 0;
+
+ LLVM_DUMP_METHOD void dump() const { print(llvm::errs()); }
+
+protected:
+ /// Query the SMT solver and returns true if two sorts are equal (same kind
+ /// and bit width). This does not check if the two sorts are the same objects.
+ virtual bool equal_to(SMTExpr const &other) const = 0;
+};
+
+/// Pointer to an SMTExpr, used by the SMTSolver API.
+using SMTExprRef = const SMTExpr *;
+
+/// Generic base class for SMT Solvers
+///
+/// This class is responsible for wrapping all sorts and expression generation,
+/// through the mk* methods. It also provides methods to create SMT expressions
+/// straight from clang's AST, through the from* methods.
+class SMTSolver {
+public:
+ SMTSolver() = default;
+ virtual ~SMTSolver() = default;
+
+ LLVM_DUMP_METHOD void dump() const { print(llvm::errs()); }
+
+ // Returns an appropriate floating-point sort for the given bitwidth.
+ SMTSortRef getFloatSort(unsigned BitWidth) {
+ switch (BitWidth) {
+ case 16:
+ return getFloat16Sort();
+ case 32:
+ return getFloat32Sort();
+ case 64:
+ return getFloat64Sort();
+ case 128:
+ return getFloat128Sort();
+ default:;
+ }
+ llvm_unreachable("Unsupported floating-point bitwidth!");
+ }
+
+ // Returns a boolean sort.
+ virtual SMTSortRef getBoolSort() = 0;
+
+ // Returns an appropriate bitvector sort for the given bitwidth.
+ virtual SMTSortRef getBitvectorSort(const unsigned BitWidth) = 0;
+
+ // Returns a floating-point sort of width 16
+ virtual SMTSortRef getFloat16Sort() = 0;
+
+ // Returns a floating-point sort of width 32
+ virtual SMTSortRef getFloat32Sort() = 0;
+
+ // Returns a floating-point sort of width 64
+ virtual SMTSortRef getFloat64Sort() = 0;
+
+ // Returns a floating-point sort of width 128
+ virtual SMTSortRef getFloat128Sort() = 0;
+
+ // Returns an appropriate sort for the given AST.
+ virtual SMTSortRef getSort(const SMTExprRef &AST) = 0;
+
+ /// Given a constraint, adds it to the solver
+ virtual void addConstraint(const SMTExprRef &Exp) const = 0;
+
+ /// Creates a bitvector addition operation
+ virtual SMTExprRef mkBVAdd(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;
+
+ /// Creates a bitvector subtraction operation
+ virtual SMTExprRef mkBVSub(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;
+
+ /// Creates a bitvector multiplication operation
+ virtual SMTExprRef mkBVMul(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;
+
+ /// Creates a bitvector signed modulus operation
+ virtual SMTExprRef mkBVSRem(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;
+
+ /// Creates a bitvector unsigned modulus operation
+ virtual SMTExprRef mkBVURem(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;
+
+ /// Creates a bitvector signed division operation
+ virtual SMTExprRef mkBVSDiv(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;
+
+ /// Creates a bitvector unsigned division operation
+ virtual SMTExprRef mkBVUDiv(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;
+
+ /// Creates a bitvector logical shift left operation
+ virtual SMTExprRef mkBVShl(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;
+
+ /// Creates a bitvector arithmetic shift right operation
+ virtual SMTExprRef mkBVAshr(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;
+
+ /// Creates a bitvector logical shift right operation
+ virtual SMTExprRef mkBVLshr(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;
+
+ /// Creates a bitvector negation operation
+ virtual SMTExprRef mkBVNeg(const SMTExprRef &Exp) = 0;
+
+ /// Creates a bitvector not operation
+ virtual SMTExprRef mkBVNot(const SMTExprRef &Exp) = 0;
+
+ /// Creates a bitvector xor operation
+ virtual SMTExprRef mkBVXor(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;
+
+ /// Creates a bitvector or operation
+ virtual SMTExprRef mkBVOr(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;
+
+ /// Creates a bitvector and operation
+ virtual SMTExprRef mkBVAnd(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;
+
+ /// Creates a bitvector unsigned less-than operation
+ virtual SMTExprRef mkBVUlt(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;
+
+ /// Creates a bitvector signed less-than operation
+ virtual SMTExprRef mkBVSlt(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;
+
+ /// Creates a bitvector unsigned greater-than operation
+ virtual SMTExprRef mkBVUgt(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;
+
+ /// Creates a bitvector signed greater-than operation
+ virtual SMTExprRef mkBVSgt(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;
+
+ /// Creates a bitvector unsigned less-equal-than operation
+ virtual SMTExprRef mkBVUle(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;
+
+ /// Creates a bitvector signed less-equal-than operation
+ virtual SMTExprRef mkBVSle(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;
+
+ /// Creates a bitvector unsigned greater-equal-than operation
+ virtual SMTExprRef mkBVUge(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;
+
+ /// Creates a bitvector signed greater-equal-than operation
+ virtual SMTExprRef mkBVSge(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;
+
+ /// Creates a boolean not operation
+ virtual SMTExprRef mkNot(const SMTExprRef &Exp) = 0;
+
+ /// Creates a boolean equality operation
+ virtual SMTExprRef mkEqual(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;
+
+ /// Creates a boolean and operation
+ virtual SMTExprRef mkAnd(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;
+
+ /// Creates a boolean or operation
+ virtual SMTExprRef mkOr(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;
+
+ /// Creates a boolean ite operation
+ virtual SMTExprRef mkIte(const SMTExprRef &Cond, const SMTExprRef &T,
+ const SMTExprRef &F) = 0;
+
+ /// Creates a bitvector sign extension operation
+ virtual SMTExprRef mkBVSignExt(unsigned i, const SMTExprRef &Exp) = 0;
+
+ /// Creates a bitvector zero extension operation
+ virtual SMTExprRef mkBVZeroExt(unsigned i, const SMTExprRef &Exp) = 0;
+
+ /// Creates a bitvector extract operation
+ virtual SMTExprRef mkBVExtract(unsigned High, unsigned Low,
+ const SMTExprRef &Exp) = 0;
+
+ /// Creates a bitvector concat operation
+ virtual SMTExprRef mkBVConcat(const SMTExprRef &LHS,
+ const SMTExprRef &RHS) = 0;
+
+ /// Creates a floating-point negation operation
+ virtual SMTExprRef mkFPNeg(const SMTExprRef &Exp) = 0;
+
+ /// Creates a floating-point isInfinite operation
+ virtual SMTExprRef mkFPIsInfinite(const SMTExprRef &Exp) = 0;
+
+ /// Creates a floating-point isNaN operation
+ virtual SMTExprRef mkFPIsNaN(const SMTExprRef &Exp) = 0;
+
+ /// Creates a floating-point isNormal operation
+ virtual SMTExprRef mkFPIsNormal(const SMTExprRef &Exp) = 0;
+
+ /// Creates a floating-point isZero operation
+ virtual SMTExprRef mkFPIsZero(const SMTExprRef &Exp) = 0;
+
+ /// Creates a floating-point multiplication operation
+ virtual SMTExprRef mkFPMul(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;
+
+ /// Creates a floating-point division operation
+ virtual SMTExprRef mkFPDiv(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;
+
+ /// Creates a floating-point remainder operation
+ virtual SMTExprRef mkFPRem(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;
+
+ /// Creates a floating-point addition operation
+ virtual SMTExprRef mkFPAdd(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;
+
+ /// Creates a floating-point subtraction operation
+ virtual SMTExprRef mkFPSub(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;
+
+ /// Creates a floating-point less-than operation
+ virtual SMTExprRef mkFPLt(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;
+
+ /// Creates a floating-point greater-than operation
+ virtual SMTExprRef mkFPGt(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;
+
+ /// Creates a floating-point less-than-or-equal operation
+ virtual SMTExprRef mkFPLe(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;
+
+ /// Creates a floating-point greater-than-or-equal operation
+ virtual SMTExprRef mkFPGe(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;
+
+ /// Creates a floating-point equality operation
+ virtual SMTExprRef mkFPEqual(const SMTExprRef &LHS,
+ const SMTExprRef &RHS) = 0;
+
+ /// Creates a floating-point conversion from floating-point to floating-point
+ /// operation
+ virtual SMTExprRef mkFPtoFP(const SMTExprRef &From, const SMTSortRef &To) = 0;
+
+ /// Creates a floating-point conversion from signed bitvector to
+ /// floating-point operation
+ virtual SMTExprRef mkSBVtoFP(const SMTExprRef &From,
+ const SMTSortRef &To) = 0;
+
+ /// Creates a floating-point conversion from unsigned bitvector to
+ /// floating-point operation
+ virtual SMTExprRef mkUBVtoFP(const SMTExprRef &From,
+ const SMTSortRef &To) = 0;
+
+ /// Creates a floating-point conversion from floating-point to signed
+ /// bitvector operation
+ virtual SMTExprRef mkFPtoSBV(const SMTExprRef &From, unsigned ToWidth) = 0;
+
+ /// Creates a floating-point conversion from floating-point to unsigned
+ /// bitvector operation
+ virtual SMTExprRef mkFPtoUBV(const SMTExprRef &From, unsigned ToWidth) = 0;
+
+ /// Creates a new symbol, given a name and a sort
+ virtual SMTExprRef mkSymbol(const char *Name, SMTSortRef Sort) = 0;
+
+ // Returns an appropriate floating-point rounding mode.
+ virtual SMTExprRef getFloatRoundingMode() = 0;
+
+ // If a model is available, returns the value of a given bitvector symbol
+ virtual llvm::APSInt getBitvector(const SMTExprRef &Exp, unsigned BitWidth,
+ bool isUnsigned) = 0;
+
+ // If a model is available, returns the value of a given boolean symbol
+ virtual bool getBoolean(const SMTExprRef &Exp) = 0;
+
+ /// Constructs an SMTExprRef from a boolean.
+ virtual SMTExprRef mkBoolean(const bool b) = 0;
+
+ /// Constructs an SMTExprRef from a finite APFloat.
+ virtual SMTExprRef mkFloat(const llvm::APFloat Float) = 0;
+
+ /// Constructs an SMTExprRef from an APSInt and its bit width
+ virtual SMTExprRef mkBitvector(const llvm::APSInt Int, unsigned BitWidth) = 0;
+
+ /// Given an expression, extract the value of this operand in the model.
+ virtual bool getInterpretation(const SMTExprRef &Exp, llvm::APSInt &Int) = 0;
+
+ /// Given an expression extract the value of this operand in the model.
+ virtual bool getInterpretation(const SMTExprRef &Exp,
+ llvm::APFloat &Float) = 0;
+
+ /// Check if the constraints are satisfiable
+ virtual Optional<bool> check() const = 0;
+
+ /// Push the current solver state
+ virtual void push() = 0;
+
+ /// Pop the previous solver state
+ virtual void pop(unsigned NumStates = 1) = 0;
+
+ /// Reset the solver and remove all constraints.
+ virtual void reset() = 0;
+
+ /// Checks if the solver supports floating-points.
+ virtual bool isFPSupported() = 0;
+
+ virtual void print(raw_ostream &OS) const = 0;
+};
+
+/// Shared pointer for SMTSolvers.
+using SMTSolverRef = std::shared_ptr<SMTSolver>;
+
+/// Convenience method to create a Z3Solver object
+SMTSolverRef CreateZ3Solver();
+
+} // namespace ento
+} // namespace clang
+
+#endif
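Taken together, these pure-virtual hooks are the whole surface the constraint managers below need. A minimal sketch of driving the interface (illustration only, not part of the patch; it assumes the analyzer was configured with Z3 so that CreateZ3Solver() returns a working backend):

  #include "clang/StaticAnalyzer/Core/PathSensitive/SMTAPI.h"
  #include "llvm/ADT/APSInt.h"
  #include "llvm/ADT/Optional.h"
  #include "llvm/Support/raw_ostream.h"

  using namespace clang::ento;

  // Toy constraint: an 8-bit symbol "$0" that must be unsigned-less-than 4.
  static void checkToyConstraint() {
    SMTSolverRef Solver = CreateZ3Solver();

    SMTExprRef Sym = Solver->mkSymbol("$0", Solver->getBitvectorSort(8));
    llvm::APSInt Four(llvm::APInt(/*numBits=*/8, /*val=*/4), /*isUnsigned=*/true);
    SMTExprRef Bound = Solver->mkBitvector(Four, Four.getBitWidth());
    Solver->addConstraint(Solver->mkBVUlt(Sym, Bound));

    // check() yields no value when the solver gives up (e.g. on timeout).
    if (llvm::Optional<bool> Sat = Solver->check())
      llvm::errs() << (*Sat ? "sat" : "unsat") << "\n";

    Solver->reset();
  }

Note that check() returns Optional<bool>, presumably so an unknown/timeout result can be reported separately from sat and unsat.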
diff --git a/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h b/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h
index 4d856c0cae..6819e6af86 100644
--- a/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h
+++ b/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h
@@ -18,7 +18,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/SMTConv.h"
typedef llvm::ImmutableSet<
- std::pair<clang::ento::SymbolRef, const llvm::SMTExpr *>>
+ std::pair<clang::ento::SymbolRef, const clang::ento::SMTExpr *>>
ConstraintSMTTy;
REGISTER_TRAIT_WITH_PROGRAMSTATE(ConstraintSMT, ConstraintSMTTy)
@@ -26,7 +26,7 @@ namespace clang {
namespace ento {
class SMTConstraintManager : public clang::ento::SimpleConstraintManager {
- mutable llvm::SMTSolverRef Solver = llvm::CreateZ3Solver();
+ mutable SMTSolverRef Solver = CreateZ3Solver();
public:
SMTConstraintManager(clang::ento::SubEngine *SE, clang::ento::SValBuilder &SB)
@@ -44,8 +44,7 @@ public:
QualType RetTy;
bool hasComparison;
- llvm::SMTExprRef Exp =
- SMTConv::getExpr(Solver, Ctx, Sym, &RetTy, &hasComparison);
+ SMTExprRef Exp = SMTConv::getExpr(Solver, Ctx, Sym, &RetTy, &hasComparison);
// Create zero comparison for implicit boolean cast, with reversed
// assumption
@@ -81,12 +80,12 @@ public:
QualType RetTy;
// The expression may be casted, so we cannot call getZ3DataExpr() directly
- llvm::SMTExprRef VarExp = SMTConv::getExpr(Solver, Ctx, Sym, &RetTy);
- llvm::SMTExprRef Exp =
+ SMTExprRef VarExp = SMTConv::getExpr(Solver, Ctx, Sym, &RetTy);
+ SMTExprRef Exp =
SMTConv::getZeroExpr(Solver, Ctx, VarExp, RetTy, /*Assumption=*/true);
// Negate the constraint
- llvm::SMTExprRef NotExp =
+ SMTExprRef NotExp =
SMTConv::getZeroExpr(Solver, Ctx, VarExp, RetTy, /*Assumption=*/false);
ConditionTruthVal isSat = checkModel(State, Sym, Exp);
@@ -119,7 +118,7 @@ public:
// this method tries to get the interpretation (the actual value) from
// the solver, which is currently not cached.
- llvm::SMTExprRef Exp =
+ SMTExprRef Exp =
SMTConv::fromData(Solver, SD->getSymbolID(), Ty, Ctx.getTypeSize(Ty));
Solver->reset();
@@ -135,7 +134,7 @@ public:
return nullptr;
// A value has been obtained, check if it is the only value
- llvm::SMTExprRef NotExp = SMTConv::fromBinOp(
+ SMTExprRef NotExp = SMTConv::fromBinOp(
Solver, Exp, BO_NE,
Ty->isBooleanType() ? Solver->mkBoolean(Value.getBoolValue())
: Solver->mkBitvector(Value, Value.getBitWidth()),
@@ -273,7 +272,7 @@ public:
protected:
// Check whether a new model is satisfiable, and update the program state.
virtual ProgramStateRef assumeExpr(ProgramStateRef State, SymbolRef Sym,
- const llvm::SMTExprRef &Exp) {
+ const SMTExprRef &Exp) {
// Check the model, avoid simplifying AST to save time
if (checkModel(State, Sym, Exp).isConstrainedTrue())
return State->add<ConstraintSMT>(std::make_pair(Sym, Exp));
@@ -290,9 +289,9 @@ protected:
// Construct the logical AND of all the constraints
if (I != IE) {
- std::vector<llvm::SMTExprRef> ASTs;
+ std::vector<SMTExprRef> ASTs;
- llvm::SMTExprRef Constraint = I++->second;
+ SMTExprRef Constraint = I++->second;
while (I != IE) {
Constraint = Solver->mkAnd(Constraint, I++->second);
}
@@ -303,7 +302,7 @@ protected:
// Generate and check a Z3 model, using the given constraint.
ConditionTruthVal checkModel(ProgramStateRef State, SymbolRef Sym,
- const llvm::SMTExprRef &Exp) const {
+ const SMTExprRef &Exp) const {
ProgramStateRef NewState =
State->add<ConstraintSMT>(std::make_pair(Sym, Exp));
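The loop in the hunk above is how the manager rebuilds the logical AND of all cached constraints before asking for a model. A standalone sketch of that fold (illustrative only; conjoinConstraints is a hypothetical helper, not part of the patch):

  #include "clang/StaticAnalyzer/Core/PathSensitive/SMTAPI.h"
  #include <cassert>
  #include <cstddef>
  #include <vector>

  using namespace clang::ento;

  // Fold a non-empty list of cached constraints into a single conjunction.
  static SMTExprRef conjoinConstraints(SMTSolverRef &Solver,
                                       const std::vector<SMTExprRef> &Cs) {
    assert(!Cs.empty() && "Nothing to conjoin!");
    SMTExprRef Result = Cs.front();
    for (std::size_t I = 1, E = Cs.size(); I != E; ++I)
      Result = Solver->mkAnd(Result, Cs[I]);
    return Result;
  }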
diff --git a/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConv.h b/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConv.h
index bdebe23882..f5145699c1 100644
--- a/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConv.h
+++ b/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConv.h
@@ -15,8 +15,8 @@
#include "clang/AST/Expr.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SMTAPI.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
-#include "llvm/Support/SMTAPI.h"
namespace clang {
namespace ento {
@@ -24,8 +24,8 @@ namespace ento {
class SMTConv {
public:
// Returns an appropriate sort, given a QualType and its bit width.
- static inline llvm::SMTSortRef mkSort(llvm::SMTSolverRef &Solver,
- const QualType &Ty, unsigned BitWidth) {
+ static inline SMTSortRef mkSort(SMTSolverRef &Solver, const QualType &Ty,
+ unsigned BitWidth) {
if (Ty->isBooleanType())
return Solver->getBoolSort();
@@ -35,10 +35,10 @@ public:
return Solver->getBitvectorSort(BitWidth);
}
- /// Constructs an SMTSolverRef from an unary operator.
- static inline llvm::SMTExprRef fromUnOp(llvm::SMTSolverRef &Solver,
- const UnaryOperator::Opcode Op,
- const llvm::SMTExprRef &Exp) {
+ /// Constructs an SMTExprRef from a unary operator.
+ static inline SMTExprRef fromUnOp(SMTSolverRef &Solver,
+ const UnaryOperator::Opcode Op,
+ const SMTExprRef &Exp) {
switch (Op) {
case UO_Minus:
return Solver->mkBVNeg(Exp);
@@ -54,10 +54,10 @@ public:
llvm_unreachable("Unimplemented opcode");
}
- /// Constructs an SMTSolverRef from a floating-point unary operator.
- static inline llvm::SMTExprRef fromFloatUnOp(llvm::SMTSolverRef &Solver,
- const UnaryOperator::Opcode Op,
- const llvm::SMTExprRef &Exp) {
+ /// Constructs an SMTExprRef from a floating-point unary operator.
+ static inline SMTExprRef fromFloatUnOp(SMTSolverRef &Solver,
+ const UnaryOperator::Opcode Op,
+ const SMTExprRef &Exp) {
switch (Op) {
case UO_Minus:
return Solver->mkFPNeg(Exp);
@@ -70,28 +70,27 @@ public:
llvm_unreachable("Unimplemented opcode");
}
- /// Construct an SMTSolverRef from a n-ary binary operator.
- static inline llvm::SMTExprRef
- fromNBinOp(llvm::SMTSolverRef &Solver, const BinaryOperator::Opcode Op,
- const std::vector<llvm::SMTExprRef> &ASTs) {
+ /// Construct an SMTExprRef from an n-ary binary operator.
+ static inline SMTExprRef fromNBinOp(SMTSolverRef &Solver,
+ const BinaryOperator::Opcode Op,
+ const std::vector<SMTExprRef> &ASTs) {
assert(!ASTs.empty());
if (Op != BO_LAnd && Op != BO_LOr)
llvm_unreachable("Unimplemented opcode");
- llvm::SMTExprRef res = ASTs.front();
+ SMTExprRef res = ASTs.front();
for (std::size_t i = 1; i < ASTs.size(); ++i)
res = (Op == BO_LAnd) ? Solver->mkAnd(res, ASTs[i])
: Solver->mkOr(res, ASTs[i]);
return res;
}
- /// Construct an SMTSolverRef from a binary operator.
- static inline llvm::SMTExprRef fromBinOp(llvm::SMTSolverRef &Solver,
- const llvm::SMTExprRef &LHS,
- const BinaryOperator::Opcode Op,
- const llvm::SMTExprRef &RHS,
- bool isSigned) {
+ /// Construct an SMTExprRef from a binary operator.
+ static inline SMTExprRef fromBinOp(SMTSolverRef &Solver,
+ const SMTExprRef &LHS,
+ const BinaryOperator::Opcode Op,
+ const SMTExprRef &RHS, bool isSigned) {
assert(*Solver->getSort(LHS) == *Solver->getSort(RHS) &&
"AST's must have the same sort!");
@@ -163,10 +162,9 @@ public:
llvm_unreachable("Unimplemented opcode");
}
- /// Construct an SMTSolverRef from a special floating-point binary
- /// operator.
- static inline llvm::SMTExprRef
- fromFloatSpecialBinOp(llvm::SMTSolverRef &Solver, const llvm::SMTExprRef &LHS,
+ /// Construct an SMTExprRef from a special floating-point binary operator.
+ static inline SMTExprRef
+ fromFloatSpecialBinOp(SMTSolverRef &Solver, const SMTExprRef &LHS,
const BinaryOperator::Opcode Op,
const llvm::APFloat::fltCategory &RHS) {
switch (Op) {
@@ -197,11 +195,11 @@ public:
llvm_unreachable("Unimplemented opcode");
}
- /// Construct an SMTSolverRef from a floating-point binary operator.
- static inline llvm::SMTExprRef fromFloatBinOp(llvm::SMTSolverRef &Solver,
- const llvm::SMTExprRef &LHS,
- const BinaryOperator::Opcode Op,
- const llvm::SMTExprRef &RHS) {
+ /// Construct an SMTExprRef from a floating-point binary operator.
+ static inline SMTExprRef fromFloatBinOp(SMTSolverRef &Solver,
+ const SMTExprRef &LHS,
+ const BinaryOperator::Opcode Op,
+ const SMTExprRef &RHS) {
assert(*Solver->getSort(LHS) == *Solver->getSort(RHS) &&
"AST's must have the same sort!");
@@ -255,13 +253,11 @@ public:
llvm_unreachable("Unimplemented opcode");
}
- /// Construct an SMTSolverRef from a QualType FromTy to a QualType ToTy,
- /// and their bit widths.
- static inline llvm::SMTExprRef fromCast(llvm::SMTSolverRef &Solver,
- const llvm::SMTExprRef &Exp,
- QualType ToTy, uint64_t ToBitWidth,
- QualType FromTy,
- uint64_t FromBitWidth) {
+ /// Construct an SMTExprRef from a QualType FromTy to a QualType ToTy, and
+ /// their bit widths.
+ static inline SMTExprRef fromCast(SMTSolverRef &Solver, const SMTExprRef &Exp,
+ QualType ToTy, uint64_t ToBitWidth,
+ QualType FromTy, uint64_t FromBitWidth) {
if ((FromTy->isIntegralOrEnumerationType() &&
ToTy->isIntegralOrEnumerationType()) ||
(FromTy->isAnyPointerType() ^ ToTy->isAnyPointerType()) ||
@@ -295,7 +291,7 @@ public:
}
if (FromTy->isIntegralOrEnumerationType() && ToTy->isRealFloatingType()) {
- llvm::SMTSortRef Sort = Solver->getFloatSort(ToBitWidth);
+ SMTSortRef Sort = Solver->getFloatSort(ToBitWidth);
return FromTy->isSignedIntegerOrEnumerationType()
? Solver->mkSBVtoFP(Exp, Sort)
: Solver->mkUBVtoFP(Exp, Sort);
@@ -310,7 +306,7 @@ public:
}
// Callback function for doCast parameter on APSInt type.
- static inline llvm::APSInt castAPSInt(llvm::SMTSolverRef &Solver,
+ static inline llvm::APSInt castAPSInt(SMTSolverRef &Solver,
const llvm::APSInt &V, QualType ToTy,
uint64_t ToWidth, QualType FromTy,
uint64_t FromWidth) {
@@ -318,32 +314,30 @@ public:
return TargetType.convert(V);
}
- /// Construct an SMTSolverRef from a SymbolData.
- static inline llvm::SMTExprRef fromData(llvm::SMTSolverRef &Solver,
- const SymbolID ID, const QualType &Ty,
- uint64_t BitWidth) {
+ /// Construct an SMTExprRef from a SymbolData.
+ static inline SMTExprRef fromData(SMTSolverRef &Solver, const SymbolID ID,
+ const QualType &Ty, uint64_t BitWidth) {
llvm::Twine Name = "$" + llvm::Twine(ID);
return Solver->mkSymbol(Name.str().c_str(), mkSort(Solver, Ty, BitWidth));
}
- // Wrapper to generate SMTSolverRef from SymbolCast data.
- static inline llvm::SMTExprRef getCastExpr(llvm::SMTSolverRef &Solver,
- ASTContext &Ctx,
- const llvm::SMTExprRef &Exp,
- QualType FromTy, QualType ToTy) {
+ // Wrapper to generate SMTExprRef from SymbolCast data.
+ static inline SMTExprRef getCastExpr(SMTSolverRef &Solver, ASTContext &Ctx,
+ const SMTExprRef &Exp, QualType FromTy,
+ QualType ToTy) {
return fromCast(Solver, Exp, ToTy, Ctx.getTypeSize(ToTy), FromTy,
Ctx.getTypeSize(FromTy));
}
- // Wrapper to generate SMTSolverRef from unpacked binary symbolic
- // expression. Sets the RetTy parameter. See getSMTSolverRef().
- static inline llvm::SMTExprRef
- getBinExpr(llvm::SMTSolverRef &Solver, ASTContext &Ctx,
- const llvm::SMTExprRef &LHS, QualType LTy,
- BinaryOperator::Opcode Op, const llvm::SMTExprRef &RHS,
- QualType RTy, QualType *RetTy) {
- llvm::SMTExprRef NewLHS = LHS;
- llvm::SMTExprRef NewRHS = RHS;
+ // Wrapper to generate SMTExprRef from unpacked binary symbolic expression.
+ // Sets the RetTy parameter. See getSMTExprRef().
+ static inline SMTExprRef getBinExpr(SMTSolverRef &Solver, ASTContext &Ctx,
+ const SMTExprRef &LHS, QualType LTy,
+ BinaryOperator::Opcode Op,
+ const SMTExprRef &RHS, QualType RTy,
+ QualType *RetTy) {
+ SMTExprRef NewLHS = LHS;
+ SMTExprRef NewRHS = RHS;
doTypeConversion(Solver, Ctx, NewLHS, NewRHS, LTy, RTy);
// Update the return type parameter if the output type has changed.
@@ -371,40 +365,36 @@ public:
LTy->isSignedIntegerOrEnumerationType());
}
- // Wrapper to generate SMTSolverRef from BinarySymExpr.
- // Sets the hasComparison and RetTy parameters. See getSMTSolverRef().
- static inline llvm::SMTExprRef getSymBinExpr(llvm::SMTSolverRef &Solver,
- ASTContext &Ctx,
- const BinarySymExpr *BSE,
- bool *hasComparison,
- QualType *RetTy) {
+ // Wrapper to generate SMTExprRef from BinarySymExpr.
+ // Sets the hasComparison and RetTy parameters. See getSMTExprRef().
+ static inline SMTExprRef getSymBinExpr(SMTSolverRef &Solver, ASTContext &Ctx,
+ const BinarySymExpr *BSE,
+ bool *hasComparison, QualType *RetTy) {
QualType LTy, RTy;
BinaryOperator::Opcode Op = BSE->getOpcode();
if (const SymIntExpr *SIE = dyn_cast<SymIntExpr>(BSE)) {
- llvm::SMTExprRef LHS =
+ SMTExprRef LHS =
getSymExpr(Solver, Ctx, SIE->getLHS(), &LTy, hasComparison);
llvm::APSInt NewRInt;
std::tie(NewRInt, RTy) = fixAPSInt(Ctx, SIE->getRHS());
- llvm::SMTExprRef RHS =
- Solver->mkBitvector(NewRInt, NewRInt.getBitWidth());
+ SMTExprRef RHS = Solver->mkBitvector(NewRInt, NewRInt.getBitWidth());
return getBinExpr(Solver, Ctx, LHS, LTy, Op, RHS, RTy, RetTy);
}
if (const IntSymExpr *ISE = dyn_cast<IntSymExpr>(BSE)) {
llvm::APSInt NewLInt;
std::tie(NewLInt, LTy) = fixAPSInt(Ctx, ISE->getLHS());
- llvm::SMTExprRef LHS =
- Solver->mkBitvector(NewLInt, NewLInt.getBitWidth());
- llvm::SMTExprRef RHS =
+ SMTExprRef LHS = Solver->mkBitvector(NewLInt, NewLInt.getBitWidth());
+ SMTExprRef RHS =
getSymExpr(Solver, Ctx, ISE->getRHS(), &RTy, hasComparison);
return getBinExpr(Solver, Ctx, LHS, LTy, Op, RHS, RTy, RetTy);
}
if (const SymSymExpr *SSM = dyn_cast<SymSymExpr>(BSE)) {
- llvm::SMTExprRef LHS =
+ SMTExprRef LHS =
getSymExpr(Solver, Ctx, SSM->getLHS(), &LTy, hasComparison);
- llvm::SMTExprRef RHS =
+ SMTExprRef RHS =
getSymExpr(Solver, Ctx, SSM->getRHS(), &RTy, hasComparison);
return getBinExpr(Solver, Ctx, LHS, LTy, Op, RHS, RTy, RetTy);
}
@@ -414,10 +404,9 @@ public:
// Recursive implementation to unpack and generate symbolic expression.
// Sets the hasComparison and RetTy parameters. See getExpr().
- static inline llvm::SMTExprRef getSymExpr(llvm::SMTSolverRef &Solver,
- ASTContext &Ctx, SymbolRef Sym,
- QualType *RetTy,
- bool *hasComparison) {
+ static inline SMTExprRef getSymExpr(SMTSolverRef &Solver, ASTContext &Ctx,
+ SymbolRef Sym, QualType *RetTy,
+ bool *hasComparison) {
if (const SymbolData *SD = dyn_cast<SymbolData>(Sym)) {
if (RetTy)
*RetTy = Sym->getType();
@@ -431,7 +420,7 @@ public:
*RetTy = Sym->getType();
QualType FromTy;
- llvm::SMTExprRef Exp =
+ SMTExprRef Exp =
getSymExpr(Solver, Ctx, SC->getOperand(), &FromTy, hasComparison);
// Casting an expression with a comparison invalidates it. Note that this
@@ -443,8 +432,7 @@ public:
}
if (const BinarySymExpr *BSE = dyn_cast<BinarySymExpr>(Sym)) {
- llvm::SMTExprRef Exp =
- getSymBinExpr(Solver, Ctx, BSE, hasComparison, RetTy);
+ SMTExprRef Exp = getSymBinExpr(Solver, Ctx, BSE, hasComparison, RetTy);
// Set the hasComparison parameter, in post-order traversal order.
if (hasComparison)
*hasComparison = BinaryOperator::isComparisonOp(BSE->getOpcode());
@@ -454,14 +442,13 @@ public:
llvm_unreachable("Unsupported SymbolRef type!");
}
- // Generate an SMTSolverRef that represents the given symbolic expression.
+ // Generate an SMTExprRef that represents the given symbolic expression.
// Sets the hasComparison parameter if the expression has a comparison
// operator. Sets the RetTy parameter to the final return type after
// promotions and casts.
- static inline llvm::SMTExprRef getExpr(llvm::SMTSolverRef &Solver,
- ASTContext &Ctx, SymbolRef Sym,
- QualType *RetTy = nullptr,
- bool *hasComparison = nullptr) {
+ static inline SMTExprRef getExpr(SMTSolverRef &Solver, ASTContext &Ctx,
+ SymbolRef Sym, QualType *RetTy = nullptr,
+ bool *hasComparison = nullptr) {
if (hasComparison) {
*hasComparison = false;
}
@@ -469,11 +456,11 @@ public:
return getSymExpr(Solver, Ctx, Sym, RetTy, hasComparison);
}
- // Generate an SMTSolverRef that compares the expression to zero.
- static inline llvm::SMTExprRef getZeroExpr(llvm::SMTSolverRef &Solver,
- ASTContext &Ctx,
- const llvm::SMTExprRef &Exp,
- QualType Ty, bool Assumption) {
+ // Generate an SMTExprRef that compares the expression to zero.
+ static inline SMTExprRef getZeroExpr(SMTSolverRef &Solver, ASTContext &Ctx,
+ const SMTExprRef &Exp, QualType Ty,
+ bool Assumption) {
+
if (Ty->isRealFloatingType()) {
llvm::APFloat Zero =
llvm::APFloat::getZero(Ctx.getFloatTypeSemantics(Ty));
@@ -498,21 +485,21 @@ public:
llvm_unreachable("Unsupported type for zero value!");
}
- // Wrapper to generate SMTSolverRef from a range. If From == To, an
- // equality will be created instead.
- static inline llvm::SMTExprRef
- getRangeExpr(llvm::SMTSolverRef &Solver, ASTContext &Ctx, SymbolRef Sym,
- const llvm::APSInt &From, const llvm::APSInt &To, bool InRange) {
+ // Wrapper to generate SMTExprRef from a range. If From == To, an equality
+ // will be created instead.
+ static inline SMTExprRef getRangeExpr(SMTSolverRef &Solver, ASTContext &Ctx,
+ SymbolRef Sym, const llvm::APSInt &From,
+ const llvm::APSInt &To, bool InRange) {
// Convert lower bound
QualType FromTy;
llvm::APSInt NewFromInt;
std::tie(NewFromInt, FromTy) = fixAPSInt(Ctx, From);
- llvm::SMTExprRef FromExp =
+ SMTExprRef FromExp =
Solver->mkBitvector(NewFromInt, NewFromInt.getBitWidth());
// Convert symbol
QualType SymTy;
- llvm::SMTExprRef Exp = getExpr(Solver, Ctx, Sym, &SymTy);
+ SMTExprRef Exp = getExpr(Solver, Ctx, Sym, &SymTy);
// Construct single (in)equality
if (From == To)
@@ -522,17 +509,16 @@ public:
QualType ToTy;
llvm::APSInt NewToInt;
std::tie(NewToInt, ToTy) = fixAPSInt(Ctx, To);
- llvm::SMTExprRef ToExp =
- Solver->mkBitvector(NewToInt, NewToInt.getBitWidth());
+ SMTExprRef ToExp = Solver->mkBitvector(NewToInt, NewToInt.getBitWidth());
assert(FromTy == ToTy && "Range values have different types!");
// Construct two (in)equalities, and a logical and/or
- llvm::SMTExprRef LHS =
+ SMTExprRef LHS =
getBinExpr(Solver, Ctx, Exp, SymTy, InRange ? BO_GE : BO_LT, FromExp,
FromTy, /*RetTy=*/nullptr);
- llvm::SMTExprRef RHS = getBinExpr(Solver, Ctx, Exp, SymTy,
- InRange ? BO_LE : BO_GT, ToExp, ToTy,
- /*RetTy=*/nullptr);
+ SMTExprRef RHS = getBinExpr(Solver, Ctx, Exp, SymTy,
+ InRange ? BO_LE : BO_GT, ToExp, ToTy,
+ /*RetTy=*/nullptr);
return fromBinOp(Solver, LHS, InRange ? BO_LAnd : BO_LOr, RHS,
SymTy->isSignedIntegerOrEnumerationType());
@@ -564,24 +550,23 @@ public:
// Perform implicit type conversion on binary symbolic expressions.
// May modify all input parameters.
// TODO: Refactor to use built-in conversion functions
- static inline void doTypeConversion(llvm::SMTSolverRef &Solver,
- ASTContext &Ctx, llvm::SMTExprRef &LHS,
- llvm::SMTExprRef &RHS, QualType &LTy,
- QualType &RTy) {
+ static inline void doTypeConversion(SMTSolverRef &Solver, ASTContext &Ctx,
+ SMTExprRef &LHS, SMTExprRef &RHS,
+ QualType &LTy, QualType &RTy) {
assert(!LTy.isNull() && !RTy.isNull() && "Input type is null!");
// Perform type conversion
if ((LTy->isIntegralOrEnumerationType() &&
RTy->isIntegralOrEnumerationType()) &&
(LTy->isArithmeticType() && RTy->isArithmeticType())) {
- SMTConv::doIntTypeConversion<llvm::SMTExprRef, &fromCast>(
- Solver, Ctx, LHS, LTy, RHS, RTy);
+ SMTConv::doIntTypeConversion<SMTExprRef, &fromCast>(Solver, Ctx, LHS, LTy,
+ RHS, RTy);
return;
}
if (LTy->isRealFloatingType() || RTy->isRealFloatingType()) {
- SMTConv::doFloatTypeConversion<llvm::SMTExprRef, &fromCast>(
- Solver, Ctx, LHS, LTy, RHS, RTy);
+ SMTConv::doFloatTypeConversion<SMTExprRef, &fromCast>(Solver, Ctx, LHS,
+ LTy, RHS, RTy);
return;
}
@@ -639,11 +624,12 @@ public:
// Perform implicit integer type conversion.
// May modify all input parameters.
// TODO: Refactor to use Sema::handleIntegerConversion()
- template <typename T, T (*doCast)(llvm::SMTSolverRef &Solver, const T &,
- QualType, uint64_t, QualType, uint64_t)>
- static inline void doIntTypeConversion(llvm::SMTSolverRef &Solver,
- ASTContext &Ctx, T &LHS, QualType &LTy,
- T &RHS, QualType &RTy) {
+ template <typename T, T (*doCast)(SMTSolverRef &Solver, const T &, QualType,
+ uint64_t, QualType, uint64_t)>
+ static inline void doIntTypeConversion(SMTSolverRef &Solver, ASTContext &Ctx,
+ T &LHS, QualType &LTy, T &RHS,
+ QualType &RTy) {
+
uint64_t LBitWidth = Ctx.getTypeSize(LTy);
uint64_t RBitWidth = Ctx.getTypeSize(RTy);
@@ -721,11 +707,12 @@ public:
// Perform implicit floating-point type conversion.
// May modify all input parameters.
// TODO: Refactor to use Sema::handleFloatConversion()
- template <typename T, T (*doCast)(llvm::SMTSolverRef &Solver, const T &,
- QualType, uint64_t, QualType, uint64_t)>
+ template <typename T, T (*doCast)(SMTSolverRef &Solver, const T &, QualType,
+ uint64_t, QualType, uint64_t)>
static inline void
- doFloatTypeConversion(llvm::SMTSolverRef &Solver, ASTContext &Ctx, T &LHS,
+ doFloatTypeConversion(SMTSolverRef &Solver, ASTContext &Ctx, T &LHS,
QualType &LTy, T &RHS, QualType &RTy) {
+
uint64_t LBitWidth = Ctx.getTypeSize(LTy);
uint64_t RBitWidth = Ctx.getTypeSize(RTy);
@@ -762,4 +749,4 @@ public:
} // namespace ento
} // namespace clang
-#endif
+#endif \ No newline at end of file
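SMTConv is a header-only bridge from clang AST opcodes and types to the solver primitives declared in SMTAPI.h; the BugReporterVisitors.cpp change right below shows its real call site. A small illustrative sketch (not part of the patch; buildNotEqualZero is a hypothetical helper) of turning a clang binary opcode into a solver expression with fromBinOp:

  #include "clang/StaticAnalyzer/Core/PathSensitive/SMTConv.h"
  #include "llvm/ADT/APSInt.h"

  using namespace clang;
  using namespace clang::ento;

  // Build the solver expression for "$0 != 0" over 32-bit bitvectors.
  static SMTExprRef buildNotEqualZero(SMTSolverRef &Solver) {
    SMTExprRef X = Solver->mkSymbol("$0", Solver->getBitvectorSort(32));
    llvm::APSInt Zero(llvm::APInt(/*numBits=*/32, /*val=*/0), /*isUnsigned=*/true);
    SMTExprRef ZeroExp = Solver->mkBitvector(Zero, Zero.getBitWidth());
    return SMTConv::fromBinOp(Solver, X, BO_NE, ZeroExp, /*isSigned=*/false);
  }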
diff --git a/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp b/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
index 7d0a8c6da1..00dfd0257f 100644
--- a/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
+++ b/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
@@ -2411,7 +2411,7 @@ void FalsePositiveRefutationBRVisitor::finalizeVisitor(
VisitNode(EndPathNode, BRC, BR);
// Create a refutation manager
- llvm::SMTSolverRef RefutationSolver = llvm::CreateZ3Solver();
+ SMTSolverRef RefutationSolver = CreateZ3Solver();
ASTContext &Ctx = BRC.getASTContext();
// Add constraints to the solver
@@ -2419,7 +2419,7 @@ void FalsePositiveRefutationBRVisitor::finalizeVisitor(
const SymbolRef Sym = I.first;
auto RangeIt = I.second.begin();
- llvm::SMTExprRef Constraints = SMTConv::getRangeExpr(
+ SMTExprRef Constraints = SMTConv::getRangeExpr(
RefutationSolver, Ctx, Sym, RangeIt->From(), RangeIt->To(),
/*InRange=*/true);
while ((++RangeIt) != I.second.end()) {
diff --git a/lib/StaticAnalyzer/Core/CMakeLists.txt b/lib/StaticAnalyzer/Core/CMakeLists.txt
index 977e947933..80047f4908 100644
--- a/lib/StaticAnalyzer/Core/CMakeLists.txt
+++ b/lib/StaticAnalyzer/Core/CMakeLists.txt
@@ -1,5 +1,12 @@
set(LLVM_LINK_COMPONENTS support)
+# Link against Z3 if the user wants to build with it.
+if(CLANG_ANALYZER_WITH_Z3)
+ set(Z3_LINK_FILES ${Z3_LIBRARIES})
+else()
+ set(Z3_LINK_FILES "")
+endif()
+
add_clang_library(clangStaticAnalyzerCore
APSIntType.cpp
AnalysisManager.cpp
@@ -39,7 +46,6 @@ add_clang_library(clangStaticAnalyzerCore
SarifDiagnostics.cpp
SimpleConstraintManager.cpp
SimpleSValBuilder.cpp
- SMTConstraintManager.cpp
Store.cpp
SubEngine.cpp
SValBuilder.cpp
@@ -47,6 +53,7 @@ add_clang_library(clangStaticAnalyzerCore
SymbolManager.cpp
TaintManager.cpp
WorkList.cpp
+ Z3ConstraintManager.cpp
LINK_LIBS
clangAST
@@ -56,5 +63,12 @@ add_clang_library(clangStaticAnalyzerCore
clangCrossTU
clangLex
clangRewrite
+ ${Z3_LINK_FILES}
)
+if(CLANG_ANALYZER_WITH_Z3)
+ target_include_directories(clangStaticAnalyzerCore SYSTEM
+ PRIVATE
+ ${Z3_INCLUDE_DIR}
+ )
+endif()
diff --git a/lib/StaticAnalyzer/Core/SMTConstraintManager.cpp b/lib/StaticAnalyzer/Core/SMTConstraintManager.cpp
deleted file mode 100644
index d5c14351d3..0000000000
--- a/lib/StaticAnalyzer/Core/SMTConstraintManager.cpp
+++ /dev/null
@@ -1,18 +0,0 @@
-//== SMTConstraintManager.cpp -----------------------------------*- C++ -*--==//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h"
-
-using namespace clang;
-using namespace ento;
-
-std::unique_ptr<ConstraintManager>
-ento::CreateZ3ConstraintManager(ProgramStateManager &StMgr, SubEngine *Eng) {
- return llvm::make_unique<SMTConstraintManager>(Eng, StMgr.getSValBuilder());
-}
diff --git a/lib/StaticAnalyzer/Core/Z3ConstraintManager.cpp b/lib/StaticAnalyzer/Core/Z3ConstraintManager.cpp
new file mode 100644
index 0000000000..73474cc9cd
--- /dev/null
+++ b/lib/StaticAnalyzer/Core/Z3ConstraintManager.cpp
@@ -0,0 +1,835 @@
+//== Z3ConstraintManager.cpp --------------------------------*- C++ -*--==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/TargetInfo.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SMTConv.h"
+
+#include "clang/Config/config.h"
+
+using namespace clang;
+using namespace ento;
+
+#if CLANG_ANALYZER_WITH_Z3
+
+#include <z3.h>
+
+namespace {
+
+/// Configuration class for Z3
+class Z3Config {
+ friend class Z3Context;
+
+ Z3_config Config;
+
+public:
+ Z3Config() : Config(Z3_mk_config()) {
+ // Enable model finding
+ Z3_set_param_value(Config, "model", "true");
+ // Disable proof generation
+ Z3_set_param_value(Config, "proof", "false");
+ // Set timeout to 15000ms = 15s
+ Z3_set_param_value(Config, "timeout", "15000");
+ }
+
+ ~Z3Config() { Z3_del_config(Config); }
+}; // end class Z3Config
+
+// Function used to report errors
+void Z3ErrorHandler(Z3_context Context, Z3_error_code Error) {
+ llvm::report_fatal_error("Z3 error: " +
+ llvm::Twine(Z3_get_error_msg(Context, Error)));
+}
+
+/// Wrapper for Z3 context
+class Z3Context {
+public:
+ Z3_context Context;
+
+ Z3Context() {
+ Context = Z3_mk_context_rc(Z3Config().Config);
+ // The error function is set here because the context is the first object
+ // created by the backend
+ Z3_set_error_handler(Context, Z3ErrorHandler);
+ }
+
+ virtual ~Z3Context() {
+ Z3_del_context(Context);
+ Context = nullptr;
+ }
+}; // end class Z3Context
+
+/// Wrapper for Z3 Sort
+class Z3Sort : public SMTSort {
+ friend class Z3Solver;
+
+ Z3Context &Context;
+
+ Z3_sort Sort;
+
+public:
+ /// Default constructor, mainly used by make_shared
+ Z3Sort(Z3Context &C, Z3_sort ZS) : Context(C), Sort(ZS) {
+ Z3_inc_ref(Context.Context, reinterpret_cast<Z3_ast>(Sort));
+ }
+
+ /// Override implicit copy constructor for correct reference counting.
+ Z3Sort(const Z3Sort &Other) : Context(Other.Context), Sort(Other.Sort) {
+ Z3_inc_ref(Context.Context, reinterpret_cast<Z3_ast>(Sort));
+ }
+
+ /// Override implicit copy assignment operator for correct reference
+ /// counting.
+ Z3Sort &operator=(const Z3Sort &Other) {
+ Z3_inc_ref(Context.Context, reinterpret_cast<Z3_ast>(Other.Sort));
+ Z3_dec_ref(Context.Context, reinterpret_cast<Z3_ast>(Sort));
+ Sort = Other.Sort;
+ return *this;
+ }
+
+ Z3Sort(Z3Sort &&Other) = delete;
+ Z3Sort &operator=(Z3Sort &&Other) = delete;
+
+ ~Z3Sort() {
+ if (Sort)
+ Z3_dec_ref(Context.Context, reinterpret_cast<Z3_ast>(Sort));
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger(
+ Z3_get_ast_id(Context.Context, reinterpret_cast<Z3_ast>(Sort)));
+ }
+
+ bool isBitvectorSortImpl() const override {
+ return (Z3_get_sort_kind(Context.Context, Sort) == Z3_BV_SORT);
+ }
+
+ bool isFloatSortImpl() const override {
+ return (Z3_get_sort_kind(Context.Context, Sort) == Z3_FLOATING_POINT_SORT);
+ }
+
+ bool isBooleanSortImpl() const override {
+ return (Z3_get_sort_kind(Context.Context, Sort) == Z3_BOOL_SORT);
+ }
+
+ unsigned getBitvectorSortSizeImpl() const override {
+ return Z3_get_bv_sort_size(Context.Context, Sort);
+ }
+
+ unsigned getFloatSortSizeImpl() const override {
+ return Z3_fpa_get_ebits(Context.Context, Sort) +
+ Z3_fpa_get_sbits(Context.Context, Sort);
+ }
+
+ bool equal_to(SMTSort const &Other) const override {
+ return Z3_is_eq_sort(Context.Context, Sort,
+ static_cast<const Z3Sort &>(Other).Sort);
+ }
+
+ void print(raw_ostream &OS) const override {
+ OS << Z3_sort_to_string(Context.Context, Sort);
+ }
+}; // end class Z3Sort
+
+static const Z3Sort &toZ3Sort(const SMTSort &S) {
+ return static_cast<const Z3Sort &>(S);
+}
+
+class Z3Expr : public SMTExpr {
+ friend class Z3Solver;
+
+ Z3Context &Context;
+
+ Z3_ast AST;
+
+public:
+ Z3Expr(Z3Context &C, Z3_ast ZA) : SMTExpr(), Context(C), AST(ZA) {
+ Z3_inc_ref(Context.Context, AST);
+ }
+
+ /// Override implicit copy constructor for correct reference counting.
+ Z3Expr(const Z3Expr &Copy) : SMTExpr(), Context(Copy.Context), AST(Copy.AST) {
+ Z3_inc_ref(Context.Context, AST);
+ }
+
+ /// Override implicit copy assignment operator for correct reference
+ /// counting.
+ Z3Expr &operator=(const Z3Expr &Other) {
+ Z3_inc_ref(Context.Context, Other.AST);
+ Z3_dec_ref(Context.Context, AST);
+ AST = Other.AST;
+ return *this;
+ }
+
+ Z3Expr(Z3Expr &&Other) = delete;
+ Z3Expr &operator=(Z3Expr &&Other) = delete;
+
+ ~Z3Expr() {
+ if (AST)
+ Z3_dec_ref(Context.Context, AST);
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const override {
+ ID.AddInteger(Z3_get_ast_id(Context.Context, AST));
+ }
+
+ /// Comparison of AST equality, not model equivalence.
+ bool equal_to(SMTExpr const &Other) const override {
+ assert(Z3_is_eq_sort(Context.Context, Z3_get_sort(Context.Context, AST),
+ Z3_get_sort(Context.Context,
+ static_cast<const Z3Expr &>(Other).AST)) &&
+ "AST's must have the same sort");
+ return Z3_is_eq_ast(Context.Context, AST,
+ static_cast<const Z3Expr &>(Other).AST);
+ }
+
+ void print(raw_ostream &OS) const override {
+ OS << Z3_ast_to_string(Context.Context, AST);
+ }
+}; // end class Z3Expr
+
+static const Z3Expr &toZ3Expr(const SMTExpr &E) {
+ return static_cast<const Z3Expr &>(E);
+}
+
+class Z3Model {
+ friend class Z3Solver;
+
+ Z3Context &Context;
+
+ Z3_model Model;
+
+public:
+ Z3Model(Z3Context &C, Z3_model ZM) : Context(C), Model(ZM) {
+ Z3_model_inc_ref(Context.Context, Model);
+ }
+
+ Z3Model(const Z3Model &Other) = delete;
+ Z3Model(Z3Model &&Other) = delete;
+ Z3Model &operator=(Z3Model &Other) = delete;
+ Z3Model &operator=(Z3Model &&Other) = delete;
+
+ ~Z3Model() {
+ if (Model)
+ Z3_model_dec_ref(Context.Context, Model);
+ }
+
+ void print(raw_ostream &OS) const {
+ OS << Z3_model_to_string(Context.Context, Model);
+ }
+
+ LLVM_DUMP_METHOD void dump() const { print(llvm::errs()); }
+}; // end class Z3Model
+
+/// Get the corresponding IEEE floating-point type for a given bitwidth.
+static const llvm::fltSemantics &getFloatSemantics(unsigned BitWidth) {
+ switch (BitWidth) {
+ default:
+ llvm_unreachable("Unsupported floating-point semantics!");
+ break;
+ case 16:
+ return llvm::APFloat::IEEEhalf();
+ case 32:
+ return llvm::APFloat::IEEEsingle();
+ case 64:
+ return llvm::APFloat::IEEEdouble();
+ case 128:
+ return llvm::APFloat::IEEEquad();
+ }
+}
+
+// Determine whether two float semantics are equivalent
+static bool areEquivalent(const llvm::fltSemantics &LHS,
+ const llvm::fltSemantics &RHS) {
+ return (llvm::APFloat::semanticsPrecision(LHS) ==
+ llvm::APFloat::semanticsPrecision(RHS)) &&
+ (llvm::APFloat::semanticsMinExponent(LHS) ==
+ llvm::APFloat::semanticsMinExponent(RHS)) &&
+ (llvm::APFloat::semanticsMaxExponent(LHS) ==
+ llvm::APFloat::semanticsMaxExponent(RHS)) &&
+ (llvm::APFloat::semanticsSizeInBits(LHS) ==
+ llvm::APFloat::semanticsSizeInBits(RHS));
+}
+
+class Z3Solver : public SMTSolver {
+ friend class Z3ConstraintManager;
+
+ Z3Context Context;
+
+ Z3_solver Solver;
+
+ // Cache Sorts
+ std::set<Z3Sort> CachedSorts;
+
+ // Cache Exprs
+ std::set<Z3Expr> CachedExprs;
+
+public:
+ Z3Solver() : Solver(Z3_mk_simple_solver(Context.Context)) {
+ Z3_solver_inc_ref(Context.Context, Solver);
+ }
+
+ Z3Solver(const Z3Solver &Other) = delete;
+ Z3Solver(Z3Solver &&Other) = delete;
+ Z3Solver &operator=(Z3Solver &Other) = delete;
+ Z3Solver &operator=(Z3Solver &&Other) = delete;
+
+ ~Z3Solver() {
+ if (Solver)
+ Z3_solver_dec_ref(Context.Context, Solver);
+ }
+
+ void addConstraint(const SMTExprRef &Exp) const override {
+ Z3_solver_assert(Context.Context, Solver, toZ3Expr(*Exp).AST);
+ }
+
+ // Given an SMTSort, adds/retrieves it from the cache and returns
+ // an SMTSortRef to the SMTSort in the cache
+ SMTSortRef newSortRef(const SMTSort &Sort) {
+ auto It = CachedSorts.insert(toZ3Sort(Sort));
+ return &(*It.first);
+ }
+
+ // Given an SMTExpr, adds/retrieves it from the cache and returns
+ // an SMTExprRef to the SMTExpr in the cache
+ SMTExprRef newExprRef(const SMTExpr &Exp) {
+ auto It = CachedExprs.insert(toZ3Expr(Exp));
+ return &(*It.first);
+ }
+
+ SMTSortRef getBoolSort() override {
+ return newSortRef(Z3Sort(Context, Z3_mk_bool_sort(Context.Context)));
+ }
+
+ SMTSortRef getBitvectorSort(unsigned BitWidth) override {
+ return newSortRef(
+ Z3Sort(Context, Z3_mk_bv_sort(Context.Context, BitWidth)));
+ }
+
+ SMTSortRef getSort(const SMTExprRef &Exp) override {
+ return newSortRef(
+ Z3Sort(Context, Z3_get_sort(Context.Context, toZ3Expr(*Exp).AST)));
+ }
+
+ SMTSortRef getFloat16Sort() override {
+ return newSortRef(Z3Sort(Context, Z3_mk_fpa_sort_16(Context.Context)));
+ }
+
+ SMTSortRef getFloat32Sort() override {
+ return newSortRef(Z3Sort(Context, Z3_mk_fpa_sort_32(Context.Context)));
+ }
+
+ SMTSortRef getFloat64Sort() override {
+ return newSortRef(Z3Sort(Context, Z3_mk_fpa_sort_64(Context.Context)));
+ }
+
+ SMTSortRef getFloat128Sort() override {
+ return newSortRef(Z3Sort(Context, Z3_mk_fpa_sort_128(Context.Context)));
+ }
+
+ SMTExprRef mkBVNeg(const SMTExprRef &Exp) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvneg(Context.Context, toZ3Expr(*Exp).AST)));
+ }
+
+ SMTExprRef mkBVNot(const SMTExprRef &Exp) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvnot(Context.Context, toZ3Expr(*Exp).AST)));
+ }
+
+ SMTExprRef mkNot(const SMTExprRef &Exp) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_not(Context.Context, toZ3Expr(*Exp).AST)));
+ }
+
+ SMTExprRef mkBVAdd(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvadd(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
+
+ SMTExprRef mkBVSub(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvsub(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
+
+ SMTExprRef mkBVMul(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvmul(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
+
+ SMTExprRef mkBVSRem(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvsrem(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
+
+ SMTExprRef mkBVURem(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvurem(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
+
+ SMTExprRef mkBVSDiv(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvsdiv(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
+
+ SMTExprRef mkBVUDiv(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvudiv(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
+
+ SMTExprRef mkBVShl(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvshl(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
+
+ SMTExprRef mkBVAshr(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvashr(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
+
+ SMTExprRef mkBVLshr(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvlshr(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
+
+ SMTExprRef mkBVXor(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvxor(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
+
+ SMTExprRef mkBVOr(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvor(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
+
+ SMTExprRef mkBVAnd(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvand(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
+
+ SMTExprRef mkBVUlt(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvult(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
+
+ SMTExprRef mkBVSlt(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvslt(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
+
+ SMTExprRef mkBVUgt(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvugt(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
+
+ SMTExprRef mkBVSgt(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvsgt(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
+
+ SMTExprRef mkBVUle(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvule(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
+
+ SMTExprRef mkBVSle(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvsle(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
+
+ SMTExprRef mkBVUge(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvuge(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
+
+ SMTExprRef mkBVSge(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvsge(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
+
+ SMTExprRef mkAnd(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ Z3_ast Args[2] = {toZ3Expr(*LHS).AST, toZ3Expr(*RHS).AST};
+ return newExprRef(Z3Expr(Context, Z3_mk_and(Context.Context, 2, Args)));
+ }
+
+ SMTExprRef mkOr(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ Z3_ast Args[2] = {toZ3Expr(*LHS).AST, toZ3Expr(*RHS).AST};
+ return newExprRef(Z3Expr(Context, Z3_mk_or(Context.Context, 2, Args)));
+ }
+
+ SMTExprRef mkEqual(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_eq(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
+
+ SMTExprRef mkFPNeg(const SMTExprRef &Exp) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_fpa_neg(Context.Context, toZ3Expr(*Exp).AST)));
+ }
+
+ SMTExprRef mkFPIsInfinite(const SMTExprRef &Exp) override {
+ return newExprRef(Z3Expr(
+ Context, Z3_mk_fpa_is_infinite(Context.Context, toZ3Expr(*Exp).AST)));
+ }
+
+ SMTExprRef mkFPIsNaN(const SMTExprRef &Exp) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_fpa_is_nan(Context.Context, toZ3Expr(*Exp).AST)));
+ }
+
+ SMTExprRef mkFPIsNormal(const SMTExprRef &Exp) override {
+ return newExprRef(Z3Expr(
+ Context, Z3_mk_fpa_is_normal(Context.Context, toZ3Expr(*Exp).AST)));
+ }
+
+ SMTExprRef mkFPIsZero(const SMTExprRef &Exp) override {
+ return newExprRef(Z3Expr(
+ Context, Z3_mk_fpa_is_zero(Context.Context, toZ3Expr(*Exp).AST)));
+ }
+
+ SMTExprRef mkFPMul(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ SMTExprRef RoundingMode = getFloatRoundingMode();
+ return newExprRef(
+ Z3Expr(Context,
+ Z3_mk_fpa_mul(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST, toZ3Expr(*RoundingMode).AST)));
+ }
+
+ SMTExprRef mkFPDiv(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ SMTExprRef RoundingMode = getFloatRoundingMode();
+ return newExprRef(
+ Z3Expr(Context,
+ Z3_mk_fpa_div(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST, toZ3Expr(*RoundingMode).AST)));
+ }
+
+ SMTExprRef mkFPRem(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_fpa_rem(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
+
+ SMTExprRef mkFPAdd(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ SMTExprRef RoundingMode = getFloatRoundingMode();
+ return newExprRef(
+ Z3Expr(Context,
+ Z3_mk_fpa_add(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST, toZ3Expr(*RoundingMode).AST)));
+ }
+
+ SMTExprRef mkFPSub(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ SMTExprRef RoundingMode = getFloatRoundingMode();
+ return newExprRef(
+ Z3Expr(Context,
+ Z3_mk_fpa_sub(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST, toZ3Expr(*RoundingMode).AST)));
+ }
+
+ SMTExprRef mkFPLt(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_fpa_lt(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
+
+ SMTExprRef mkFPGt(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_fpa_gt(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
+
+ SMTExprRef mkFPLe(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_fpa_leq(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
+
+ SMTExprRef mkFPGe(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_fpa_geq(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
+
+ SMTExprRef mkFPEqual(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_fpa_eq(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
+
+ SMTExprRef mkIte(const SMTExprRef &Cond, const SMTExprRef &T,
+ const SMTExprRef &F) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_ite(Context.Context, toZ3Expr(*Cond).AST,
+ toZ3Expr(*T).AST, toZ3Expr(*F).AST)));
+ }
+
+ SMTExprRef mkBVSignExt(unsigned i, const SMTExprRef &Exp) override {
+ return newExprRef(Z3Expr(
+ Context, Z3_mk_sign_ext(Context.Context, i, toZ3Expr(*Exp).AST)));
+ }
+
+ SMTExprRef mkBVZeroExt(unsigned i, const SMTExprRef &Exp) override {
+ return newExprRef(Z3Expr(
+ Context, Z3_mk_zero_ext(Context.Context, i, toZ3Expr(*Exp).AST)));
+ }
+
+ SMTExprRef mkBVExtract(unsigned High, unsigned Low,
+ const SMTExprRef &Exp) override {
+ return newExprRef(Z3Expr(Context, Z3_mk_extract(Context.Context, High, Low,
+ toZ3Expr(*Exp).AST)));
+ }
+
+ SMTExprRef mkBVConcat(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_concat(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
+
+ SMTExprRef mkFPtoFP(const SMTExprRef &From, const SMTSortRef &To) override {
+ SMTExprRef RoundingMode = getFloatRoundingMode();
+ return newExprRef(Z3Expr(
+ Context,
+ Z3_mk_fpa_to_fp_float(Context.Context, toZ3Expr(*RoundingMode).AST,
+ toZ3Expr(*From).AST, toZ3Sort(*To).Sort)));
+ }
+
+ SMTExprRef mkSBVtoFP(const SMTExprRef &From, const SMTSortRef &To) override {
+ SMTExprRef RoundingMode = getFloatRoundingMode();
+ return newExprRef(Z3Expr(
+ Context,
+ Z3_mk_fpa_to_fp_signed(Context.Context, toZ3Expr(*RoundingMode).AST,
+ toZ3Expr(*From).AST, toZ3Sort(*To).Sort)));
+ }
+
+ SMTExprRef mkUBVtoFP(const SMTExprRef &From, const SMTSortRef &To) override {
+ SMTExprRef RoundingMode = getFloatRoundingMode();
+ return newExprRef(Z3Expr(
+ Context,
+ Z3_mk_fpa_to_fp_unsigned(Context.Context, toZ3Expr(*RoundingMode).AST,
+ toZ3Expr(*From).AST, toZ3Sort(*To).Sort)));
+ }
+
+ SMTExprRef mkFPtoSBV(const SMTExprRef &From, unsigned ToWidth) override {
+ SMTExprRef RoundingMode = getFloatRoundingMode();
+ return newExprRef(Z3Expr(
+ Context, Z3_mk_fpa_to_sbv(Context.Context, toZ3Expr(*RoundingMode).AST,
+ toZ3Expr(*From).AST, ToWidth)));
+ }
+
+ SMTExprRef mkFPtoUBV(const SMTExprRef &From, unsigned ToWidth) override {
+ SMTExprRef RoundingMode = getFloatRoundingMode();
+ return newExprRef(Z3Expr(
+ Context, Z3_mk_fpa_to_ubv(Context.Context, toZ3Expr(*RoundingMode).AST,
+ toZ3Expr(*From).AST, ToWidth)));
+ }
+
+ SMTExprRef mkBoolean(const bool b) override {
+ return newExprRef(Z3Expr(Context, b ? Z3_mk_true(Context.Context)
+ : Z3_mk_false(Context.Context)));
+ }
+
+ SMTExprRef mkBitvector(const llvm::APSInt Int, unsigned BitWidth) override {
+ const SMTSortRef Sort = getBitvectorSort(BitWidth);
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_numeral(Context.Context, Int.toString(10).c_str(),
+ toZ3Sort(*Sort).Sort)));
+ }
+
+ SMTExprRef mkFloat(const llvm::APFloat Float) override {
+ SMTSortRef Sort =
+ getFloatSort(llvm::APFloat::semanticsSizeInBits(Float.getSemantics()));
+
+ llvm::APSInt Int = llvm::APSInt(Float.bitcastToAPInt(), false);
+ SMTExprRef Z3Int = mkBitvector(Int, Int.getBitWidth());
+ return newExprRef(Z3Expr(
+ Context, Z3_mk_fpa_to_fp_bv(Context.Context, toZ3Expr(*Z3Int).AST,
+ toZ3Sort(*Sort).Sort)));
+ }
+
+ SMTExprRef mkSymbol(const char *Name, SMTSortRef Sort) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_const(Context.Context,
+ Z3_mk_string_symbol(Context.Context, Name),
+ toZ3Sort(*Sort).Sort)));
+ }
+
+ llvm::APSInt getBitvector(const SMTExprRef &Exp, unsigned BitWidth,
+ bool isUnsigned) override {
+ return llvm::APSInt(
+ llvm::APInt(BitWidth,
+ Z3_get_numeral_string(Context.Context, toZ3Expr(*Exp).AST),
+ 10),
+ isUnsigned);
+ }
+
+ bool getBoolean(const SMTExprRef &Exp) override {
+ return Z3_get_bool_value(Context.Context, toZ3Expr(*Exp).AST) == Z3_L_TRUE;
+ }
+
+ SMTExprRef getFloatRoundingMode() override {
+    // TODO: Don't assume the round-to-nearest-ties-to-even rounding mode.
+ return newExprRef(Z3Expr(Context, Z3_mk_fpa_rne(Context.Context)));
+ }
+
+ bool toAPFloat(const SMTSortRef &Sort, const SMTExprRef &AST,
+ llvm::APFloat &Float, bool useSemantics) {
+ assert(Sort->isFloatSort() && "Unsupported sort to floating-point!");
+
+ llvm::APSInt Int(Sort->getFloatSortSize(), true);
+ const llvm::fltSemantics &Semantics =
+ getFloatSemantics(Sort->getFloatSortSize());
+ SMTSortRef BVSort = getBitvectorSort(Sort->getFloatSortSize());
+ if (!toAPSInt(BVSort, AST, Int, true)) {
+ return false;
+ }
+
+ if (useSemantics && !areEquivalent(Float.getSemantics(), Semantics)) {
+ assert(false && "Floating-point types don't match!");
+ return false;
+ }
+
+ Float = llvm::APFloat(Semantics, Int);
+ return true;
+ }
+
+ bool toAPSInt(const SMTSortRef &Sort, const SMTExprRef &AST,
+ llvm::APSInt &Int, bool useSemantics) {
+ if (Sort->isBitvectorSort()) {
+ if (useSemantics && Int.getBitWidth() != Sort->getBitvectorSortSize()) {
+ assert(false && "Bitvector types don't match!");
+ return false;
+ }
+
+      // FIXME: This function is also used to retrieve floating-point values,
+      // which can be 16, 32, 64 or 128 bits wide, whereas bitvectors can be
+      // anywhere between 1 and 64 bits wide; that mismatch is the reason for
+      // this guard. Once the backend provides proper calls for retrieving
+      // floating-point values and their special values (NaN, +/-infinity,
+      // +/-zero), this condition can be dropped.
+ if (Sort->getBitvectorSortSize() <= 64 ||
+ Sort->getBitvectorSortSize() == 128) {
+ Int = getBitvector(AST, Int.getBitWidth(), Int.isUnsigned());
+ return true;
+ }
+
+ assert(false && "Bitwidth not supported!");
+ return false;
+ }
+
+ if (Sort->isBooleanSort()) {
+ if (useSemantics && Int.getBitWidth() < 1) {
+ assert(false && "Boolean type doesn't match!");
+ return false;
+ }
+
+ Int = llvm::APSInt(llvm::APInt(Int.getBitWidth(), getBoolean(AST)),
+ Int.isUnsigned());
+ return true;
+ }
+
+ llvm_unreachable("Unsupported sort to integer!");
+ }
+
+ bool getInterpretation(const SMTExprRef &Exp, llvm::APSInt &Int) override {
+ Z3Model Model(Context, Z3_solver_get_model(Context.Context, Solver));
+ Z3_func_decl Func = Z3_get_app_decl(
+ Context.Context, Z3_to_app(Context.Context, toZ3Expr(*Exp).AST));
+ if (Z3_model_has_interp(Context.Context, Model.Model, Func) != Z3_L_TRUE)
+ return false;
+
+ SMTExprRef Assign = newExprRef(
+ Z3Expr(Context,
+ Z3_model_get_const_interp(Context.Context, Model.Model, Func)));
+ SMTSortRef Sort = getSort(Assign);
+ return toAPSInt(Sort, Assign, Int, true);
+ }
+
+ bool getInterpretation(const SMTExprRef &Exp, llvm::APFloat &Float) override {
+ Z3Model Model(Context, Z3_solver_get_model(Context.Context, Solver));
+ Z3_func_decl Func = Z3_get_app_decl(
+ Context.Context, Z3_to_app(Context.Context, toZ3Expr(*Exp).AST));
+ if (Z3_model_has_interp(Context.Context, Model.Model, Func) != Z3_L_TRUE)
+ return false;
+
+ SMTExprRef Assign = newExprRef(
+ Z3Expr(Context,
+ Z3_model_get_const_interp(Context.Context, Model.Model, Func)));
+ SMTSortRef Sort = getSort(Assign);
+ return toAPFloat(Sort, Assign, Float, true);
+ }
+
+ Optional<bool> check() const override {
+ Z3_lbool res = Z3_solver_check(Context.Context, Solver);
+ if (res == Z3_L_TRUE)
+ return true;
+
+ if (res == Z3_L_FALSE)
+ return false;
+
+ return Optional<bool>();
+ }
+
+  void push() override { Z3_solver_push(Context.Context, Solver); }
+
+  void pop(unsigned NumStates = 1) override {
+    assert(Z3_solver_get_num_scopes(Context.Context, Solver) >= NumStates);
+    Z3_solver_pop(Context.Context, Solver, NumStates);
+  }
+
+ bool isFPSupported() override { return true; }
+
+ /// Reset the solver and remove all constraints.
+ void reset() override { Z3_solver_reset(Context.Context, Solver); }
+
+ void print(raw_ostream &OS) const override {
+ OS << Z3_solver_to_string(Context.Context, Solver);
+ }
+}; // end class Z3Solver
+
+} // end anonymous namespace
+
+#endif
+
+SMTSolverRef clang::ento::CreateZ3Solver() {
+#if CLANG_ANALYZER_WITH_Z3
+ return llvm::make_unique<Z3Solver>();
+#else
+ llvm::report_fatal_error("Clang was not compiled with Z3 support, rebuild "
+ "with -DCLANG_ANALYZER_ENABLE_Z3_SOLVER=ON",
+ false);
+ return nullptr;
+#endif
+}
+
+std::unique_ptr<ConstraintManager>
+ento::CreateZ3ConstraintManager(ProgramStateManager &StMgr, SubEngine *Eng) {
+ return llvm::make_unique<SMTConstraintManager>(Eng, StMgr.getSValBuilder());
+}
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index f5685e4d4f..7d9dc429a4 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -144,7 +144,7 @@ if (CLANG_ENABLE_STATIC_ANALYZER)
set_target_properties(check-clang-analyzer PROPERTIES FOLDER "Clang tests")
- if (LLVM_WITH_Z3)
+ if (CLANG_ANALYZER_WITH_Z3)
add_lit_testsuite(check-clang-analyzer-z3 "Running the Clang analyzer tests, using Z3 as a solver"
${CMAKE_CURRENT_BINARY_DIR}/Analysis
PARAMS ${ANALYZER_TEST_PARAMS_Z3}
diff --git a/test/lit.site.cfg.py.in b/test/lit.site.cfg.py.in
index 953fd8dd33..fd89fd13a6 100644
--- a/test/lit.site.cfg.py.in
+++ b/test/lit.site.cfg.py.in
@@ -20,7 +20,7 @@ config.have_zlib = @HAVE_LIBZ@
config.clang_arcmt = @CLANG_ENABLE_ARCMT@
config.clang_default_cxx_stdlib = "@CLANG_DEFAULT_CXX_STDLIB@"
config.clang_staticanalyzer = @CLANG_ENABLE_STATIC_ANALYZER@
-config.clang_staticanalyzer_z3 = "@LLVM_WITH_Z3@"
+config.clang_staticanalyzer_z3 = "@CLANG_ANALYZER_WITH_Z3@"
config.clang_examples = @CLANG_BUILD_EXAMPLES@
config.enable_shared = @ENABLE_SHARED@
config.enable_backtrace = @ENABLE_BACKTRACES@