author     Jordan Rupprecht <rupprecht@google.com>    2018-12-19 02:24:12 +0000
committer  Jordan Rupprecht <rupprecht@google.com>    2018-12-19 02:24:12 +0000
commit     55c8788102d8fd203270fabd6513247b2d7fbd87 (patch)
tree       63ab727404da1afaca89c6578f37f135a50922e7 /lib
parent     9fa0a1f211b7c9a402767bc895a87481cf347230 (diff)
parent     46efdf2ccc2a80aefebf8433dbf9c7c959f6e629 (diff)
Creating branches/google/stable and tags/google/stable/2018-12-18 from r349201
git-svn-id: https://llvm.org/svn/llvm-project/cfe/branches/google/stable@349597 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib')
-rw-r--r--lib/ARCMigrate/ARCMT.cpp6
-rw-r--r--lib/ARCMigrate/FileRemapper.cpp2
-rw-r--r--lib/ARCMigrate/TransAutoreleasePool.cpp4
-rw-r--r--lib/ARCMigrate/TransRetainReleaseDealloc.cpp7
-rw-r--r--lib/ARCMigrate/TransUnbridgedCasts.cpp2
-rw-r--r--lib/ARCMigrate/Transforms.cpp4
-rw-r--r--lib/AST/ASTContext.cpp518
-rw-r--r--lib/AST/ASTDumper.cpp1085
-rw-r--r--lib/AST/ASTImporter.cpp7441
-rw-r--r--lib/AST/ASTStructuralEquivalence.cpp10
-rw-r--r--lib/AST/CMakeLists.txt5
-rw-r--r--lib/AST/CommentParser.cpp1
-rw-r--r--lib/AST/Decl.cpp98
-rw-r--r--lib/AST/DeclBase.cpp19
-rw-r--r--lib/AST/DeclCXX.cpp20
-rw-r--r--lib/AST/DeclPrinter.cpp8
-rw-r--r--lib/AST/DeclTemplate.cpp34
-rw-r--r--lib/AST/Expr.cpp440
-rw-r--r--lib/AST/ExprCXX.cpp22
-rw-r--r--lib/AST/ExprClassification.cpp3
-rw-r--r--lib/AST/ExprConstant.cpp450
-rw-r--r--lib/AST/ExternalASTMerger.cpp10
-rw-r--r--lib/AST/FormatString.cpp (renamed from lib/Analysis/FormatString.cpp)42
-rw-r--r--lib/AST/FormatStringParsing.h (renamed from lib/Analysis/FormatStringParsing.h)6
-rw-r--r--lib/AST/ItaniumMangle.cpp34
-rw-r--r--lib/AST/Mangle.cpp6
-rw-r--r--lib/AST/MicrosoftMangle.cpp94
-rw-r--r--lib/AST/NSAPI.cpp9
-rw-r--r--lib/AST/NestedNameSpecifier.cpp30
-rw-r--r--lib/AST/ODRHash.cpp2
-rw-r--r--lib/AST/OSLog.cpp (renamed from lib/Analysis/OSLog.cpp)21
-rw-r--r--lib/AST/OpenMPClause.cpp448
-rw-r--r--lib/AST/ParentMap.cpp2
-rw-r--r--lib/AST/PrintfFormatString.cpp (renamed from lib/Analysis/PrintfFormatString.cpp)70
-rw-r--r--lib/AST/RawCommentList.cpp2
-rw-r--r--lib/AST/RecordLayoutBuilder.cpp13
-rw-r--r--lib/AST/ScanfFormatString.cpp (renamed from lib/Analysis/ScanfFormatString.cpp)2
-rw-r--r--lib/AST/Stmt.cpp288
-rw-r--r--lib/AST/StmtOpenMP.cpp12
-rw-r--r--lib/AST/StmtPrinter.cpp446
-rw-r--r--lib/AST/StmtProfile.cpp18
-rw-r--r--lib/AST/TextNodeDumper.cpp285
-rw-r--r--lib/AST/Type.cpp118
-rw-r--r--lib/AST/TypeLoc.cpp3
-rw-r--r--lib/AST/TypePrinter.cpp24
-rw-r--r--lib/AST/VTableBuilder.cpp27
-rw-r--r--lib/ASTMatchers/ASTMatchFinder.cpp23
-rw-r--r--lib/ASTMatchers/ASTMatchersInternal.cpp12
-rw-r--r--lib/ASTMatchers/Dynamic/Parser.cpp14
-rw-r--r--lib/ASTMatchers/Dynamic/Registry.cpp42
-rw-r--r--lib/Analysis/BodyFarm.cpp38
-rw-r--r--lib/Analysis/CFG.cpp64
-rw-r--r--lib/Analysis/CMakeLists.txt4
-rw-r--r--lib/Analysis/CallGraph.cpp2
-rw-r--r--lib/Analysis/CloneDetection.cpp5
-rw-r--r--lib/Analysis/LiveVariables.cpp4
-rw-r--r--lib/Analysis/ProgramPoint.cpp4
-rw-r--r--lib/Analysis/ReachableCode.cpp2
-rw-r--r--lib/Analysis/ThreadSafety.cpp124
-rw-r--r--lib/Analysis/ThreadSafetyCommon.cpp2
-rw-r--r--lib/Basic/Attributes.cpp9
-rw-r--r--lib/Basic/Builtins.cpp2
-rw-r--r--lib/Basic/CMakeLists.txt3
-rw-r--r--lib/Basic/CodeGenOptions.cpp (renamed from lib/Frontend/CodeGenOptions.cpp)4
-rw-r--r--lib/Basic/Cuda.cpp15
-rw-r--r--lib/Basic/Diagnostic.cpp8
-rw-r--r--lib/Basic/FileManager.cpp67
-rw-r--r--lib/Basic/FileSystemStatCache.cpp16
-rw-r--r--lib/Basic/IdentifierTable.cpp81
-rw-r--r--lib/Basic/Module.cpp2
-rw-r--r--lib/Basic/OpenMPKinds.cpp22
-rw-r--r--lib/Basic/SourceManager.cpp80
-rw-r--r--lib/Basic/Targets.cpp8
-rw-r--r--lib/Basic/Targets/AArch64.cpp13
-rw-r--r--lib/Basic/Targets/AArch64.h1
-rw-r--r--lib/Basic/Targets/AMDGPU.cpp3
-rw-r--r--lib/Basic/Targets/ARC.cpp25
-rw-r--r--lib/Basic/Targets/ARC.h74
-rw-r--r--lib/Basic/Targets/ARM.cpp1
-rw-r--r--lib/Basic/Targets/Hexagon.cpp18
-rw-r--r--lib/Basic/Targets/Mips.h7
-rw-r--r--lib/Basic/Targets/NVPTX.cpp3
-rw-r--r--lib/Basic/Targets/OSTargets.cpp2
-rw-r--r--lib/Basic/Targets/OSTargets.h45
-rw-r--r--lib/Basic/Targets/PPC.cpp30
-rw-r--r--lib/Basic/Targets/PPC.h3
-rw-r--r--lib/Basic/Targets/X86.cpp20
-rw-r--r--lib/Basic/Targets/X86.h4
-rw-r--r--lib/Basic/VirtualFileSystem.cpp2142
-rw-r--r--lib/CodeGen/BackendUtil.cpp50
-rw-r--r--lib/CodeGen/CGAtomic.cpp2
-rw-r--r--lib/CodeGen/CGBlocks.cpp262
-rw-r--r--lib/CodeGen/CGBuiltin.cpp1075
-rw-r--r--lib/CodeGen/CGCUDANV.cpp33
-rw-r--r--lib/CodeGen/CGCXX.cpp4
-rw-r--r--lib/CodeGen/CGCall.cpp93
-rw-r--r--lib/CodeGen/CGCall.h14
-rw-r--r--lib/CodeGen/CGClass.cpp22
-rw-r--r--lib/CodeGen/CGCleanup.cpp4
-rw-r--r--lib/CodeGen/CGDebugInfo.cpp286
-rw-r--r--lib/CodeGen/CGDebugInfo.h37
-rw-r--r--lib/CodeGen/CGDecl.cpp112
-rw-r--r--lib/CodeGen/CGDeclCXX.cpp91
-rw-r--r--lib/CodeGen/CGException.cpp16
-rw-r--r--lib/CodeGen/CGExpr.cpp63
-rw-r--r--lib/CodeGen/CGExprAgg.cpp9
-rw-r--r--lib/CodeGen/CGExprCXX.cpp27
-rw-r--r--lib/CodeGen/CGExprComplex.cpp8
-rw-r--r--lib/CodeGen/CGExprConstant.cpp36
-rw-r--r--lib/CodeGen/CGExprScalar.cpp434
-rw-r--r--lib/CodeGen/CGLoopInfo.cpp10
-rw-r--r--lib/CodeGen/CGNonTrivialStruct.cpp7
-rw-r--r--lib/CodeGen/CGObjC.cpp217
-rw-r--r--lib/CodeGen/CGObjCMac.cpp20
-rw-r--r--lib/CodeGen/CGObjCRuntime.cpp2
-rw-r--r--lib/CodeGen/CGOpenCLRuntime.cpp40
-rw-r--r--lib/CodeGen/CGOpenCLRuntime.h4
-rw-r--r--lib/CodeGen/CGOpenMPRuntime.cpp334
-rw-r--r--lib/CodeGen/CGOpenMPRuntime.h79
-rw-r--r--lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp1832
-rw-r--r--lib/CodeGen/CGOpenMPRuntimeNVPTX.h82
-rw-r--r--lib/CodeGen/CGRecordLayoutBuilder.cpp2
-rw-r--r--lib/CodeGen/CGStmt.cpp13
-rw-r--r--lib/CodeGen/CGStmtOpenMP.cpp184
-rw-r--r--lib/CodeGen/CGVTables.cpp13
-rw-r--r--lib/CodeGen/CGValue.h5
-rw-r--r--lib/CodeGen/CMakeLists.txt1
-rw-r--r--lib/CodeGen/CodeGenABITypes.cpp1
-rw-r--r--lib/CodeGen/CodeGenAction.cpp16
-rw-r--r--lib/CodeGen/CodeGenFunction.cpp95
-rw-r--r--lib/CodeGen/CodeGenFunction.h66
-rw-r--r--lib/CodeGen/CodeGenModule.cpp427
-rw-r--r--lib/CodeGen/CodeGenModule.h27
-rw-r--r--lib/CodeGen/CodeGenPGO.h1
-rw-r--r--lib/CodeGen/CodeGenTBAA.cpp2
-rw-r--r--lib/CodeGen/CodeGenTypes.cpp3
-rw-r--r--lib/CodeGen/CodeGenTypes.h1
-rw-r--r--lib/CodeGen/ConstantEmitter.h3
-rw-r--r--lib/CodeGen/CoverageMappingGen.cpp73
-rw-r--r--lib/CodeGen/CoverageMappingGen.h1
-rw-r--r--lib/CodeGen/ItaniumCXXABI.cpp73
-rw-r--r--lib/CodeGen/MacroPPCallbacks.cpp17
-rw-r--r--lib/CodeGen/MicrosoftCXXABI.cpp11
-rw-r--r--lib/CodeGen/ModuleBuilder.cpp2
-rw-r--r--lib/CodeGen/ObjectFilePCHContainerOperations.cpp5
-rw-r--r--lib/CodeGen/SwiftCallingConv.cpp40
-rw-r--r--lib/CodeGen/TargetInfo.cpp211
-rw-r--r--lib/CodeGen/VarBypassDetector.cpp2
-rw-r--r--lib/CrossTU/CrossTranslationUnit.cpp105
-rw-r--r--lib/Driver/CMakeLists.txt1
-rw-r--r--lib/Driver/Compilation.cpp2
-rw-r--r--lib/Driver/Distro.cpp6
-rw-r--r--lib/Driver/Driver.cpp234
-rw-r--r--lib/Driver/Job.cpp19
-rw-r--r--lib/Driver/SanitizerArgs.cpp23
-rw-r--r--lib/Driver/ToolChain.cpp24
-rw-r--r--lib/Driver/ToolChains/AMDGPU.cpp2
-rw-r--r--lib/Driver/ToolChains/Arch/AArch64.cpp81
-rw-r--r--lib/Driver/ToolChains/Arch/ARM.cpp29
-rw-r--r--lib/Driver/ToolChains/Arch/ARM.h2
-rw-r--r--lib/Driver/ToolChains/Arch/Mips.cpp8
-rw-r--r--lib/Driver/ToolChains/Arch/PPC.cpp10
-rw-r--r--lib/Driver/ToolChains/Arch/PPC.h2
-rw-r--r--lib/Driver/ToolChains/BareMetal.cpp7
-rw-r--r--lib/Driver/ToolChains/Clang.cpp385
-rw-r--r--lib/Driver/ToolChains/CommonArgs.cpp115
-rw-r--r--lib/Driver/ToolChains/CommonArgs.h6
-rw-r--r--lib/Driver/ToolChains/CrossWindows.cpp1
-rw-r--r--lib/Driver/ToolChains/Cuda.cpp91
-rw-r--r--lib/Driver/ToolChains/Cuda.h2
-rw-r--r--lib/Driver/ToolChains/Darwin.cpp50
-rw-r--r--lib/Driver/ToolChains/Darwin.h4
-rw-r--r--lib/Driver/ToolChains/FreeBSD.cpp2
-rw-r--r--lib/Driver/ToolChains/Fuchsia.cpp22
-rw-r--r--lib/Driver/ToolChains/Gnu.cpp138
-rw-r--r--lib/Driver/ToolChains/HIP.cpp47
-rw-r--r--lib/Driver/ToolChains/HIP.h5
-rw-r--r--lib/Driver/ToolChains/Hexagon.cpp12
-rw-r--r--lib/Driver/ToolChains/Hexagon.h3
-rw-r--r--lib/Driver/ToolChains/Hurd.cpp169
-rw-r--r--lib/Driver/ToolChains/Hurd.h46
-rw-r--r--lib/Driver/ToolChains/Linux.cpp127
-rw-r--r--lib/Driver/ToolChains/Linux.h1
-rw-r--r--lib/Driver/ToolChains/MSVC.cpp10
-rw-r--r--lib/Driver/ToolChains/MSVC.h13
-rw-r--r--lib/Driver/ToolChains/MinGW.cpp60
-rw-r--r--lib/Driver/ToolChains/MinGW.h6
-rw-r--r--lib/Driver/ToolChains/Minix.cpp4
-rw-r--r--lib/Driver/ToolChains/NetBSD.cpp2
-rw-r--r--lib/Driver/ToolChains/OpenBSD.cpp66
-rw-r--r--lib/Driver/ToolChains/OpenBSD.h8
-rw-r--r--lib/Driver/XRayArgs.cpp9
-rw-r--r--lib/Edit/RewriteObjCFoundationAPI.cpp7
-rw-r--r--lib/Format/ContinuationIndenter.cpp38
-rw-r--r--lib/Format/Format.cpp207
-rw-r--r--lib/Format/FormatToken.h15
-rw-r--r--lib/Format/FormatTokenLexer.cpp11
-rw-r--r--lib/Format/FormatTokenLexer.h4
-rw-r--r--lib/Format/TokenAnnotator.cpp59
-rw-r--r--lib/Format/UnwrappedLineFormatter.cpp7
-rw-r--r--lib/Format/UnwrappedLineParser.cpp29
-rw-r--r--lib/Format/UnwrappedLineParser.h1
-rw-r--r--lib/Frontend/ASTConsumers.cpp339
-rw-r--r--lib/Frontend/ASTUnit.cpp44
-rw-r--r--lib/Frontend/CMakeLists.txt3
-rw-r--r--lib/Frontend/CacheTokens.cpp700
-rw-r--r--lib/Frontend/ChainedIncludesSource.cpp1
-rw-r--r--lib/Frontend/CompilerInstance.cpp41
-rw-r--r--lib/Frontend/CompilerInvocation.cpp324
-rw-r--r--lib/Frontend/CreateInvocationFromCommandLine.cpp2
-rw-r--r--lib/Frontend/DependencyFile.cpp1
-rw-r--r--lib/Frontend/FrontendAction.cpp18
-rw-r--r--lib/Frontend/FrontendActions.cpp16
-rw-r--r--lib/Frontend/InitHeaderSearch.cpp27
-rw-r--r--lib/Frontend/InitPreprocessor.cpp38
-rw-r--r--lib/Frontend/ModuleDependencyCollector.cpp4
-rw-r--r--lib/Frontend/PrecompiledPreamble.cpp36
-rw-r--r--lib/Frontend/PrintPreprocessedOutput.cpp5
-rw-r--r--lib/Frontend/Rewrite/InclusionRewriter.cpp1
-rw-r--r--lib/Frontend/Rewrite/RewriteModernObjC.cpp15
-rw-r--r--lib/Frontend/Rewrite/RewriteObjC.cpp2
-rw-r--r--lib/FrontendTool/ExecuteCompilerInvocation.cpp17
-rw-r--r--lib/Headers/CMakeLists.txt6
-rw-r--r--lib/Headers/adxintrin.h4
-rw-r--r--lib/Headers/altivec.h123
-rw-r--r--lib/Headers/avx512bwintrin.h37
-rw-r--r--lib/Headers/avx512fintrin.h36
-rw-r--r--lib/Headers/avx512vlbwintrin.h75
-rw-r--r--lib/Headers/avx512vlintrin.h253
-rw-r--r--lib/Headers/cuda_wrappers/new6
-rw-r--r--lib/Headers/emmintrin.h107
-rw-r--r--lib/Headers/intrin.h525
-rw-r--r--lib/Headers/lzcntintrin.h16
-rw-r--r--lib/Headers/opencl-c.h642
-rw-r--r--lib/Index/CommentToXML.cpp1
-rw-r--r--lib/Index/IndexTypeSourceInfo.cpp3
-rw-r--r--lib/Index/SimpleFormatContext.h10
-rw-r--r--lib/Index/USRGeneration.cpp26
-rw-r--r--lib/Lex/CMakeLists.txt1
-rw-r--r--lib/Lex/HeaderSearch.cpp15
-rw-r--r--lib/Lex/Lexer.cpp4
-rw-r--r--lib/Lex/LiteralSupport.cpp2
-rw-r--r--lib/Lex/MacroInfo.cpp3
-rw-r--r--lib/Lex/ModuleMap.cpp16
-rw-r--r--lib/Lex/PPDirectives.cpp160
-rw-r--r--lib/Lex/PPLexerChange.cpp105
-rw-r--r--lib/Lex/PPMacroExpansion.cpp5
-rw-r--r--lib/Lex/PTHLexer.cpp748
-rw-r--r--lib/Lex/Pragma.cpp16
-rw-r--r--lib/Lex/Preprocessor.cpp14
-rw-r--r--lib/Parse/ParseCXXInlineMethods.cpp10
-rw-r--r--lib/Parse/ParseDecl.cpp56
-rw-r--r--lib/Parse/ParseDeclCXX.cpp148
-rw-r--r--lib/Parse/ParseExpr.cpp46
-rw-r--r--lib/Parse/ParseExprCXX.cpp39
-rw-r--r--lib/Parse/ParseInit.cpp6
-rw-r--r--lib/Parse/ParseObjc.cpp20
-rw-r--r--lib/Parse/ParseOpenMP.cpp127
-rw-r--r--lib/Parse/ParsePragma.cpp141
-rw-r--r--lib/Parse/ParseStmt.cpp45
-rw-r--r--lib/Parse/ParseTemplate.cpp35
-rw-r--r--lib/Parse/ParseTentative.cpp20
-rw-r--r--lib/Parse/Parser.cpp16
-rw-r--r--lib/Rewrite/HTMLRewrite.cpp2
-rw-r--r--lib/Rewrite/RewriteRope.cpp4
-rw-r--r--lib/Sema/AnalysisBasedWarnings.cpp7
-rw-r--r--lib/Sema/CMakeLists.txt1
-rw-r--r--lib/Sema/CodeCompleteConsumer.cpp190
-rw-r--r--lib/Sema/DeclSpec.cpp10
-rw-r--r--lib/Sema/ParsedAttr.cpp39
-rw-r--r--lib/Sema/ScopeInfo.cpp2
-rw-r--r--lib/Sema/Sema.cpp104
-rw-r--r--lib/Sema/SemaAccess.cpp31
-rw-r--r--lib/Sema/SemaAttr.cpp72
-rw-r--r--lib/Sema/SemaCast.cpp52
-rw-r--r--lib/Sema/SemaChecking.cpp1762
-rw-r--r--lib/Sema/SemaCodeComplete.cpp2789
-rw-r--r--lib/Sema/SemaCoroutine.cpp17
-rw-r--r--lib/Sema/SemaDecl.cpp512
-rw-r--r--lib/Sema/SemaDeclAttr.cpp235
-rw-r--r--lib/Sema/SemaDeclCXX.cpp161
-rw-r--r--lib/Sema/SemaDeclObjC.cpp4
-rw-r--r--lib/Sema/SemaExceptionSpec.cpp3
-rw-r--r--lib/Sema/SemaExpr.cpp642
-rw-r--r--lib/Sema/SemaExprCXX.cpp84
-rw-r--r--lib/Sema/SemaExprMember.cpp30
-rw-r--r--lib/Sema/SemaExprObjC.cpp4
-rw-r--r--lib/Sema/SemaInit.cpp221
-rw-r--r--lib/Sema/SemaLambda.cpp42
-rw-r--r--lib/Sema/SemaLookup.cpp15
-rw-r--r--lib/Sema/SemaOpenMP.cpp375
-rw-r--r--lib/Sema/SemaOverload.cpp230
-rw-r--r--lib/Sema/SemaStmt.cpp63
-rw-r--r--lib/Sema/SemaStmtAsm.cpp74
-rw-r--r--lib/Sema/SemaStmtAttr.cpp1
-rw-r--r--lib/Sema/SemaTemplate.cpp62
-rw-r--r--lib/Sema/SemaTemplateDeduction.cpp9
-rw-r--r--lib/Sema/SemaTemplateInstantiate.cpp32
-rw-r--r--lib/Sema/SemaTemplateInstantiateDecl.cpp64
-rw-r--r--lib/Sema/SemaTemplateVariadic.cpp1
-rw-r--r--lib/Sema/SemaType.cpp101
-rw-r--r--lib/Sema/TreeTransform.h101
-rw-r--r--lib/Serialization/ASTCommon.cpp5
-rw-r--r--lib/Serialization/ASTReader.cpp41
-rw-r--r--lib/Serialization/ASTReaderDecl.cpp27
-rw-r--r--lib/Serialization/ASTReaderStmt.cpp216
-rw-r--r--lib/Serialization/ASTWriter.cpp24
-rw-r--r--lib/Serialization/ASTWriterDecl.cpp3
-rw-r--r--lib/Serialization/ASTWriterStmt.cpp115
-rw-r--r--lib/Serialization/CMakeLists.txt1
-rw-r--r--lib/Serialization/GlobalModuleIndex.cpp2
-rw-r--r--lib/Serialization/ModuleManager.cpp6
-rw-r--r--lib/Serialization/PCHContainerOperations.cpp (renamed from lib/Frontend/PCHContainerOperations.cpp)4
-rw-r--r--lib/StaticAnalyzer/Checkers/AllocationDiagnostics.cpp24
-rw-r--r--lib/StaticAnalyzer/Checkers/AllocationDiagnostics.h31
-rw-r--r--lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp4
-rw-r--r--lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp6
-rw-r--r--lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp5
-rw-r--r--lib/StaticAnalyzer/Checkers/CMakeLists.txt4
-rw-r--r--lib/StaticAnalyzer/Checkers/CStringChecker.cpp66
-rw-r--r--lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp20
-rw-r--r--lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp7
-rw-r--r--lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/ClangCheckers.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/ClangSACheckers.h2
-rw-r--r--lib/StaticAnalyzer/Checkers/CloneChecker.cpp14
-rw-r--r--lib/StaticAnalyzer/Checkers/ConversionChecker.cpp55
-rw-r--r--lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp5
-rw-r--r--lib/StaticAnalyzer/Checkers/DebugCheckers.cpp4
-rw-r--r--lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp13
-rw-r--r--lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp9
-rw-r--r--lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp5
-rw-r--r--lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp128
-rw-r--r--lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/IteratorChecker.cpp308
-rw-r--r--lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp3
-rw-r--r--lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp3
-rw-r--r--lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp3
-rw-r--r--lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp44
-rw-r--r--lib/StaticAnalyzer/Checkers/MallocChecker.cpp67
-rw-r--r--lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp9
-rw-r--r--lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp6
-rw-r--r--lib/StaticAnalyzer/Checkers/MoveChecker.cpp (renamed from lib/StaticAnalyzer/Checkers/MisusedMovedObjectChecker.cpp)357
-rw-r--r--lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp6
-rw-r--r--lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp32
-rw-r--r--lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp7
-rw-r--r--lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp6
-rw-r--r--lib/StaticAnalyzer/Checkers/PaddingChecker.cpp41
-rw-r--r--lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp272
-rw-r--r--lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h75
-rw-r--r--lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp519
-rw-r--r--lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h116
-rw-r--r--lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/RunLoopAutoreleaseLeakChecker.cpp34
-rw-r--r--lib/StaticAnalyzer/Checkers/StreamChecker.cpp34
-rw-r--r--lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp22
-rw-r--r--lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp4
-rw-r--r--lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObject.h18
-rw-r--r--lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp116
-rw-r--r--lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedPointee.cpp81
-rw-r--r--lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp3
-rw-r--r--lib/StaticAnalyzer/Core/AnalysisManager.cpp21
-rw-r--r--lib/StaticAnalyzer/Core/AnalyzerOptions.cpp490
-rw-r--r--lib/StaticAnalyzer/Core/BasicValueFactory.cpp2
-rw-r--r--lib/StaticAnalyzer/Core/BugReporter.cpp32
-rw-r--r--lib/StaticAnalyzer/Core/BugReporterVisitors.cpp85
-rw-r--r--lib/StaticAnalyzer/Core/CMakeLists.txt6
-rw-r--r--lib/StaticAnalyzer/Core/CallEvent.cpp9
-rw-r--r--lib/StaticAnalyzer/Core/Checker.cpp2
-rw-r--r--lib/StaticAnalyzer/Core/CheckerManager.cpp4
-rw-r--r--lib/StaticAnalyzer/Core/CheckerRegistry.cpp59
-rw-r--r--lib/StaticAnalyzer/Core/CoreEngine.cpp20
-rw-r--r--lib/StaticAnalyzer/Core/DynamicTypeMap.cpp5
-rw-r--r--lib/StaticAnalyzer/Core/Environment.cpp9
-rw-r--r--lib/StaticAnalyzer/Core/ExplodedGraph.cpp8
-rw-r--r--lib/StaticAnalyzer/Core/ExprEngine.cpp111
-rw-r--r--lib/StaticAnalyzer/Core/ExprEngineC.cpp14
-rw-r--r--lib/StaticAnalyzer/Core/ExprEngineCXX.cpp12
-rw-r--r--lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp33
-rw-r--r--lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp2
-rw-r--r--lib/StaticAnalyzer/Core/MemRegion.cpp9
-rw-r--r--lib/StaticAnalyzer/Core/PlistDiagnostics.cpp850
-rw-r--r--lib/StaticAnalyzer/Core/ProgramState.cpp26
-rw-r--r--lib/StaticAnalyzer/Core/RangeConstraintManager.cpp2
-rw-r--r--lib/StaticAnalyzer/Core/RangedConstraintManager.cpp5
-rw-r--r--lib/StaticAnalyzer/Core/RegionStore.cpp27
-rw-r--r--lib/StaticAnalyzer/Core/RetainSummaryManager.cpp352
-rw-r--r--lib/StaticAnalyzer/Core/SValBuilder.cpp6
-rw-r--r--lib/StaticAnalyzer/Core/SVals.cpp2
-rw-r--r--lib/StaticAnalyzer/Core/SarifDiagnostics.cpp332
-rw-r--r--lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp13
-rw-r--r--lib/StaticAnalyzer/Core/Store.cpp2
-rw-r--r--lib/StaticAnalyzer/Core/SymbolManager.cpp20
-rw-r--r--lib/StaticAnalyzer/Core/TaintManager.cpp23
-rw-r--r--lib/StaticAnalyzer/Core/WorkList.cpp62
-rw-r--r--lib/StaticAnalyzer/Core/Z3ConstraintManager.cpp224
-rw-r--r--lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp4
-rw-r--r--lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp101
-rw-r--r--lib/StaticAnalyzer/Frontend/ModelInjector.cpp2
-rw-r--r--lib/StaticAnalyzer/README.txt12
-rw-r--r--lib/Tooling/ASTDiff/ASTDiff.cpp5
-rw-r--r--lib/Tooling/AllTUsExecution.cpp16
-rw-r--r--lib/Tooling/CMakeLists.txt1
-rw-r--r--lib/Tooling/Core/Diagnostic.cpp9
-rw-r--r--lib/Tooling/Core/Replacement.cpp6
-rw-r--r--lib/Tooling/Execution.cpp2
-rw-r--r--lib/Tooling/Inclusions/HeaderIncludes.cpp3
-rw-r--r--lib/Tooling/StandaloneExecution.cpp2
-rw-r--r--lib/Tooling/Tooling.cpp44
416 files changed, 25073 insertions, 18693 deletions
diff --git a/lib/ARCMigrate/ARCMT.cpp b/lib/ARCMigrate/ARCMT.cpp
index 3c7b593be6..6da87903a4 100644
--- a/lib/ARCMigrate/ARCMT.cpp
+++ b/lib/ARCMigrate/ARCMT.cpp
@@ -190,8 +190,6 @@ createInvocationForMigration(CompilerInvocation &origCI,
PPOpts.Includes.insert(PPOpts.Includes.begin(), OriginalFile);
PPOpts.ImplicitPCHInclude.clear();
}
- // FIXME: Get the original header of a PTH as well.
- CInvok->getPreprocessorOpts().ImplicitPTHInclude.clear();
std::string define = getARCMTMacroName();
define += '=';
CInvok->getPreprocessorOpts().addMacroDef(define);
@@ -241,7 +239,7 @@ bool arcmt::checkForManualIssues(
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
DiagnosticConsumer *DiagClient, bool emitPremigrationARCErrors,
StringRef plistOut) {
- if (!origCI.getLangOpts()->ObjC1)
+ if (!origCI.getLangOpts()->ObjC)
return false;
LangOptions::GCMode OrigGCMode = origCI.getLangOpts()->getGC();
@@ -342,7 +340,7 @@ applyTransforms(CompilerInvocation &origCI, const FrontendInputFile &Input,
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
DiagnosticConsumer *DiagClient, StringRef outputDir,
bool emitPremigrationARCErrors, StringRef plistOut) {
- if (!origCI.getLangOpts()->ObjC1)
+ if (!origCI.getLangOpts()->ObjC)
return false;
LangOptions::GCMode OrigGCMode = origCI.getLangOpts()->getGC();
diff --git a/lib/ARCMigrate/FileRemapper.cpp b/lib/ARCMigrate/FileRemapper.cpp
index ccc8c9ee30..225f47119b 100644
--- a/lib/ARCMigrate/FileRemapper.cpp
+++ b/lib/ARCMigrate/FileRemapper.cpp
@@ -226,7 +226,7 @@ void FileRemapper::remap(const FileEntry *file, const FileEntry *newfile) {
const FileEntry *FileRemapper::getOriginalFile(StringRef filePath) {
const FileEntry *file = FileMgr->getFile(filePath);
- // If we are updating a file that overriden an original file,
+ // If we are updating a file that overridden an original file,
// actually update the original file.
llvm::DenseMap<const FileEntry *, const FileEntry *>::iterator
I = ToFromMappings.find(file);
diff --git a/lib/ARCMigrate/TransAutoreleasePool.cpp b/lib/ARCMigrate/TransAutoreleasePool.cpp
index 1acf63cc2b..9d20774a89 100644
--- a/lib/ARCMigrate/TransAutoreleasePool.cpp
+++ b/lib/ARCMigrate/TransAutoreleasePool.cpp
@@ -403,8 +403,8 @@ private:
return cast<Expr>(getEssential((Stmt*)E));
}
static Stmt *getEssential(Stmt *S) {
- if (ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(S))
- S = EWC->getSubExpr();
+ if (FullExpr *FE = dyn_cast<FullExpr>(S))
+ S = FE->getSubExpr();
if (Expr *E = dyn_cast<Expr>(S))
S = E->IgnoreParenCasts();
return S;
diff --git a/lib/ARCMigrate/TransRetainReleaseDealloc.cpp b/lib/ARCMigrate/TransRetainReleaseDealloc.cpp
index f2aceaa795..d199bb9365 100644
--- a/lib/ARCMigrate/TransRetainReleaseDealloc.cpp
+++ b/lib/ARCMigrate/TransRetainReleaseDealloc.cpp
@@ -113,6 +113,7 @@ public:
return true;
}
}
+ break;
case OMF_dealloc:
break;
}
@@ -253,7 +254,7 @@ private:
}
while (OuterS && (isa<ParenExpr>(OuterS) ||
isa<CastExpr>(OuterS) ||
- isa<ExprWithCleanups>(OuterS)));
+ isa<FullExpr>(OuterS)));
if (!OuterS)
return std::make_pair(prevStmt, nextStmt);
@@ -376,8 +377,8 @@ private:
RecContainer = StmtE;
Rec = Init->IgnoreParenImpCasts();
- if (ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(Rec))
- Rec = EWC->getSubExpr()->IgnoreParenImpCasts();
+ if (FullExpr *FE = dyn_cast<FullExpr>(Rec))
+ Rec = FE->getSubExpr()->IgnoreParenImpCasts();
RecRange = Rec->getSourceRange();
if (SM.isMacroArgExpansion(RecRange.getBegin()))
RecRange.setBegin(SM.getImmediateSpellingLoc(RecRange.getBegin()));
diff --git a/lib/ARCMigrate/TransUnbridgedCasts.cpp b/lib/ARCMigrate/TransUnbridgedCasts.cpp
index 631eceb03f..9d46d8c5fc 100644
--- a/lib/ARCMigrate/TransUnbridgedCasts.cpp
+++ b/lib/ARCMigrate/TransUnbridgedCasts.cpp
@@ -372,7 +372,7 @@ private:
Stmt *parent = E;
do {
parent = StmtMap->getParentIgnoreParenImpCasts(parent);
- } while (parent && isa<ExprWithCleanups>(parent));
+ } while (parent && isa<FullExpr>(parent));
if (ReturnStmt *retS = dyn_cast_or_null<ReturnStmt>(parent)) {
std::string note = "remove the cast and change return type of function "
diff --git a/lib/ARCMigrate/Transforms.cpp b/lib/ARCMigrate/Transforms.cpp
index a403744de7..8bd2b407ae 100644
--- a/lib/ARCMigrate/Transforms.cpp
+++ b/lib/ARCMigrate/Transforms.cpp
@@ -74,8 +74,8 @@ bool trans::isPlusOneAssign(const BinaryOperator *E) {
bool trans::isPlusOne(const Expr *E) {
if (!E)
return false;
- if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(E))
- E = EWC->getSubExpr();
+ if (const FullExpr *FE = dyn_cast<FullExpr>(E))
+ E = FE->getSubExpr();
if (const ObjCMessageExpr *
ME = dyn_cast<ObjCMessageExpr>(E->IgnoreParenCasts()))
diff --git a/lib/AST/ASTContext.cpp b/lib/AST/ASTContext.cpp
index 3d0d1e666c..fae1cf863c 100644
--- a/lib/AST/ASTContext.cpp
+++ b/lib/AST/ASTContext.cpp
@@ -796,11 +796,10 @@ ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
CompCategories(this_()), LastSDM(nullptr, 0) {
TUDecl = TranslationUnitDecl::Create(*this);
+ TraversalScope = {TUDecl};
}
ASTContext::~ASTContext() {
- ReleaseParentMapEntries();
-
// Release the DenseMaps associated with DeclContext objects.
// FIXME: Is this the ideal solution?
ReleaseDeclContextMaps();
@@ -838,22 +837,80 @@ ASTContext::~ASTContext() {
Value.second->~PerModuleInitializers();
}
-void ASTContext::ReleaseParentMapEntries() {
- if (!PointerParents) return;
- for (const auto &Entry : *PointerParents) {
- if (Entry.second.is<ast_type_traits::DynTypedNode *>()) {
- delete Entry.second.get<ast_type_traits::DynTypedNode *>();
- } else if (Entry.second.is<ParentVector *>()) {
- delete Entry.second.get<ParentVector *>();
+class ASTContext::ParentMap {
+ /// Contains parents of a node.
+ using ParentVector = llvm::SmallVector<ast_type_traits::DynTypedNode, 2>;
+
+ /// Maps from a node to its parents. This is used for nodes that have
+ /// pointer identity only, which are more common and we can save space by
+ /// only storing a unique pointer to them.
+ using ParentMapPointers = llvm::DenseMap<
+ const void *,
+ llvm::PointerUnion4<const Decl *, const Stmt *,
+ ast_type_traits::DynTypedNode *, ParentVector *>>;
+
+ /// Parent map for nodes without pointer identity. We store a full
+ /// DynTypedNode for all keys.
+ using ParentMapOtherNodes = llvm::DenseMap<
+ ast_type_traits::DynTypedNode,
+ llvm::PointerUnion4<const Decl *, const Stmt *,
+ ast_type_traits::DynTypedNode *, ParentVector *>>;
+
+ ParentMapPointers PointerParents;
+ ParentMapOtherNodes OtherParents;
+ class ASTVisitor;
+
+ static ast_type_traits::DynTypedNode
+ getSingleDynTypedNodeFromParentMap(ParentMapPointers::mapped_type U) {
+ if (const auto *D = U.dyn_cast<const Decl *>())
+ return ast_type_traits::DynTypedNode::create(*D);
+ if (const auto *S = U.dyn_cast<const Stmt *>())
+ return ast_type_traits::DynTypedNode::create(*S);
+ return *U.get<ast_type_traits::DynTypedNode *>();
+ }
+
+ template <typename NodeTy, typename MapTy>
+ static ASTContext::DynTypedNodeList getDynNodeFromMap(const NodeTy &Node,
+ const MapTy &Map) {
+ auto I = Map.find(Node);
+ if (I == Map.end()) {
+ return llvm::ArrayRef<ast_type_traits::DynTypedNode>();
+ }
+ if (const auto *V = I->second.template dyn_cast<ParentVector *>()) {
+ return llvm::makeArrayRef(*V);
+ }
+ return getSingleDynTypedNodeFromParentMap(I->second);
+ }
+
+public:
+ ParentMap(ASTContext &Ctx);
+ ~ParentMap() {
+ for (const auto &Entry : PointerParents) {
+ if (Entry.second.is<ast_type_traits::DynTypedNode *>()) {
+ delete Entry.second.get<ast_type_traits::DynTypedNode *>();
+ } else if (Entry.second.is<ParentVector *>()) {
+ delete Entry.second.get<ParentVector *>();
+ }
}
- }
- for (const auto &Entry : *OtherParents) {
- if (Entry.second.is<ast_type_traits::DynTypedNode *>()) {
- delete Entry.second.get<ast_type_traits::DynTypedNode *>();
- } else if (Entry.second.is<ParentVector *>()) {
- delete Entry.second.get<ParentVector *>();
+ for (const auto &Entry : OtherParents) {
+ if (Entry.second.is<ast_type_traits::DynTypedNode *>()) {
+ delete Entry.second.get<ast_type_traits::DynTypedNode *>();
+ } else if (Entry.second.is<ParentVector *>()) {
+ delete Entry.second.get<ParentVector *>();
+ }
}
}
+
+ DynTypedNodeList getParents(const ast_type_traits::DynTypedNode &Node) {
+ if (Node.getNodeKind().hasPointerIdentity())
+ return getDynNodeFromMap(Node.getMemoizationData(), PointerParents);
+ return getDynNodeFromMap(Node, OtherParents);
+ }
+};
+
+void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
+ TraversalScope = TopLevelDecls;
+ Parents.reset();
}
void ASTContext::AddDeallocation(void (*Callback)(void*), void *Data) {
@@ -1241,6 +1298,10 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent);
InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue);
InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID);
+
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ InitBuiltinType(Id##Ty, BuiltinType::Id);
+#include "clang/Basic/OpenCLExtensionTypes.def"
}
// Builtin type for __objc_yes and __objc_no
@@ -1892,6 +1953,9 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id:
+#include "clang/Basic/OpenCLExtensionTypes.def"
AS = getTargetAddressSpace(
Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T)));
Width = Target->getPointerWidth(AS);
@@ -2295,12 +2359,11 @@ structHasUniqueObjectRepresentations(const ASTContext &Context,
}
}
- llvm::sort(
- Bases.begin(), Bases.end(), [&](const std::pair<QualType, int64_t> &L,
- const std::pair<QualType, int64_t> &R) {
- return Layout.getBaseClassOffset(L.first->getAsCXXRecordDecl()) <
- Layout.getBaseClassOffset(R.first->getAsCXXRecordDecl());
- });
+ llvm::sort(Bases, [&](const std::pair<QualType, int64_t> &L,
+ const std::pair<QualType, int64_t> &R) {
+ return Layout.getBaseClassOffset(L.first->getAsCXXRecordDecl()) <
+ Layout.getBaseClassOffset(R.first->getAsCXXRecordDecl());
+ });
for (const auto Base : Bases) {
int64_t BaseOffset = Context.toBits(
@@ -3698,30 +3761,20 @@ QualType ASTContext::getFunctionTypeInternal(
assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
}
- // FunctionProtoType objects are allocated with extra bytes after
- // them for three variable size arrays at the end:
- // - parameter types
- // - exception types
- // - extended parameter information
- // Instead of the exception types, there could be a noexcept
- // expression, or information used to resolve the exception
- // specification.
- size_t Size =
- sizeof(FunctionProtoType) + NumArgs * sizeof(QualType) +
- FunctionProtoType::getExceptionSpecSize(
- EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size());
-
- // Put the ExtParameterInfos last. If all were equal, it would make
- // more sense to put these before the exception specification, because
- // it's much easier to skip past them compared to the elaborate switch
- // required to skip the exception specification. However, all is not
- // equal; ExtParameterInfos are used to model very uncommon features,
- // and it's better not to burden the more common paths.
- if (EPI.ExtParameterInfos) {
- Size += NumArgs * sizeof(FunctionProtoType::ExtParameterInfo);
- }
-
- auto *FTP = (FunctionProtoType *) Allocate(Size, TypeAlignment);
+ // Compute the needed size to hold this FunctionProtoType and the
+ // various trailing objects.
+ auto ESH = FunctionProtoType::getExceptionSpecSize(
+ EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size());
+ size_t Size = FunctionProtoType::totalSizeToAlloc<
+ QualType, FunctionType::FunctionTypeExtraBitfields,
+ FunctionType::ExceptionType, Expr *, FunctionDecl *,
+ FunctionProtoType::ExtParameterInfo, Qualifiers>(
+ NumArgs, FunctionProtoType::hasExtraBitfields(EPI.ExceptionSpec.Type),
+ ESH.NumExceptionType, ESH.NumExprPtr, ESH.NumFunctionDeclPtr,
+ EPI.ExtParameterInfos ? NumArgs : 0,
+ EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0);
+
+ auto *FTP = (FunctionProtoType *)Allocate(Size, TypeAlignment);
FunctionProtoType::ExtProtoInfo newEPI = EPI;
new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI);
Types.push_back(FTP);
@@ -5103,7 +5156,7 @@ bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2) {
return true;
}
- if (getLangOpts().ObjC1) {
+ if (getLangOpts().ObjC) {
const auto *T1OPType = T1->getAs<ObjCObjectPointerType>();
const auto *T2OPType = T2->getAs<ObjCObjectPointerType>();
if (T1OPType && T2OPType) {
@@ -5767,50 +5820,86 @@ int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const {
}
TypedefDecl *ASTContext::getCFConstantStringDecl() const {
- if (!CFConstantStringTypeDecl) {
- assert(!CFConstantStringTagDecl &&
- "tag and typedef should be initialized together");
- CFConstantStringTagDecl = buildImplicitRecord("__NSConstantString_tag");
- CFConstantStringTagDecl->startDefinition();
-
- QualType FieldTypes[4];
- const char *FieldNames[4];
-
- // const int *isa;
- FieldTypes[0] = getPointerType(IntTy.withConst());
- FieldNames[0] = "isa";
- // int flags;
- FieldTypes[1] = IntTy;
- FieldNames[1] = "flags";
- // const char *str;
- FieldTypes[2] = getPointerType(CharTy.withConst());
- FieldNames[2] = "str";
- // long length;
- FieldTypes[3] = LongTy;
- FieldNames[3] = "length";
-
- // Create fields
- for (unsigned i = 0; i < 4; ++i) {
- FieldDecl *Field = FieldDecl::Create(*this, CFConstantStringTagDecl,
- SourceLocation(),
- SourceLocation(),
- &Idents.get(FieldNames[i]),
- FieldTypes[i], /*TInfo=*/nullptr,
- /*BitWidth=*/nullptr,
- /*Mutable=*/false,
- ICIS_NoInit);
- Field->setAccess(AS_public);
- CFConstantStringTagDecl->addDecl(Field);
- }
+ if (CFConstantStringTypeDecl)
+ return CFConstantStringTypeDecl;
+
+ assert(!CFConstantStringTagDecl &&
+ "tag and typedef should be initialized together");
+ CFConstantStringTagDecl = buildImplicitRecord("__NSConstantString_tag");
+ CFConstantStringTagDecl->startDefinition();
- CFConstantStringTagDecl->completeDefinition();
- // This type is designed to be compatible with NSConstantString, but cannot
- // use the same name, since NSConstantString is an interface.
- auto tagType = getTagDeclType(CFConstantStringTagDecl);
- CFConstantStringTypeDecl =
- buildImplicitTypedef(tagType, "__NSConstantString");
+ struct {
+ QualType Type;
+ const char *Name;
+ } Fields[5];
+ unsigned Count = 0;
+
+ /// Objective-C ABI
+ ///
+ /// typedef struct __NSConstantString_tag {
+ /// const int *isa;
+ /// int flags;
+ /// const char *str;
+ /// long length;
+ /// } __NSConstantString;
+ ///
+ /// Swift ABI (4.1, 4.2)
+ ///
+ /// typedef struct __NSConstantString_tag {
+ /// uintptr_t _cfisa;
+ /// uintptr_t _swift_rc;
+ /// _Atomic(uint64_t) _cfinfoa;
+ /// const char *_ptr;
+ /// uint32_t _length;
+ /// } __NSConstantString;
+ ///
+ /// Swift ABI (5.0)
+ ///
+ /// typedef struct __NSConstantString_tag {
+ /// uintptr_t _cfisa;
+ /// uintptr_t _swift_rc;
+ /// _Atomic(uint64_t) _cfinfoa;
+ /// const char *_ptr;
+ /// uintptr_t _length;
+ /// } __NSConstantString;
+
+ const auto CFRuntime = getLangOpts().CFRuntime;
+ if (static_cast<unsigned>(CFRuntime) <
+ static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift)) {
+ Fields[Count++] = { getPointerType(IntTy.withConst()), "isa" };
+ Fields[Count++] = { IntTy, "flags" };
+ Fields[Count++] = { getPointerType(CharTy.withConst()), "str" };
+ Fields[Count++] = { LongTy, "length" };
+ } else {
+ Fields[Count++] = { getUIntPtrType(), "_cfisa" };
+ Fields[Count++] = { getUIntPtrType(), "_swift_rc" };
+ Fields[Count++] = { getFromTargetType(Target->getUInt64Type()), "_swift_rc" };
+ Fields[Count++] = { getPointerType(CharTy.withConst()), "_ptr" };
+ if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 ||
+ CFRuntime == LangOptions::CoreFoundationABI::Swift4_2)
+ Fields[Count++] = { IntTy, "_ptr" };
+ else
+ Fields[Count++] = { getUIntPtrType(), "_ptr" };
+ }
+
+ // Create fields
+ for (unsigned i = 0; i < Count; ++i) {
+ FieldDecl *Field =
+ FieldDecl::Create(*this, CFConstantStringTagDecl, SourceLocation(),
+ SourceLocation(), &Idents.get(Fields[i].Name),
+ Fields[i].Type, /*TInfo=*/nullptr,
+ /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit);
+ Field->setAccess(AS_public);
+ CFConstantStringTagDecl->addDecl(Field);
}
+ CFConstantStringTagDecl->completeDefinition();
+ // This type is designed to be compatible with NSConstantString, but cannot
+ // use the same name, since NSConstantString is an interface.
+ auto tagType = getTagDeclType(CFConstantStringTagDecl);
+ CFConstantStringTypeDecl =
+ buildImplicitTypedef(tagType, "__NSConstantString");
+
return CFConstantStringTypeDecl;
}
@@ -6004,7 +6093,7 @@ bool ASTContext::BlockRequiresCopying(QualType Ty,
bool ASTContext::getByrefLifetime(QualType Ty,
Qualifiers::ObjCLifetime &LifeTime,
bool &HasByrefExtendedLayout) const {
- if (!getLangOpts().ObjC1 ||
+ if (!getLangOpts().ObjC ||
getLangOpts().getGC() != LangOptions::NonGC)
return false;
@@ -6476,6 +6565,9 @@ static char getObjCEncodingForPrimitiveKind(const ASTContext *C,
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id:
+#include "clang/Basic/OpenCLExtensionTypes.def"
case BuiltinType::OCLEvent:
case BuiltinType::OCLClkEvent:
case BuiltinType::OCLQueue:
@@ -7682,7 +7774,7 @@ Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const {
if (getLangOpts().getGC() == LangOptions::NonGC)
return Qualifiers::GCNone;
- assert(getLangOpts().ObjC1);
+ assert(getLangOpts().ObjC);
Qualifiers::GC GCAttrs = Ty.getObjCGCAttr();
// Default behaviour under objective-C's gc is for ObjC pointers
@@ -8038,7 +8130,7 @@ void getIntersectionOfProtocols(ASTContext &Context,
// Also add the protocols associated with the LHS interface.
Context.CollectInheritedProtocols(LHS->getInterface(), LHSProtocolSet);
- // Add all of the protocls for the RHS.
+ // Add all of the protocols for the RHS.
llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSProtocolSet;
// Start with the protocol qualifiers.
@@ -9826,10 +9918,10 @@ void ASTContext::forEachMultiversionedFunctionVersion(
llvm::function_ref<void(FunctionDecl *)> Pred) const {
assert(FD->isMultiVersion() && "Only valid for multiversioned functions");
llvm::SmallDenseSet<const FunctionDecl*, 4> SeenDecls;
- FD = FD->getCanonicalDecl();
+ FD = FD->getMostRecentDecl();
for (auto *CurDecl :
FD->getDeclContext()->getRedeclContext()->lookup(FD->getDeclName())) {
- FunctionDecl *CurFD = CurDecl->getAsFunction()->getCanonicalDecl();
+ FunctionDecl *CurFD = CurDecl->getAsFunction()->getMostRecentDecl();
if (CurFD && hasSameType(CurFD->getType(), FD->getType()) &&
std::end(SeenDecls) == llvm::find(SeenDecls, CurFD)) {
SeenDecls.insert(CurFD);
@@ -10066,21 +10158,10 @@ bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const {
return (Size != Align || toBits(sizeChars) > MaxInlineWidthInBits);
}
-static ast_type_traits::DynTypedNode getSingleDynTypedNodeFromParentMap(
- ASTContext::ParentMapPointers::mapped_type U) {
- if (const auto *D = U.dyn_cast<const Decl *>())
- return ast_type_traits::DynTypedNode::create(*D);
- if (const auto *S = U.dyn_cast<const Stmt *>())
- return ast_type_traits::DynTypedNode::create(*S);
- return *U.get<ast_type_traits::DynTypedNode *>();
-}
-
-namespace {
-
/// Template specializations to abstract away from pointers and TypeLocs.
/// @{
template <typename T>
-ast_type_traits::DynTypedNode createDynTypedNode(const T &Node) {
+static ast_type_traits::DynTypedNode createDynTypedNode(const T &Node) {
return ast_type_traits::DynTypedNode::create(*Node);
}
template <>
@@ -10094,160 +10175,121 @@ createDynTypedNode(const NestedNameSpecifierLoc &Node) {
}
/// @}
- /// A \c RecursiveASTVisitor that builds a map from nodes to their
- /// parents as defined by the \c RecursiveASTVisitor.
- ///
- /// Note that the relationship described here is purely in terms of AST
- /// traversal - there are other relationships (for example declaration context)
- /// in the AST that are better modeled by special matchers.
- ///
- /// FIXME: Currently only builds up the map using \c Stmt and \c Decl nodes.
- class ParentMapASTVisitor : public RecursiveASTVisitor<ParentMapASTVisitor> {
- public:
- /// Builds and returns the translation unit's parent map.
- ///
- /// The caller takes ownership of the returned \c ParentMap.
- static std::pair<ASTContext::ParentMapPointers *,
- ASTContext::ParentMapOtherNodes *>
- buildMap(TranslationUnitDecl &TU) {
- ParentMapASTVisitor Visitor(new ASTContext::ParentMapPointers,
- new ASTContext::ParentMapOtherNodes);
- Visitor.TraverseDecl(&TU);
- return std::make_pair(Visitor.Parents, Visitor.OtherParents);
- }
+/// A \c RecursiveASTVisitor that builds a map from nodes to their
+/// parents as defined by the \c RecursiveASTVisitor.
+///
+/// Note that the relationship described here is purely in terms of AST
+/// traversal - there are other relationships (for example declaration context)
+/// in the AST that are better modeled by special matchers.
+///
+/// FIXME: Currently only builds up the map using \c Stmt and \c Decl nodes.
+class ASTContext::ParentMap::ASTVisitor
+ : public RecursiveASTVisitor<ASTVisitor> {
+public:
+ ASTVisitor(ParentMap &Map) : Map(Map) {}
- private:
- friend class RecursiveASTVisitor<ParentMapASTVisitor>;
+private:
+ friend class RecursiveASTVisitor<ASTVisitor>;
- using VisitorBase = RecursiveASTVisitor<ParentMapASTVisitor>;
+ using VisitorBase = RecursiveASTVisitor<ASTVisitor>;
- ParentMapASTVisitor(ASTContext::ParentMapPointers *Parents,
- ASTContext::ParentMapOtherNodes *OtherParents)
- : Parents(Parents), OtherParents(OtherParents) {}
+ bool shouldVisitTemplateInstantiations() const { return true; }
- bool shouldVisitTemplateInstantiations() const {
- return true;
- }
+ bool shouldVisitImplicitCode() const { return true; }
- bool shouldVisitImplicitCode() const {
+ template <typename T, typename MapNodeTy, typename BaseTraverseFn,
+ typename MapTy>
+ bool TraverseNode(T Node, MapNodeTy MapNode, BaseTraverseFn BaseTraverse,
+ MapTy *Parents) {
+ if (!Node)
return true;
- }
-
- template <typename T, typename MapNodeTy, typename BaseTraverseFn,
- typename MapTy>
- bool TraverseNode(T Node, MapNodeTy MapNode,
- BaseTraverseFn BaseTraverse, MapTy *Parents) {
- if (!Node)
- return true;
- if (ParentStack.size() > 0) {
- // FIXME: Currently we add the same parent multiple times, but only
- // when no memoization data is available for the type.
- // For example when we visit all subexpressions of template
- // instantiations; this is suboptimal, but benign: the only way to
- // visit those is with hasAncestor / hasParent, and those do not create
- // new matches.
- // The plan is to enable DynTypedNode to be storable in a map or hash
- // map. The main problem there is to implement hash functions /
- // comparison operators for all types that DynTypedNode supports that
- // do not have pointer identity.
- auto &NodeOrVector = (*Parents)[MapNode];
- if (NodeOrVector.isNull()) {
- if (const auto *D = ParentStack.back().get<Decl>())
- NodeOrVector = D;
- else if (const auto *S = ParentStack.back().get<Stmt>())
- NodeOrVector = S;
- else
- NodeOrVector =
- new ast_type_traits::DynTypedNode(ParentStack.back());
- } else {
- if (!NodeOrVector.template is<ASTContext::ParentVector *>()) {
- auto *Vector = new ASTContext::ParentVector(
- 1, getSingleDynTypedNodeFromParentMap(NodeOrVector));
- delete NodeOrVector
- .template dyn_cast<ast_type_traits::DynTypedNode *>();
- NodeOrVector = Vector;
- }
-
- auto *Vector =
- NodeOrVector.template get<ASTContext::ParentVector *>();
- // Skip duplicates for types that have memoization data.
- // We must check that the type has memoization data before calling
- // std::find() because DynTypedNode::operator== can't compare all
- // types.
- bool Found = ParentStack.back().getMemoizationData() &&
- std::find(Vector->begin(), Vector->end(),
- ParentStack.back()) != Vector->end();
- if (!Found)
- Vector->push_back(ParentStack.back());
+ if (ParentStack.size() > 0) {
+ // FIXME: Currently we add the same parent multiple times, but only
+ // when no memoization data is available for the type.
+ // For example when we visit all subexpressions of template
+ // instantiations; this is suboptimal, but benign: the only way to
+ // visit those is with hasAncestor / hasParent, and those do not create
+ // new matches.
+ // The plan is to enable DynTypedNode to be storable in a map or hash
+ // map. The main problem there is to implement hash functions /
+ // comparison operators for all types that DynTypedNode supports that
+ // do not have pointer identity.
+ auto &NodeOrVector = (*Parents)[MapNode];
+ if (NodeOrVector.isNull()) {
+ if (const auto *D = ParentStack.back().get<Decl>())
+ NodeOrVector = D;
+ else if (const auto *S = ParentStack.back().get<Stmt>())
+ NodeOrVector = S;
+ else
+ NodeOrVector = new ast_type_traits::DynTypedNode(ParentStack.back());
+ } else {
+ if (!NodeOrVector.template is<ParentVector *>()) {
+ auto *Vector = new ParentVector(
+ 1, getSingleDynTypedNodeFromParentMap(NodeOrVector));
+ delete NodeOrVector
+ .template dyn_cast<ast_type_traits::DynTypedNode *>();
+ NodeOrVector = Vector;
}
- }
- ParentStack.push_back(createDynTypedNode(Node));
- bool Result = BaseTraverse();
- ParentStack.pop_back();
- return Result;
- }
- bool TraverseDecl(Decl *DeclNode) {
- return TraverseNode(DeclNode, DeclNode,
- [&] { return VisitorBase::TraverseDecl(DeclNode); },
- Parents);
+ auto *Vector = NodeOrVector.template get<ParentVector *>();
+ // Skip duplicates for types that have memoization data.
+ // We must check that the type has memoization data before calling
+ // std::find() because DynTypedNode::operator== can't compare all
+ // types.
+ bool Found = ParentStack.back().getMemoizationData() &&
+ std::find(Vector->begin(), Vector->end(),
+ ParentStack.back()) != Vector->end();
+ if (!Found)
+ Vector->push_back(ParentStack.back());
+ }
}
+ ParentStack.push_back(createDynTypedNode(Node));
+ bool Result = BaseTraverse();
+ ParentStack.pop_back();
+ return Result;
+ }
- bool TraverseStmt(Stmt *StmtNode) {
- return TraverseNode(StmtNode, StmtNode,
- [&] { return VisitorBase::TraverseStmt(StmtNode); },
- Parents);
- }
+ bool TraverseDecl(Decl *DeclNode) {
+ return TraverseNode(
+ DeclNode, DeclNode, [&] { return VisitorBase::TraverseDecl(DeclNode); },
+ &Map.PointerParents);
+ }
- bool TraverseTypeLoc(TypeLoc TypeLocNode) {
- return TraverseNode(
- TypeLocNode, ast_type_traits::DynTypedNode::create(TypeLocNode),
- [&] { return VisitorBase::TraverseTypeLoc(TypeLocNode); },
- OtherParents);
- }
+ bool TraverseStmt(Stmt *StmtNode) {
+ return TraverseNode(
+ StmtNode, StmtNode, [&] { return VisitorBase::TraverseStmt(StmtNode); },
+ &Map.PointerParents);
+ }
- bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNSLocNode) {
- return TraverseNode(
- NNSLocNode, ast_type_traits::DynTypedNode::create(NNSLocNode),
- [&] {
- return VisitorBase::TraverseNestedNameSpecifierLoc(NNSLocNode);
- },
- OtherParents);
- }
+ bool TraverseTypeLoc(TypeLoc TypeLocNode) {
+ return TraverseNode(
+ TypeLocNode, ast_type_traits::DynTypedNode::create(TypeLocNode),
+ [&] { return VisitorBase::TraverseTypeLoc(TypeLocNode); },
+ &Map.OtherParents);
+ }
- ASTContext::ParentMapPointers *Parents;
- ASTContext::ParentMapOtherNodes *OtherParents;
- llvm::SmallVector<ast_type_traits::DynTypedNode, 16> ParentStack;
- };
+ bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNSLocNode) {
+ return TraverseNode(
+ NNSLocNode, ast_type_traits::DynTypedNode::create(NNSLocNode),
+ [&] { return VisitorBase::TraverseNestedNameSpecifierLoc(NNSLocNode); },
+ &Map.OtherParents);
+ }
-} // namespace
+ ParentMap &Map;
+ llvm::SmallVector<ast_type_traits::DynTypedNode, 16> ParentStack;
+};
-template <typename NodeTy, typename MapTy>
-static ASTContext::DynTypedNodeList getDynNodeFromMap(const NodeTy &Node,
- const MapTy &Map) {
- auto I = Map.find(Node);
- if (I == Map.end()) {
- return llvm::ArrayRef<ast_type_traits::DynTypedNode>();
- }
- if (const auto *V =
- I->second.template dyn_cast<ASTContext::ParentVector *>()) {
- return llvm::makeArrayRef(*V);
- }
- return getSingleDynTypedNodeFromParentMap(I->second);
+ASTContext::ParentMap::ParentMap(ASTContext &Ctx) {
+ ASTVisitor(*this).TraverseAST(Ctx);
}
ASTContext::DynTypedNodeList
ASTContext::getParents(const ast_type_traits::DynTypedNode &Node) {
- if (!PointerParents) {
- // We always need to run over the whole translation unit, as
+ if (!Parents)
+ // We build the parent map for the traversal scope (usually whole TU), as
// hasAncestor can escape any subtree.
- auto Maps = ParentMapASTVisitor::buildMap(*getTranslationUnitDecl());
- PointerParents.reset(Maps.first);
- OtherParents.reset(Maps.second);
- }
- if (Node.getNodeKind().hasPointerIdentity())
- return getDynNodeFromMap(Node.getMemoizationData(), *PointerParents);
- return getDynNodeFromMap(Node, *OtherParents);
+ Parents = llvm::make_unique<ParentMap>(*this);
+ return Parents->getParents(Node);
}
bool
diff --git a/lib/AST/ASTDumper.cpp b/lib/AST/ASTDumper.cpp
index 38a2fe9caf..43d8d7d38f 100644
--- a/lib/AST/ASTDumper.cpp
+++ b/lib/AST/ASTDumper.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTDumperUtils.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CommentVisitor.h"
#include "clang/AST/DeclCXX.h"
@@ -22,6 +23,7 @@
#include "clang/AST/DeclVisitor.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/TextNodeDumper.h"
#include "clang/AST/TypeVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/Module.h"
@@ -35,178 +37,32 @@ using namespace clang::comments;
//===----------------------------------------------------------------------===//
namespace {
- // Colors used for various parts of the AST dump
- // Do not use bold yellow for any text. It is hard to read on white screens.
- struct TerminalColor {
- raw_ostream::Colors Color;
- bool Bold;
- };
+ class ASTDumper
+ : public ConstDeclVisitor<ASTDumper>,
+ public ConstStmtVisitor<ASTDumper>,
+ public ConstCommentVisitor<ASTDumper, void, const FullComment *>,
+ public TypeVisitor<ASTDumper> {
- // Red - CastColor
- // Green - TypeColor
- // Bold Green - DeclKindNameColor, UndeserializedColor
- // Yellow - AddressColor, LocationColor
- // Blue - CommentColor, NullColor, IndentColor
- // Bold Blue - AttrColor
- // Bold Magenta - StmtColor
- // Cyan - ValueKindColor, ObjectKindColor
- // Bold Cyan - ValueColor, DeclNameColor
-
- // Decl kind names (VarDecl, FunctionDecl, etc)
- static const TerminalColor DeclKindNameColor = { raw_ostream::GREEN, true };
- // Attr names (CleanupAttr, GuardedByAttr, etc)
- static const TerminalColor AttrColor = { raw_ostream::BLUE, true };
- // Statement names (DeclStmt, ImplicitCastExpr, etc)
- static const TerminalColor StmtColor = { raw_ostream::MAGENTA, true };
- // Comment names (FullComment, ParagraphComment, TextComment, etc)
- static const TerminalColor CommentColor = { raw_ostream::BLUE, false };
-
- // Type names (int, float, etc, plus user defined types)
- static const TerminalColor TypeColor = { raw_ostream::GREEN, false };
-
- // Pointer address
- static const TerminalColor AddressColor = { raw_ostream::YELLOW, false };
- // Source locations
- static const TerminalColor LocationColor = { raw_ostream::YELLOW, false };
-
- // lvalue/xvalue
- static const TerminalColor ValueKindColor = { raw_ostream::CYAN, false };
- // bitfield/objcproperty/objcsubscript/vectorcomponent
- static const TerminalColor ObjectKindColor = { raw_ostream::CYAN, false };
-
- // Null statements
- static const TerminalColor NullColor = { raw_ostream::BLUE, false };
-
- // Undeserialized entities
- static const TerminalColor UndeserializedColor = { raw_ostream::GREEN, true };
-
- // CastKind from CastExpr's
- static const TerminalColor CastColor = { raw_ostream::RED, false };
-
- // Value of the statement
- static const TerminalColor ValueColor = { raw_ostream::CYAN, true };
- // Decl names
- static const TerminalColor DeclNameColor = { raw_ostream::CYAN, true };
-
- // Indents ( `, -. | )
- static const TerminalColor IndentColor = { raw_ostream::BLUE, false };
+ TextTreeStructure TreeStructure;
+ TextNodeDumper NodeDumper;
- class ASTDumper
- : public ConstDeclVisitor<ASTDumper>, public ConstStmtVisitor<ASTDumper>,
- public ConstCommentVisitor<ASTDumper>, public TypeVisitor<ASTDumper> {
raw_ostream &OS;
- const CommandTraits *Traits;
- const SourceManager *SM;
/// The policy to use for printing; can be defaulted.
PrintingPolicy PrintPolicy;
- /// Pending[i] is an action to dump an entity at level i.
- llvm::SmallVector<std::function<void(bool isLastChild)>, 32> Pending;
-
/// Indicates whether we should trigger deserialization of nodes that had
/// not already been loaded.
bool Deserialize = false;
- /// Indicates whether we're at the top level.
- bool TopLevel = true;
-
- /// Indicates if we're handling the first child after entering a new depth.
- bool FirstChild = true;
-
- /// Prefix for currently-being-dumped entity.
- std::string Prefix;
-
- /// Keep track of the last location we print out so that we can
- /// print out deltas from then on out.
- const char *LastLocFilename = "";
- unsigned LastLocLine = ~0U;
-
- /// The \c FullComment parent of the comment being dumped.
- const FullComment *FC = nullptr;
-
- bool ShowColors;
+ const bool ShowColors;
/// Dump a child of the current node.
template<typename Fn> void dumpChild(Fn doDumpChild) {
- // If we're at the top level, there's nothing interesting to do; just
- // run the dumper.
- if (TopLevel) {
- TopLevel = false;
- doDumpChild();
- while (!Pending.empty()) {
- Pending.back()(true);
- Pending.pop_back();
- }
- Prefix.clear();
- OS << "\n";
- TopLevel = true;
- return;
- }
-
- const FullComment *OrigFC = FC;
- auto dumpWithIndent = [this, doDumpChild, OrigFC](bool isLastChild) {
- // Print out the appropriate tree structure and work out the prefix for
- // children of this node. For instance:
- //
- // A Prefix = ""
- // |-B Prefix = "| "
- // | `-C Prefix = "| "
- // `-D Prefix = " "
- // |-E Prefix = " | "
- // `-F Prefix = " "
- // G Prefix = ""
- //
- // Note that the first level gets no prefix.
- {
- OS << '\n';
- ColorScope Color(*this, IndentColor);
- OS << Prefix << (isLastChild ? '`' : '|') << '-';
- this->Prefix.push_back(isLastChild ? ' ' : '|');
- this->Prefix.push_back(' ');
- }
-
- FirstChild = true;
- unsigned Depth = Pending.size();
-
- FC = OrigFC;
- doDumpChild();
-
- // If any children are left, they're the last at their nesting level.
- // Dump those ones out now.
- while (Depth < Pending.size()) {
- Pending.back()(true);
- this->Pending.pop_back();
- }
-
- // Restore the old prefix.
- this->Prefix.resize(Prefix.size() - 2);
- };
-
- if (FirstChild) {
- Pending.push_back(std::move(dumpWithIndent));
- } else {
- Pending.back()(false);
- Pending.back() = std::move(dumpWithIndent);
- }
- FirstChild = false;
+ TreeStructure.addChild(doDumpChild);
}
- class ColorScope {
- ASTDumper &Dumper;
- public:
- ColorScope(ASTDumper &Dumper, TerminalColor Color)
- : Dumper(Dumper) {
- if (Dumper.ShowColors)
- Dumper.OS.changeColor(Color.Color, Color.Bold);
- }
- ~ColorScope() {
- if (Dumper.ShowColors)
- Dumper.OS.resetColor();
- }
- };
-
public:
ASTDumper(raw_ostream &OS, const CommandTraits *Traits,
const SourceManager *SM)
@@ -219,40 +75,43 @@ namespace {
ASTDumper(raw_ostream &OS, const CommandTraits *Traits,
const SourceManager *SM, bool ShowColors,
const PrintingPolicy &PrintPolicy)
- : OS(OS), Traits(Traits), SM(SM), PrintPolicy(PrintPolicy),
- ShowColors(ShowColors) {}
+ : TreeStructure(OS, ShowColors),
+ NodeDumper(OS, ShowColors, SM, PrintPolicy, Traits), OS(OS),
+ PrintPolicy(PrintPolicy), ShowColors(ShowColors) {}
void setDeserialize(bool D) { Deserialize = D; }
void dumpDecl(const Decl *D);
void dumpStmt(const Stmt *S);
- void dumpFullComment(const FullComment *C);
// Utilities
- void dumpPointer(const void *Ptr);
- void dumpSourceRange(SourceRange R);
- void dumpLocation(SourceLocation Loc);
- void dumpBareType(QualType T, bool Desugar = true);
- void dumpType(QualType T);
+ void dumpType(QualType T) { NodeDumper.dumpType(T); }
void dumpTypeAsChild(QualType T);
void dumpTypeAsChild(const Type *T);
- void dumpBareDeclRef(const Decl *Node);
void dumpDeclRef(const Decl *Node, const char *Label = nullptr);
- void dumpName(const NamedDecl *D);
- bool hasNodes(const DeclContext *DC);
+ void dumpBareDeclRef(const Decl *Node) { NodeDumper.dumpBareDeclRef(Node); }
void dumpDeclContext(const DeclContext *DC);
void dumpLookups(const DeclContext *DC, bool DumpDecls);
void dumpAttr(const Attr *A);
// C++ Utilities
- void dumpAccessSpecifier(AccessSpecifier AS);
void dumpCXXCtorInitializer(const CXXCtorInitializer *Init);
void dumpTemplateParameters(const TemplateParameterList *TPL);
void dumpTemplateArgumentListInfo(const TemplateArgumentListInfo &TALI);
- void dumpTemplateArgumentLoc(const TemplateArgumentLoc &A);
+ void dumpTemplateArgumentLoc(const TemplateArgumentLoc &A,
+ const Decl *From = nullptr,
+ const char *Label = nullptr);
void dumpTemplateArgumentList(const TemplateArgumentList &TAL);
void dumpTemplateArgument(const TemplateArgument &A,
- SourceRange R = SourceRange());
+ SourceRange R = SourceRange(),
+ const Decl *From = nullptr,
+ const char *Label = nullptr);
+ template <typename SpecializationDecl>
+ void dumpTemplateDeclSpecialization(const SpecializationDecl *D,
+ bool DumpExplicitInst,
+ bool DumpRefOnly);
+ template <typename TemplateDecl>
+ void dumpTemplateDecl(const TemplateDecl *D, bool DumpExplicitInst);
// Objective-C utilities.
void dumpObjCTypeParamList(const ObjCTypeParamList *typeParams);
@@ -294,20 +153,26 @@ namespace {
}
void VisitVariableArrayType(const VariableArrayType *T) {
OS << " ";
- dumpSourceRange(T->getBracketsRange());
+ NodeDumper.dumpSourceRange(T->getBracketsRange());
VisitArrayType(T);
dumpStmt(T->getSizeExpr());
}
void VisitDependentSizedArrayType(const DependentSizedArrayType *T) {
- VisitArrayType(T);
+ switch (T->getSizeModifier()) {
+ case ArrayType::Normal: break;
+ case ArrayType::Static: OS << " static"; break;
+ case ArrayType::Star: OS << " *"; break;
+ }
+ OS << " " << T->getIndexTypeQualifiers().getAsString();
OS << " ";
- dumpSourceRange(T->getBracketsRange());
+ NodeDumper.dumpSourceRange(T->getBracketsRange());
+ dumpTypeAsChild(T->getElementType());
dumpStmt(T->getSizeExpr());
}
void VisitDependentSizedExtVectorType(
const DependentSizedExtVectorType *T) {
OS << " ";
- dumpLocation(T->getAttributeLoc());
+ NodeDumper.dumpLocation(T->getAttributeLoc());
dumpTypeAsChild(T->getElementType());
dumpStmt(T->getSizeExpr());
}
@@ -334,9 +199,10 @@ namespace {
void VisitFunctionProtoType(const FunctionProtoType *T) {
auto EPI = T->getExtProtoInfo();
if (EPI.HasTrailingReturn) OS << " trailing_return";
- if (T->isConst()) OS << " const";
- if (T->isVolatile()) OS << " volatile";
- if (T->isRestrict()) OS << " restrict";
+
+ if (!T->getTypeQuals().empty())
+ OS << " " << T->getTypeQuals().getAsString();
+
switch (EPI.RefQualifier) {
case RQ_None: break;
case RQ_LValue: OS << " &"; break;
@@ -461,12 +327,6 @@ namespace {
void VisitTypeAliasTemplateDecl(const TypeAliasTemplateDecl *D);
void VisitCXXRecordDecl(const CXXRecordDecl *D);
void VisitStaticAssertDecl(const StaticAssertDecl *D);
- template<typename SpecializationDecl>
- void VisitTemplateDeclSpecialization(const SpecializationDecl *D,
- bool DumpExplicitInst,
- bool DumpRefOnly);
- template<typename TemplateDecl>
- void VisitTemplateDecl(const TemplateDecl *D, bool DumpExplicitInst);
void VisitFunctionTemplateDecl(const FunctionTemplateDecl *D);
void VisitClassTemplateDecl(const ClassTemplateDecl *D);
void VisitClassTemplateSpecializationDecl(
@@ -508,19 +368,22 @@ namespace {
void VisitBlockDecl(const BlockDecl *D);
// Stmts.
- void VisitStmt(const Stmt *Node);
void VisitDeclStmt(const DeclStmt *Node);
void VisitAttributedStmt(const AttributedStmt *Node);
+ void VisitIfStmt(const IfStmt *Node);
+ void VisitSwitchStmt(const SwitchStmt *Node);
+ void VisitWhileStmt(const WhileStmt *Node);
void VisitLabelStmt(const LabelStmt *Node);
void VisitGotoStmt(const GotoStmt *Node);
void VisitCXXCatchStmt(const CXXCatchStmt *Node);
+ void VisitCaseStmt(const CaseStmt *Node);
void VisitCapturedStmt(const CapturedStmt *Node);
// OpenMP
void VisitOMPExecutableDirective(const OMPExecutableDirective *Node);
// Exprs
- void VisitExpr(const Expr *Node);
+ void VisitCallExpr(const CallExpr *Node);
void VisitCastExpr(const CastExpr *Node);
void VisitImplicitCastExpr(const ImplicitCastExpr *Node);
void VisitDeclRefExpr(const DeclRefExpr *Node);
@@ -531,8 +394,6 @@ namespace {
void VisitFloatingLiteral(const FloatingLiteral *Node);
void VisitStringLiteral(const StringLiteral *Str);
void VisitInitListExpr(const InitListExpr *ILE);
- void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *ILE);
- void VisitArrayInitIndexExpr(const ArrayInitIndexExpr *ILE);
void VisitUnaryOperator(const UnaryOperator *Node);
void VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *Node);
void VisitMemberExpr(const MemberExpr *Node);
@@ -557,9 +418,7 @@ namespace {
void VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *Node);
void VisitExprWithCleanups(const ExprWithCleanups *Node);
void VisitUnresolvedLookupExpr(const UnresolvedLookupExpr *Node);
- void dumpCXXTemporary(const CXXTemporary *Temporary);
void VisitLambdaExpr(const LambdaExpr *Node) {
- VisitExpr(Node);
dumpDecl(Node->getLambdaClass());
}
void VisitSizeOfPackExpr(const SizeOfPackExpr *Node);
@@ -579,22 +438,7 @@ namespace {
void VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *Node);
// Comments.
- const char *getCommandName(unsigned CommandID);
- void dumpComment(const Comment *C);
-
- // Inline comments.
- void visitTextComment(const TextComment *C);
- void visitInlineCommandComment(const InlineCommandComment *C);
- void visitHTMLStartTagComment(const HTMLStartTagComment *C);
- void visitHTMLEndTagComment(const HTMLEndTagComment *C);
-
- // Block comments.
- void visitBlockCommandComment(const BlockCommandComment *C);
- void visitParamCommandComment(const ParamCommandComment *C);
- void visitTParamCommandComment(const TParamCommandComment *C);
- void visitVerbatimBlockComment(const VerbatimBlockComment *C);
- void visitVerbatimBlockLineComment(const VerbatimBlockLineComment *C);
- void visitVerbatimLineComment(const VerbatimLineComment *C);
+ void dumpComment(const Comment *C, const FullComment *FC);
};
}
@@ -602,77 +446,6 @@ namespace {
// Utilities
//===----------------------------------------------------------------------===//
-void ASTDumper::dumpPointer(const void *Ptr) {
- ColorScope Color(*this, AddressColor);
- OS << ' ' << Ptr;
-}
-
-void ASTDumper::dumpLocation(SourceLocation Loc) {
- if (!SM)
- return;
-
- ColorScope Color(*this, LocationColor);
- SourceLocation SpellingLoc = SM->getSpellingLoc(Loc);
-
- // The general format we print out is filename:line:col, but we drop pieces
- // that haven't changed since the last loc printed.
- PresumedLoc PLoc = SM->getPresumedLoc(SpellingLoc);
-
- if (PLoc.isInvalid()) {
- OS << "<invalid sloc>";
- return;
- }
-
- if (strcmp(PLoc.getFilename(), LastLocFilename) != 0) {
- OS << PLoc.getFilename() << ':' << PLoc.getLine()
- << ':' << PLoc.getColumn();
- LastLocFilename = PLoc.getFilename();
- LastLocLine = PLoc.getLine();
- } else if (PLoc.getLine() != LastLocLine) {
- OS << "line" << ':' << PLoc.getLine()
- << ':' << PLoc.getColumn();
- LastLocLine = PLoc.getLine();
- } else {
- OS << "col" << ':' << PLoc.getColumn();
- }
-}
-
-void ASTDumper::dumpSourceRange(SourceRange R) {
- // Can't translate locations if a SourceManager isn't available.
- if (!SM)
- return;
-
- OS << " <";
- dumpLocation(R.getBegin());
- if (R.getBegin() != R.getEnd()) {
- OS << ", ";
- dumpLocation(R.getEnd());
- }
- OS << ">";
-
- // <t2.c:123:421[blah], t2.c:412:321>
-
-}
-
-void ASTDumper::dumpBareType(QualType T, bool Desugar) {
- ColorScope Color(*this, TypeColor);
-
- SplitQualType T_split = T.split();
- OS << "'" << QualType::getAsString(T_split, PrintPolicy) << "'";
-
- if (Desugar && !T.isNull()) {
- // If the type is sugared, also dump a (shallow) desugared type.
- SplitQualType D_split = T.getSplitDesugaredType();
- if (T_split != D_split)
- OS << ":'" << QualType::getAsString(D_split, PrintPolicy) << "'";
- }
-}
-
-void ASTDumper::dumpType(QualType T) {
- OS << ' ';
- dumpBareType(T);
-}
-
void ASTDumper::dumpTypeAsChild(QualType T) {
SplitQualType SQT = T.split();
if (!SQT.Quals.hasQualifiers())
@@ -680,9 +453,9 @@ void ASTDumper::dumpTypeAsChild(QualType T) {
dumpChild([=] {
OS << "QualType";
- dumpPointer(T.getAsOpaquePtr());
+ NodeDumper.dumpPointer(T.getAsOpaquePtr());
OS << " ";
- dumpBareType(T, false);
+ NodeDumper.dumpBareType(T, false);
OS << " " << T.split().Quals.getAsString();
dumpTypeAsChild(T.split().Ty);
});
@@ -691,27 +464,27 @@ void ASTDumper::dumpTypeAsChild(QualType T) {
void ASTDumper::dumpTypeAsChild(const Type *T) {
dumpChild([=] {
if (!T) {
- ColorScope Color(*this, NullColor);
+ ColorScope Color(OS, ShowColors, NullColor);
OS << "<<<NULL>>>";
return;
}
if (const LocInfoType *LIT = llvm::dyn_cast<LocInfoType>(T)) {
{
- ColorScope Color(*this, TypeColor);
+ ColorScope Color(OS, ShowColors, TypeColor);
OS << "LocInfo Type";
}
- dumpPointer(T);
+ NodeDumper.dumpPointer(T);
dumpTypeAsChild(LIT->getTypeSourceInfo()->getType());
return;
}
{
- ColorScope Color(*this, TypeColor);
+ ColorScope Color(OS, ShowColors, TypeColor);
OS << T->getTypeClassName() << "Type";
}
- dumpPointer(T);
+ NodeDumper.dumpPointer(T);
OS << " ";
- dumpBareType(QualType(T, 0), false);
+ NodeDumper.dumpBareType(QualType(T, 0), false);
QualType SingleStepDesugar =
T->getLocallyUnqualifiedSingleStepDesugaredType();
@@ -735,28 +508,6 @@ void ASTDumper::dumpTypeAsChild(const Type *T) {
});
}
-void ASTDumper::dumpBareDeclRef(const Decl *D) {
- if (!D) {
- ColorScope Color(*this, NullColor);
- OS << "<<<NULL>>>";
- return;
- }
-
- {
- ColorScope Color(*this, DeclKindNameColor);
- OS << D->getDeclKindName();
- }
- dumpPointer(D);
-
- if (const NamedDecl *ND = dyn_cast<NamedDecl>(D)) {
- ColorScope Color(*this, DeclNameColor);
- OS << " '" << ND->getDeclName() << '\'';
- }
-
- if (const ValueDecl *VD = dyn_cast<ValueDecl>(D))
- dumpType(VD->getType());
-}
-
void ASTDumper::dumpDeclRef(const Decl *D, const char *Label) {
if (!D)
return;
@@ -768,22 +519,6 @@ void ASTDumper::dumpDeclRef(const Decl *D, const char *Label) {
});
}
-void ASTDumper::dumpName(const NamedDecl *ND) {
- if (ND->getDeclName()) {
- ColorScope Color(*this, DeclNameColor);
- OS << ' ' << ND->getNameAsString();
- }
-}
-
-bool ASTDumper::hasNodes(const DeclContext *DC) {
- if (!DC)
- return false;
-
- return DC->hasExternalLexicalStorage() ||
- (Deserialize ? DC->decls_begin() != DC->decls_end()
- : DC->noload_decls_begin() != DC->noload_decls_end());
-}
-
void ASTDumper::dumpDeclContext(const DeclContext *DC) {
if (!DC)
return;
@@ -792,8 +527,8 @@ void ASTDumper::dumpDeclContext(const DeclContext *DC) {
dumpDecl(D);
if (DC->hasExternalLexicalStorage()) {
- dumpChild([=]{
- ColorScope Color(*this, UndeserializedColor);
+ dumpChild([=] {
+ ColorScope Color(OS, ShowColors, UndeserializedColor);
OS << "<undeserialized declarations>";
});
}
@@ -802,12 +537,12 @@ void ASTDumper::dumpDeclContext(const DeclContext *DC) {
void ASTDumper::dumpLookups(const DeclContext *DC, bool DumpDecls) {
dumpChild([=] {
OS << "StoredDeclsMap ";
- dumpBareDeclRef(cast<Decl>(DC));
+ NodeDumper.dumpBareDeclRef(cast<Decl>(DC));
const DeclContext *Primary = DC->getPrimaryContext();
if (Primary != DC) {
OS << " primary";
- dumpPointer(cast<Decl>(Primary));
+ NodeDumper.dumpPointer(cast<Decl>(Primary));
}
bool HasUndeserializedLookups = Primary->hasExternalVisibleStorage();
@@ -822,14 +557,14 @@ void ASTDumper::dumpLookups(const DeclContext *DC, bool DumpDecls) {
dumpChild([=] {
OS << "DeclarationName ";
{
- ColorScope Color(*this, DeclNameColor);
+ ColorScope Color(OS, ShowColors, DeclNameColor);
OS << '\'' << Name << '\'';
}
for (DeclContextLookupResult::iterator RI = R.begin(), RE = R.end();
RI != RE; ++RI) {
dumpChild([=] {
- dumpBareDeclRef(*RI);
+ NodeDumper.dumpBareDeclRef(*RI);
if ((*RI)->isHidden())
OS << " hidden";
@@ -851,7 +586,7 @@ void ASTDumper::dumpLookups(const DeclContext *DC, bool DumpDecls) {
if (HasUndeserializedLookups) {
dumpChild([=] {
- ColorScope Color(*this, UndeserializedColor);
+ ColorScope Color(OS, ShowColors, UndeserializedColor);
OS << "<undeserialized lookups>";
});
}
@@ -861,7 +596,7 @@ void ASTDumper::dumpLookups(const DeclContext *DC, bool DumpDecls) {
void ASTDumper::dumpAttr(const Attr *A) {
dumpChild([=] {
{
- ColorScope Color(*this, AttrColor);
+ ColorScope Color(OS, ShowColors, AttrColor);
switch (A->getKind()) {
#define ATTR(X) case attr::X: OS << #X; break;
@@ -869,8 +604,8 @@ void ASTDumper::dumpAttr(const Attr *A) {
}
OS << "Attr";
}
- dumpPointer(A);
- dumpSourceRange(A->getRange());
+ NodeDumper.dumpPointer(A);
+ NodeDumper.dumpSourceRange(A->getRange());
if (A->isInherited())
OS << " Inherited";
if (A->isImplicit())
@@ -912,32 +647,16 @@ static void dumpPreviousDecl(raw_ostream &OS, const Decl *D) {
// C++ Utilities
//===----------------------------------------------------------------------===//
-void ASTDumper::dumpAccessSpecifier(AccessSpecifier AS) {
- switch (AS) {
- case AS_none:
- break;
- case AS_public:
- OS << "public";
- break;
- case AS_protected:
- OS << "protected";
- break;
- case AS_private:
- OS << "private";
- break;
- }
-}
-
void ASTDumper::dumpCXXCtorInitializer(const CXXCtorInitializer *Init) {
dumpChild([=] {
OS << "CXXCtorInitializer";
if (Init->isAnyMemberInitializer()) {
OS << ' ';
- dumpBareDeclRef(Init->getAnyMember());
+ NodeDumper.dumpBareDeclRef(Init->getAnyMember());
} else if (Init->isBaseInitializer()) {
- dumpType(QualType(Init->getBaseClass(), 0));
+ NodeDumper.dumpType(QualType(Init->getBaseClass(), 0));
} else if (Init->isDelegatingInitializer()) {
- dumpType(Init->getTypeSourceInfo()->getType());
+ NodeDumper.dumpType(Init->getTypeSourceInfo()->getType());
} else {
llvm_unreachable("Unknown initializer type");
}
@@ -960,8 +679,9 @@ void ASTDumper::dumpTemplateArgumentListInfo(
dumpTemplateArgumentLoc(TALI[i]);
}
-void ASTDumper::dumpTemplateArgumentLoc(const TemplateArgumentLoc &A) {
- dumpTemplateArgument(A.getArgument(), A.getSourceRange());
+void ASTDumper::dumpTemplateArgumentLoc(const TemplateArgumentLoc &A,
+ const Decl *From, const char *Label) {
+ dumpTemplateArgument(A.getArgument(), A.getSourceRange(), From, Label);
}
void ASTDumper::dumpTemplateArgumentList(const TemplateArgumentList &TAL) {
@@ -969,11 +689,15 @@ void ASTDumper::dumpTemplateArgumentList(const TemplateArgumentList &TAL) {
dumpTemplateArgument(TAL[i]);
}
-void ASTDumper::dumpTemplateArgument(const TemplateArgument &A, SourceRange R) {
+void ASTDumper::dumpTemplateArgument(const TemplateArgument &A, SourceRange R,
+ const Decl *From, const char *Label) {
dumpChild([=] {
OS << "TemplateArgument";
if (R.isValid())
- dumpSourceRange(R);
+ NodeDumper.dumpSourceRange(R);
+
+ if (From)
+ dumpDeclRef(From, Label);
switch (A.getKind()) {
case TemplateArgument::Null:
@@ -981,7 +705,7 @@ void ASTDumper::dumpTemplateArgument(const TemplateArgument &A, SourceRange R) {
break;
case TemplateArgument::Type:
OS << " type";
- dumpType(A.getAsType());
+ NodeDumper.dumpType(A.getAsType());
break;
case TemplateArgument::Declaration:
OS << " decl";
@@ -1034,22 +758,22 @@ void ASTDumper::dumpObjCTypeParamList(const ObjCTypeParamList *typeParams) {
void ASTDumper::dumpDecl(const Decl *D) {
dumpChild([=] {
if (!D) {
- ColorScope Color(*this, NullColor);
+ ColorScope Color(OS, ShowColors, NullColor);
OS << "<<<NULL>>>";
return;
}
{
- ColorScope Color(*this, DeclKindNameColor);
+ ColorScope Color(OS, ShowColors, DeclKindNameColor);
OS << D->getDeclKindName() << "Decl";
}
- dumpPointer(D);
+ NodeDumper.dumpPointer(D);
if (D->getLexicalDeclContext() != D->getDeclContext())
OS << " parent " << cast<Decl>(D->getDeclContext());
dumpPreviousDecl(OS, D);
- dumpSourceRange(D->getSourceRange());
+ NodeDumper.dumpSourceRange(D->getSourceRange());
OS << ' ';
- dumpLocation(D->getLocation());
+ NodeDumper.dumpLocation(D->getLocation());
if (D->isFromASTFile())
OS << " imported";
if (Module *M = D->getOwningModule())
@@ -1082,22 +806,25 @@ void ASTDumper::dumpDecl(const Decl *D) {
if (const FullComment *Comment =
D->getASTContext().getLocalCommentForDeclUncached(D))
- dumpFullComment(Comment);
+ dumpComment(Comment, Comment);
// Decls within functions are visited by the body.
- if (!isa<FunctionDecl>(*D) && !isa<ObjCMethodDecl>(*D) &&
- hasNodes(dyn_cast<DeclContext>(D)))
- dumpDeclContext(cast<DeclContext>(D));
+ if (!isa<FunctionDecl>(*D) && !isa<ObjCMethodDecl>(*D)) {
+ auto DC = dyn_cast<DeclContext>(D);
+ if (DC &&
+ (DC->hasExternalLexicalStorage() ||
+ (Deserialize ? DC->decls_begin() != DC->decls_end()
+ : DC->noload_decls_begin() != DC->noload_decls_end())))
+ dumpDeclContext(DC);
+ }
});
}
-void ASTDumper::VisitLabelDecl(const LabelDecl *D) {
- dumpName(D);
-}
+void ASTDumper::VisitLabelDecl(const LabelDecl *D) { NodeDumper.dumpName(D); }
void ASTDumper::VisitTypedefDecl(const TypedefDecl *D) {
- dumpName(D);
- dumpType(D->getUnderlyingType());
+ NodeDumper.dumpName(D);
+ NodeDumper.dumpType(D->getUnderlyingType());
if (D->isModulePrivate())
OS << " __module_private__";
dumpTypeAsChild(D->getUnderlyingType());
@@ -1110,16 +837,16 @@ void ASTDumper::VisitEnumDecl(const EnumDecl *D) {
else
OS << " struct";
}
- dumpName(D);
+ NodeDumper.dumpName(D);
if (D->isModulePrivate())
OS << " __module_private__";
if (D->isFixed())
- dumpType(D->getIntegerType());
+ NodeDumper.dumpType(D->getIntegerType());
}
void ASTDumper::VisitRecordDecl(const RecordDecl *D) {
OS << ' ' << D->getKindName();
- dumpName(D);
+ NodeDumper.dumpName(D);
if (D->isModulePrivate())
OS << " __module_private__";
if (D->isCompleteDefinition())
@@ -1127,23 +854,23 @@ void ASTDumper::VisitRecordDecl(const RecordDecl *D) {
}
void ASTDumper::VisitEnumConstantDecl(const EnumConstantDecl *D) {
- dumpName(D);
- dumpType(D->getType());
+ NodeDumper.dumpName(D);
+ NodeDumper.dumpType(D->getType());
if (const Expr *Init = D->getInitExpr())
dumpStmt(Init);
}
void ASTDumper::VisitIndirectFieldDecl(const IndirectFieldDecl *D) {
- dumpName(D);
- dumpType(D->getType());
+ NodeDumper.dumpName(D);
+ NodeDumper.dumpType(D->getType());
for (auto *Child : D->chain())
dumpDeclRef(Child);
}
void ASTDumper::VisitFunctionDecl(const FunctionDecl *D) {
- dumpName(D);
- dumpType(D->getType());
+ NodeDumper.dumpName(D);
+ NodeDumper.dumpType(D->getType());
StorageClass SC = D->getStorageClass();
if (SC != SC_None)
@@ -1224,8 +951,8 @@ void ASTDumper::VisitFunctionDecl(const FunctionDecl *D) {
}
void ASTDumper::VisitFieldDecl(const FieldDecl *D) {
- dumpName(D);
- dumpType(D->getType());
+ NodeDumper.dumpName(D);
+ NodeDumper.dumpType(D->getType());
if (D->isMutable())
OS << " mutable";
if (D->isModulePrivate())
@@ -1238,8 +965,8 @@ void ASTDumper::VisitFieldDecl(const FieldDecl *D) {
}
void ASTDumper::VisitVarDecl(const VarDecl *D) {
- dumpName(D);
- dumpType(D->getType());
+ NodeDumper.dumpName(D);
+ NodeDumper.dumpType(D->getType());
StorageClass SC = D->getStorageClass();
if (SC != SC_None)
OS << ' ' << VarDecl::getStorageClassSpecifierString(SC);
@@ -1273,8 +1000,8 @@ void ASTDumper::VisitDecompositionDecl(const DecompositionDecl *D) {
}
void ASTDumper::VisitBindingDecl(const BindingDecl *D) {
- dumpName(D);
- dumpType(D->getType());
+ NodeDumper.dumpName(D);
+ NodeDumper.dumpType(D->getType());
if (auto *E = D->getBinding())
dumpStmt(E);
}
@@ -1321,12 +1048,13 @@ void ASTDumper::VisitOMPThreadPrivateDecl(const OMPThreadPrivateDecl *D) {
}
void ASTDumper::VisitOMPDeclareReductionDecl(const OMPDeclareReductionDecl *D) {
- dumpName(D);
- dumpType(D->getType());
+ NodeDumper.dumpName(D);
+ NodeDumper.dumpType(D->getType());
OS << " combiner";
- dumpStmt(D->getCombiner());
- if (auto *Initializer = D->getInitializer()) {
+ NodeDumper.dumpPointer(D->getCombiner());
+ if (const auto *Initializer = D->getInitializer()) {
OS << " initializer";
+ NodeDumper.dumpPointer(Initializer);
switch (D->getInitializerKind()) {
case OMPDeclareReductionDecl::DirectInit:
OS << " omp_priv = ";
@@ -1337,33 +1065,36 @@ void ASTDumper::VisitOMPDeclareReductionDecl(const OMPDeclareReductionDecl *D) {
case OMPDeclareReductionDecl::CallInit:
break;
}
- dumpStmt(Initializer);
}
+
+ dumpStmt(D->getCombiner());
+ if (const auto *Initializer = D->getInitializer())
+ dumpStmt(Initializer);
}
void ASTDumper::VisitOMPRequiresDecl(const OMPRequiresDecl *D) {
for (auto *C : D->clauselists()) {
dumpChild([=] {
if (!C) {
- ColorScope Color(*this, NullColor);
+ ColorScope Color(OS, ShowColors, NullColor);
OS << "<<<NULL>>> OMPClause";
return;
}
{
- ColorScope Color(*this, AttrColor);
+ ColorScope Color(OS, ShowColors, AttrColor);
StringRef ClauseName(getOpenMPClauseName(C->getClauseKind()));
OS << "OMP" << ClauseName.substr(/*Start=*/0, /*N=*/1).upper()
<< ClauseName.drop_front() << "Clause";
}
- dumpPointer(C);
- dumpSourceRange(SourceRange(C->getBeginLoc(), C->getEndLoc()));
+ NodeDumper.dumpPointer(C);
+ NodeDumper.dumpSourceRange(SourceRange(C->getBeginLoc(), C->getEndLoc()));
});
}
}
void ASTDumper::VisitOMPCapturedExprDecl(const OMPCapturedExprDecl *D) {
- dumpName(D);
- dumpType(D->getType());
+ NodeDumper.dumpName(D);
+ NodeDumper.dumpType(D->getType());
dumpStmt(D->getInit());
}
@@ -1372,7 +1103,7 @@ void ASTDumper::VisitOMPCapturedExprDecl(const OMPCapturedExprDecl *D) {
//===----------------------------------------------------------------------===//
void ASTDumper::VisitNamespaceDecl(const NamespaceDecl *D) {
- dumpName(D);
+ NodeDumper.dumpName(D);
if (D->isInline())
OS << " inline";
if (!D->isOriginalNamespace())
@@ -1381,22 +1112,22 @@ void ASTDumper::VisitNamespaceDecl(const NamespaceDecl *D) {
void ASTDumper::VisitUsingDirectiveDecl(const UsingDirectiveDecl *D) {
OS << ' ';
- dumpBareDeclRef(D->getNominatedNamespace());
+ NodeDumper.dumpBareDeclRef(D->getNominatedNamespace());
}
void ASTDumper::VisitNamespaceAliasDecl(const NamespaceAliasDecl *D) {
- dumpName(D);
+ NodeDumper.dumpName(D);
dumpDeclRef(D->getAliasedNamespace());
}
void ASTDumper::VisitTypeAliasDecl(const TypeAliasDecl *D) {
- dumpName(D);
- dumpType(D->getUnderlyingType());
+ NodeDumper.dumpName(D);
+ NodeDumper.dumpType(D->getUnderlyingType());
dumpTypeAsChild(D->getUnderlyingType());
}
void ASTDumper::VisitTypeAliasTemplateDecl(const TypeAliasTemplateDecl *D) {
- dumpName(D);
+ NodeDumper.dumpName(D);
dumpTemplateParameters(D->getTemplateParameters());
dumpDecl(D->getTemplatedDecl());
}
@@ -1408,7 +1139,7 @@ void ASTDumper::VisitCXXRecordDecl(const CXXRecordDecl *D) {
dumpChild([=] {
{
- ColorScope Color(*this, DeclKindNameColor);
+ ColorScope Color(OS, ShowColors, DeclKindNameColor);
OS << "DefinitionData";
}
#define FLAG(fn, name) if (D->fn()) OS << " " #name;
@@ -1436,7 +1167,7 @@ void ASTDumper::VisitCXXRecordDecl(const CXXRecordDecl *D) {
dumpChild([=] {
{
- ColorScope Color(*this, DeclKindNameColor);
+ ColorScope Color(OS, ShowColors, DeclKindNameColor);
OS << "DefaultConstructor";
}
FLAG(hasDefaultConstructor, exists);
@@ -1450,7 +1181,7 @@ void ASTDumper::VisitCXXRecordDecl(const CXXRecordDecl *D) {
dumpChild([=] {
{
- ColorScope Color(*this, DeclKindNameColor);
+ ColorScope Color(OS, ShowColors, DeclKindNameColor);
OS << "CopyConstructor";
}
FLAG(hasSimpleCopyConstructor, simple);
@@ -1468,7 +1199,7 @@ void ASTDumper::VisitCXXRecordDecl(const CXXRecordDecl *D) {
dumpChild([=] {
{
- ColorScope Color(*this, DeclKindNameColor);
+ ColorScope Color(OS, ShowColors, DeclKindNameColor);
OS << "MoveConstructor";
}
FLAG(hasMoveConstructor, exists);
@@ -1485,7 +1216,7 @@ void ASTDumper::VisitCXXRecordDecl(const CXXRecordDecl *D) {
dumpChild([=] {
{
- ColorScope Color(*this, DeclKindNameColor);
+ ColorScope Color(OS, ShowColors, DeclKindNameColor);
OS << "CopyAssignment";
}
FLAG(hasTrivialCopyAssignment, trivial);
@@ -1499,7 +1230,7 @@ void ASTDumper::VisitCXXRecordDecl(const CXXRecordDecl *D) {
dumpChild([=] {
{
- ColorScope Color(*this, DeclKindNameColor);
+ ColorScope Color(OS, ShowColors, DeclKindNameColor);
OS << "MoveAssignment";
}
FLAG(hasMoveAssignment, exists);
@@ -1513,7 +1244,7 @@ void ASTDumper::VisitCXXRecordDecl(const CXXRecordDecl *D) {
dumpChild([=] {
{
- ColorScope Color(*this, DeclKindNameColor);
+ ColorScope Color(OS, ShowColors, DeclKindNameColor);
OS << "Destructor";
}
FLAG(hasSimpleDestructor, simple);
@@ -1532,8 +1263,8 @@ void ASTDumper::VisitCXXRecordDecl(const CXXRecordDecl *D) {
dumpChild([=] {
if (I.isVirtual())
OS << "virtual ";
- dumpAccessSpecifier(I.getAccessSpecifier());
- dumpType(I.getType());
+ NodeDumper.dumpAccessSpecifier(I.getAccessSpecifier());
+ NodeDumper.dumpType(I.getType());
if (I.isPackExpansion())
OS << "...";
});
@@ -1545,10 +1276,10 @@ void ASTDumper::VisitStaticAssertDecl(const StaticAssertDecl *D) {
dumpStmt(D->getMessage());
}
-template<typename SpecializationDecl>
-void ASTDumper::VisitTemplateDeclSpecialization(const SpecializationDecl *D,
- bool DumpExplicitInst,
- bool DumpRefOnly) {
+template <typename SpecializationDecl>
+void ASTDumper::dumpTemplateDeclSpecialization(const SpecializationDecl *D,
+ bool DumpExplicitInst,
+ bool DumpRefOnly) {
bool DumpedAny = false;
for (auto *RedeclWithBadType : D->redecls()) {
// FIXME: The redecls() range sometimes has elements of a less-specific
@@ -1587,28 +1318,27 @@ void ASTDumper::VisitTemplateDeclSpecialization(const SpecializationDecl *D,
dumpDeclRef(D);
}
-template<typename TemplateDecl>
-void ASTDumper::VisitTemplateDecl(const TemplateDecl *D,
- bool DumpExplicitInst) {
- dumpName(D);
+template <typename TemplateDecl>
+void ASTDumper::dumpTemplateDecl(const TemplateDecl *D, bool DumpExplicitInst) {
+ NodeDumper.dumpName(D);
dumpTemplateParameters(D->getTemplateParameters());
dumpDecl(D->getTemplatedDecl());
for (auto *Child : D->specializations())
- VisitTemplateDeclSpecialization(Child, DumpExplicitInst,
- !D->isCanonicalDecl());
+ dumpTemplateDeclSpecialization(Child, DumpExplicitInst,
+ !D->isCanonicalDecl());
}
void ASTDumper::VisitFunctionTemplateDecl(const FunctionTemplateDecl *D) {
// FIXME: We don't add a declaration of a function template specialization
// to its context when it's explicitly instantiated, so dump explicit
// instantiations when we dump the template itself.
- VisitTemplateDecl(D, true);
+ dumpTemplateDecl(D, true);
}
void ASTDumper::VisitClassTemplateDecl(const ClassTemplateDecl *D) {
- VisitTemplateDecl(D, false);
+ dumpTemplateDecl(D, false);
}
void ASTDumper::VisitClassTemplateSpecializationDecl(
@@ -1631,11 +1361,11 @@ void ASTDumper::VisitClassScopeFunctionSpecializationDecl(
}
void ASTDumper::VisitVarTemplateDecl(const VarTemplateDecl *D) {
- VisitTemplateDecl(D, false);
+ dumpTemplateDecl(D, false);
}
void ASTDumper::VisitBuiltinTemplateDecl(const BuiltinTemplateDecl *D) {
- dumpName(D);
+ NodeDumper.dumpName(D);
dumpTemplateParameters(D->getTemplateParameters());
}
@@ -1659,25 +1389,25 @@ void ASTDumper::VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *D) {
OS << " depth " << D->getDepth() << " index " << D->getIndex();
if (D->isParameterPack())
OS << " ...";
- dumpName(D);
+ NodeDumper.dumpName(D);
if (D->hasDefaultArgument())
- dumpTemplateArgument(D->getDefaultArgument());
- if (auto *From = D->getDefaultArgStorage().getInheritedFrom())
- dumpDeclRef(From, D->defaultArgumentWasInherited() ? "inherited from"
- : "previous");
+ dumpTemplateArgument(D->getDefaultArgument(), SourceRange(),
+ D->getDefaultArgStorage().getInheritedFrom(),
+ D->defaultArgumentWasInherited() ? "inherited from"
+ : "previous");
}
void ASTDumper::VisitNonTypeTemplateParmDecl(const NonTypeTemplateParmDecl *D) {
- dumpType(D->getType());
+ NodeDumper.dumpType(D->getType());
OS << " depth " << D->getDepth() << " index " << D->getIndex();
if (D->isParameterPack())
OS << " ...";
- dumpName(D);
+ NodeDumper.dumpName(D);
if (D->hasDefaultArgument())
- dumpTemplateArgument(D->getDefaultArgument());
- if (auto *From = D->getDefaultArgStorage().getInheritedFrom())
- dumpDeclRef(From, D->defaultArgumentWasInherited() ? "inherited from"
- : "previous");
+ dumpTemplateArgument(D->getDefaultArgument(), SourceRange(),
+ D->getDefaultArgStorage().getInheritedFrom(),
+ D->defaultArgumentWasInherited() ? "inherited from"
+ : "previous");
}
void ASTDumper::VisitTemplateTemplateParmDecl(
@@ -1685,13 +1415,12 @@ void ASTDumper::VisitTemplateTemplateParmDecl(
OS << " depth " << D->getDepth() << " index " << D->getIndex();
if (D->isParameterPack())
OS << " ...";
- dumpName(D);
+ NodeDumper.dumpName(D);
dumpTemplateParameters(D->getTemplateParameters());
if (D->hasDefaultArgument())
- dumpTemplateArgumentLoc(D->getDefaultArgument());
- if (auto *From = D->getDefaultArgStorage().getInheritedFrom())
- dumpDeclRef(From, D->defaultArgumentWasInherited() ? "inherited from"
- : "previous");
+ dumpTemplateArgumentLoc(
+ D->getDefaultArgument(), D->getDefaultArgStorage().getInheritedFrom(),
+ D->defaultArgumentWasInherited() ? "inherited from" : "previous");
}
void ASTDumper::VisitUsingDecl(const UsingDecl *D) {
@@ -1714,12 +1443,12 @@ void ASTDumper::VisitUnresolvedUsingValueDecl(const UnresolvedUsingValueDecl *D)
if (D->getQualifier())
D->getQualifier()->print(OS, D->getASTContext().getPrintingPolicy());
OS << D->getNameAsString();
- dumpType(D->getType());
+ NodeDumper.dumpType(D->getType());
}
void ASTDumper::VisitUsingShadowDecl(const UsingShadowDecl *D) {
OS << ' ';
- dumpBareDeclRef(D->getTargetDecl());
+ NodeDumper.dumpBareDeclRef(D->getTargetDecl());
if (auto *TD = dyn_cast<TypeDecl>(D->getUnderlyingDecl()))
dumpTypeAsChild(TD->getTypeForDecl());
}
@@ -1731,21 +1460,21 @@ void ASTDumper::VisitConstructorUsingShadowDecl(
dumpChild([=] {
OS << "target ";
- dumpBareDeclRef(D->getTargetDecl());
+ NodeDumper.dumpBareDeclRef(D->getTargetDecl());
});
dumpChild([=] {
OS << "nominated ";
- dumpBareDeclRef(D->getNominatedBaseClass());
+ NodeDumper.dumpBareDeclRef(D->getNominatedBaseClass());
OS << ' ';
- dumpBareDeclRef(D->getNominatedBaseClassShadowDecl());
+ NodeDumper.dumpBareDeclRef(D->getNominatedBaseClassShadowDecl());
});
dumpChild([=] {
OS << "constructed ";
- dumpBareDeclRef(D->getConstructedBaseClass());
+ NodeDumper.dumpBareDeclRef(D->getConstructedBaseClass());
OS << ' ';
- dumpBareDeclRef(D->getConstructedBaseClassShadowDecl());
+ NodeDumper.dumpBareDeclRef(D->getConstructedBaseClassShadowDecl());
});
}
@@ -1758,12 +1487,12 @@ void ASTDumper::VisitLinkageSpecDecl(const LinkageSpecDecl *D) {
void ASTDumper::VisitAccessSpecDecl(const AccessSpecDecl *D) {
OS << ' ';
- dumpAccessSpecifier(D->getAccess());
+ NodeDumper.dumpAccessSpecifier(D->getAccess());
}
void ASTDumper::VisitFriendDecl(const FriendDecl *D) {
if (TypeSourceInfo *T = D->getFriendType())
- dumpType(T->getType());
+ NodeDumper.dumpType(T->getType());
else
dumpDecl(D->getFriendDecl());
}
@@ -1773,8 +1502,8 @@ void ASTDumper::VisitFriendDecl(const FriendDecl *D) {
//===----------------------------------------------------------------------===//
void ASTDumper::VisitObjCIvarDecl(const ObjCIvarDecl *D) {
- dumpName(D);
- dumpType(D->getType());
+ NodeDumper.dumpName(D);
+ NodeDumper.dumpType(D->getType());
if (D->getSynthesize())
OS << " synthesize";
@@ -1802,8 +1531,8 @@ void ASTDumper::VisitObjCMethodDecl(const ObjCMethodDecl *D) {
OS << " -";
else
OS << " +";
- dumpName(D);
- dumpType(D->getReturnType());
+ NodeDumper.dumpName(D);
+ NodeDumper.dumpType(D->getReturnType());
if (D->isThisDeclarationADefinition()) {
dumpDeclContext(D);
@@ -1820,7 +1549,7 @@ void ASTDumper::VisitObjCMethodDecl(const ObjCMethodDecl *D) {
}
void ASTDumper::VisitObjCTypeParamDecl(const ObjCTypeParamDecl *D) {
- dumpName(D);
+ NodeDumper.dumpName(D);
switch (D->getVariance()) {
case ObjCTypeParamVariance::Invariant:
break;
@@ -1836,11 +1565,11 @@ void ASTDumper::VisitObjCTypeParamDecl(const ObjCTypeParamDecl *D) {
if (D->hasExplicitBound())
OS << " bounded";
- dumpType(D->getUnderlyingType());
+ NodeDumper.dumpType(D->getUnderlyingType());
}
void ASTDumper::VisitObjCCategoryDecl(const ObjCCategoryDecl *D) {
- dumpName(D);
+ NodeDumper.dumpName(D);
dumpDeclRef(D->getClassInterface());
dumpObjCTypeParamList(D->getTypeParamList());
dumpDeclRef(D->getImplementation());
@@ -1851,20 +1580,20 @@ void ASTDumper::VisitObjCCategoryDecl(const ObjCCategoryDecl *D) {
}
void ASTDumper::VisitObjCCategoryImplDecl(const ObjCCategoryImplDecl *D) {
- dumpName(D);
+ NodeDumper.dumpName(D);
dumpDeclRef(D->getClassInterface());
dumpDeclRef(D->getCategoryDecl());
}
void ASTDumper::VisitObjCProtocolDecl(const ObjCProtocolDecl *D) {
- dumpName(D);
+ NodeDumper.dumpName(D);
for (auto *Child : D->protocols())
dumpDeclRef(Child);
}
void ASTDumper::VisitObjCInterfaceDecl(const ObjCInterfaceDecl *D) {
- dumpName(D);
+ NodeDumper.dumpName(D);
dumpObjCTypeParamList(D->getTypeParamListAsWritten());
dumpDeclRef(D->getSuperClass(), "super");
@@ -1874,7 +1603,7 @@ void ASTDumper::VisitObjCInterfaceDecl(const ObjCInterfaceDecl *D) {
}
void ASTDumper::VisitObjCImplementationDecl(const ObjCImplementationDecl *D) {
- dumpName(D);
+ NodeDumper.dumpName(D);
dumpDeclRef(D->getSuperClass(), "super");
dumpDeclRef(D->getClassInterface());
for (ObjCImplementationDecl::init_const_iterator I = D->init_begin(),
@@ -1884,13 +1613,13 @@ void ASTDumper::VisitObjCImplementationDecl(const ObjCImplementationDecl *D) {
}
void ASTDumper::VisitObjCCompatibleAliasDecl(const ObjCCompatibleAliasDecl *D) {
- dumpName(D);
+ NodeDumper.dumpName(D);
dumpDeclRef(D->getClassInterface());
}
void ASTDumper::VisitObjCPropertyDecl(const ObjCPropertyDecl *D) {
- dumpName(D);
- dumpType(D->getType());
+ NodeDumper.dumpName(D);
+ NodeDumper.dumpType(D->getType());
if (D->getPropertyImplementation() == ObjCPropertyDecl::Required)
OS << " required";
@@ -1929,7 +1658,7 @@ void ASTDumper::VisitObjCPropertyDecl(const ObjCPropertyDecl *D) {
}
void ASTDumper::VisitObjCPropertyImplDecl(const ObjCPropertyImplDecl *D) {
- dumpName(D->getPropertyDecl());
+ NodeDumper.dumpName(D->getPropertyDecl());
if (D->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize)
OS << " synthesize";
else
@@ -1957,7 +1686,7 @@ void ASTDumper::VisitBlockDecl(const BlockDecl *D) {
OS << " nested";
if (I.getVariable()) {
OS << ' ';
- dumpBareDeclRef(I.getVariable());
+ NodeDumper.dumpBareDeclRef(I.getVariable());
}
if (I.hasCopyExpr())
dumpStmt(I.getCopyExpr());
@@ -1973,39 +1702,68 @@ void ASTDumper::VisitBlockDecl(const BlockDecl *D) {
void ASTDumper::dumpStmt(const Stmt *S) {
dumpChild([=] {
if (!S) {
- ColorScope Color(*this, NullColor);
+ ColorScope Color(OS, ShowColors, NullColor);
OS << "<<<NULL>>>";
return;
}
-
- // Some statements have custom mechanisms for dumping their children.
- if (const DeclStmt *DS = dyn_cast<DeclStmt>(S)) {
- VisitDeclStmt(DS);
- return;
+ {
+ ColorScope Color(OS, ShowColors, StmtColor);
+ OS << S->getStmtClassName();
}
- if (const GenericSelectionExpr *GSE = dyn_cast<GenericSelectionExpr>(S)) {
- VisitGenericSelectionExpr(GSE);
- return;
+ NodeDumper.dumpPointer(S);
+ NodeDumper.dumpSourceRange(S->getSourceRange());
+
+ if (const auto *E = dyn_cast<Expr>(S)) {
+ NodeDumper.dumpType(E->getType());
+
+ {
+ ColorScope Color(OS, ShowColors, ValueKindColor);
+ switch (E->getValueKind()) {
+ case VK_RValue:
+ break;
+ case VK_LValue:
+ OS << " lvalue";
+ break;
+ case VK_XValue:
+ OS << " xvalue";
+ break;
+ }
+ }
+
+ {
+ ColorScope Color(OS, ShowColors, ObjectKindColor);
+ switch (E->getObjectKind()) {
+ case OK_Ordinary:
+ break;
+ case OK_BitField:
+ OS << " bitfield";
+ break;
+ case OK_ObjCProperty:
+ OS << " objcproperty";
+ break;
+ case OK_ObjCSubscript:
+ OS << " objcsubscript";
+ break;
+ case OK_VectorComponent:
+ OS << " vectorcomponent";
+ break;
+ }
+ }
}
ConstStmtVisitor<ASTDumper>::Visit(S);
+ // Some statements have custom mechanisms for dumping their children.
+ if (isa<DeclStmt>(S) || isa<GenericSelectionExpr>(S)) {
+ return;
+ }
+
for (const Stmt *SubStmt : S->children())
dumpStmt(SubStmt);
});
}
-void ASTDumper::VisitStmt(const Stmt *Node) {
- {
- ColorScope Color(*this, StmtColor);
- OS << Node->getStmtClassName();
- }
- dumpPointer(Node);
- dumpSourceRange(Node->getSourceRange());
-}
-
void ASTDumper::VisitDeclStmt(const DeclStmt *Node) {
- VisitStmt(Node);
for (DeclStmt::const_decl_iterator I = Node->decl_begin(),
E = Node->decl_end();
I != E; ++I)
@@ -2013,31 +1771,52 @@ void ASTDumper::VisitDeclStmt(const DeclStmt *Node) {
}
void ASTDumper::VisitAttributedStmt(const AttributedStmt *Node) {
- VisitStmt(Node);
for (ArrayRef<const Attr *>::iterator I = Node->getAttrs().begin(),
E = Node->getAttrs().end();
I != E; ++I)
dumpAttr(*I);
}
+void ASTDumper::VisitIfStmt(const IfStmt *Node) {
+ if (Node->hasInitStorage())
+ OS << " has_init";
+ if (Node->hasVarStorage())
+ OS << " has_var";
+ if (Node->hasElseStorage())
+ OS << " has_else";
+}
+
+void ASTDumper::VisitSwitchStmt(const SwitchStmt *Node) {
+ if (Node->hasInitStorage())
+ OS << " has_init";
+ if (Node->hasVarStorage())
+ OS << " has_var";
+}
+
+void ASTDumper::VisitWhileStmt(const WhileStmt *Node) {
+ if (Node->hasVarStorage())
+ OS << " has_var";
+}
+
void ASTDumper::VisitLabelStmt(const LabelStmt *Node) {
- VisitStmt(Node);
OS << " '" << Node->getName() << "'";
}
void ASTDumper::VisitGotoStmt(const GotoStmt *Node) {
- VisitStmt(Node);
OS << " '" << Node->getLabel()->getName() << "'";
- dumpPointer(Node->getLabel());
+ NodeDumper.dumpPointer(Node->getLabel());
}
void ASTDumper::VisitCXXCatchStmt(const CXXCatchStmt *Node) {
- VisitStmt(Node);
dumpDecl(Node->getExceptionDecl());
}
+void ASTDumper::VisitCaseStmt(const CaseStmt *Node) {
+ if (Node->caseStmtIsGNURange())
+ OS << " gnu_range";
+}
+
void ASTDumper::VisitCapturedStmt(const CapturedStmt *Node) {
- VisitStmt(Node);
dumpDecl(Node->getCapturedDecl());
}
@@ -2047,22 +1826,21 @@ void ASTDumper::VisitCapturedStmt(const CapturedStmt *Node) {
void ASTDumper::VisitOMPExecutableDirective(
const OMPExecutableDirective *Node) {
- VisitStmt(Node);
for (auto *C : Node->clauses()) {
dumpChild([=] {
if (!C) {
- ColorScope Color(*this, NullColor);
+ ColorScope Color(OS, ShowColors, NullColor);
OS << "<<<NULL>>> OMPClause";
return;
}
{
- ColorScope Color(*this, AttrColor);
+ ColorScope Color(OS, ShowColors, AttrColor);
StringRef ClauseName(getOpenMPClauseName(C->getClauseKind()));
OS << "OMP" << ClauseName.substr(/*Start=*/0, /*N=*/1).upper()
<< ClauseName.drop_front() << "Clause";
}
- dumpPointer(C);
- dumpSourceRange(SourceRange(C->getBeginLoc(), C->getEndLoc()));
+ NodeDumper.dumpPointer(C);
+ NodeDumper.dumpSourceRange(SourceRange(C->getBeginLoc(), C->getEndLoc()));
if (C->isImplicit())
OS << " <implicit>";
for (auto *S : C->children())
@@ -2075,45 +1853,6 @@ void ASTDumper::VisitOMPExecutableDirective(
// Expr dumping methods.
//===----------------------------------------------------------------------===//
-void ASTDumper::VisitExpr(const Expr *Node) {
- VisitStmt(Node);
- dumpType(Node->getType());
-
- {
- ColorScope Color(*this, ValueKindColor);
- switch (Node->getValueKind()) {
- case VK_RValue:
- break;
- case VK_LValue:
- OS << " lvalue";
- break;
- case VK_XValue:
- OS << " xvalue";
- break;
- }
- }
-
- {
- ColorScope Color(*this, ObjectKindColor);
- switch (Node->getObjectKind()) {
- case OK_Ordinary:
- break;
- case OK_BitField:
- OS << " bitfield";
- break;
- case OK_ObjCProperty:
- OS << " objcproperty";
- break;
- case OK_ObjCSubscript:
- OS << " objcsubscript";
- break;
- case OK_VectorComponent:
- OS << " vectorcomponent";
- break;
- }
- }
-}
-
static void dumpBasePath(raw_ostream &OS, const CastExpr *Node) {
if (Node->path_empty())
return;
@@ -2139,11 +1878,15 @@ static void dumpBasePath(raw_ostream &OS, const CastExpr *Node) {
OS << ')';
}
+void ASTDumper::VisitCallExpr(const CallExpr *Node) {
+ if (Node->usesADL())
+ OS << " adl";
+}
+
void ASTDumper::VisitCastExpr(const CastExpr *Node) {
- VisitExpr(Node);
OS << " <";
{
- ColorScope Color(*this, CastColor);
+ ColorScope Color(OS, ShowColors, CastColor);
OS << Node->getCastKindName();
}
dumpBasePath(OS, Node);
@@ -2157,19 +1900,16 @@ void ASTDumper::VisitImplicitCastExpr(const ImplicitCastExpr *Node) {
}
void ASTDumper::VisitDeclRefExpr(const DeclRefExpr *Node) {
- VisitExpr(Node);
-
OS << " ";
- dumpBareDeclRef(Node->getDecl());
+ NodeDumper.dumpBareDeclRef(Node->getDecl());
if (Node->getDecl() != Node->getFoundDecl()) {
OS << " (";
- dumpBareDeclRef(Node->getFoundDecl());
+ NodeDumper.dumpBareDeclRef(Node->getFoundDecl());
OS << ")";
}
}
void ASTDumper::VisitUnresolvedLookupExpr(const UnresolvedLookupExpr *Node) {
- VisitExpr(Node);
OS << " (";
if (!Node->requiresADL())
OS << "no ";
@@ -2180,85 +1920,65 @@ void ASTDumper::VisitUnresolvedLookupExpr(const UnresolvedLookupExpr *Node) {
if (I == E)
OS << " empty";
for (; I != E; ++I)
- dumpPointer(*I);
+ NodeDumper.dumpPointer(*I);
}
void ASTDumper::VisitObjCIvarRefExpr(const ObjCIvarRefExpr *Node) {
- VisitExpr(Node);
-
{
- ColorScope Color(*this, DeclKindNameColor);
+ ColorScope Color(OS, ShowColors, DeclKindNameColor);
OS << " " << Node->getDecl()->getDeclKindName() << "Decl";
}
OS << "='" << *Node->getDecl() << "'";
- dumpPointer(Node->getDecl());
+ NodeDumper.dumpPointer(Node->getDecl());
if (Node->isFreeIvar())
OS << " isFreeIvar";
}
void ASTDumper::VisitPredefinedExpr(const PredefinedExpr *Node) {
- VisitExpr(Node);
- OS << " " << PredefinedExpr::getIdentTypeName(Node->getIdentType());
+ OS << " " << PredefinedExpr::getIdentKindName(Node->getIdentKind());
}
void ASTDumper::VisitCharacterLiteral(const CharacterLiteral *Node) {
- VisitExpr(Node);
- ColorScope Color(*this, ValueColor);
+ ColorScope Color(OS, ShowColors, ValueColor);
OS << " " << Node->getValue();
}
void ASTDumper::VisitIntegerLiteral(const IntegerLiteral *Node) {
- VisitExpr(Node);
-
bool isSigned = Node->getType()->isSignedIntegerType();
- ColorScope Color(*this, ValueColor);
+ ColorScope Color(OS, ShowColors, ValueColor);
OS << " " << Node->getValue().toString(10, isSigned);
}
void ASTDumper::VisitFixedPointLiteral(const FixedPointLiteral *Node) {
- VisitExpr(Node);
-
- ColorScope Color(*this, ValueColor);
+ ColorScope Color(OS, ShowColors, ValueColor);
OS << " " << Node->getValueAsString(/*Radix=*/10);
}
void ASTDumper::VisitFloatingLiteral(const FloatingLiteral *Node) {
- VisitExpr(Node);
- ColorScope Color(*this, ValueColor);
+ ColorScope Color(OS, ShowColors, ValueColor);
OS << " " << Node->getValueAsApproximateDouble();
}
void ASTDumper::VisitStringLiteral(const StringLiteral *Str) {
- VisitExpr(Str);
- ColorScope Color(*this, ValueColor);
+ ColorScope Color(OS, ShowColors, ValueColor);
OS << " ";
Str->outputString(OS);
}
void ASTDumper::VisitInitListExpr(const InitListExpr *ILE) {
- VisitExpr(ILE);
+ if (auto *Field = ILE->getInitializedFieldInUnion()) {
+ OS << " field ";
+ NodeDumper.dumpBareDeclRef(Field);
+ }
if (auto *Filler = ILE->getArrayFiller()) {
dumpChild([=] {
OS << "array filler";
dumpStmt(Filler);
});
}
- if (auto *Field = ILE->getInitializedFieldInUnion()) {
- OS << " field ";
- dumpBareDeclRef(Field);
- }
-}
-
-void ASTDumper::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E) {
- VisitExpr(E);
-}
-
-void ASTDumper::VisitArrayInitIndexExpr(const ArrayInitIndexExpr *E) {
- VisitExpr(E);
}
void ASTDumper::VisitUnaryOperator(const UnaryOperator *Node) {
- VisitExpr(Node);
OS << " " << (Node->isPostfix() ? "postfix" : "prefix")
<< " '" << UnaryOperator::getOpcodeStr(Node->getOpcode()) << "'";
if (!Node->canOverflow())
@@ -2267,7 +1987,6 @@ void ASTDumper::VisitUnaryOperator(const UnaryOperator *Node) {
void ASTDumper::VisitUnaryExprOrTypeTraitExpr(
const UnaryExprOrTypeTraitExpr *Node) {
- VisitExpr(Node);
switch(Node->getKind()) {
case UETT_SizeOf:
OS << " sizeof";
@@ -2281,51 +2000,46 @@ void ASTDumper::VisitUnaryExprOrTypeTraitExpr(
case UETT_OpenMPRequiredSimdAlign:
OS << " __builtin_omp_required_simd_align";
break;
+ case UETT_PreferredAlignOf:
+ OS << " __alignof";
+ break;
}
if (Node->isArgumentType())
- dumpType(Node->getArgumentType());
+ NodeDumper.dumpType(Node->getArgumentType());
}
void ASTDumper::VisitMemberExpr(const MemberExpr *Node) {
- VisitExpr(Node);
OS << " " << (Node->isArrow() ? "->" : ".") << *Node->getMemberDecl();
- dumpPointer(Node->getMemberDecl());
+ NodeDumper.dumpPointer(Node->getMemberDecl());
}
void ASTDumper::VisitExtVectorElementExpr(const ExtVectorElementExpr *Node) {
- VisitExpr(Node);
OS << " " << Node->getAccessor().getNameStart();
}
void ASTDumper::VisitBinaryOperator(const BinaryOperator *Node) {
- VisitExpr(Node);
OS << " '" << BinaryOperator::getOpcodeStr(Node->getOpcode()) << "'";
}
void ASTDumper::VisitCompoundAssignOperator(
const CompoundAssignOperator *Node) {
- VisitExpr(Node);
OS << " '" << BinaryOperator::getOpcodeStr(Node->getOpcode())
<< "' ComputeLHSTy=";
- dumpBareType(Node->getComputationLHSType());
+ NodeDumper.dumpBareType(Node->getComputationLHSType());
OS << " ComputeResultTy=";
- dumpBareType(Node->getComputationResultType());
+ NodeDumper.dumpBareType(Node->getComputationResultType());
}
void ASTDumper::VisitBlockExpr(const BlockExpr *Node) {
- VisitExpr(Node);
dumpDecl(Node->getBlockDecl());
}
void ASTDumper::VisitOpaqueValueExpr(const OpaqueValueExpr *Node) {
- VisitExpr(Node);
-
if (Expr *Source = Node->getSourceExpr())
dumpStmt(Source);
}
void ASTDumper::VisitGenericSelectionExpr(const GenericSelectionExpr *E) {
- VisitExpr(E);
if (E->isResultDependent())
OS << " result_dependent";
dumpStmt(E->getControllingExpr());
@@ -2335,7 +2049,7 @@ void ASTDumper::VisitGenericSelectionExpr(const GenericSelectionExpr *E) {
dumpChild([=] {
if (const TypeSourceInfo *TSI = E->getAssocTypeSourceInfo(I)) {
OS << "case ";
- dumpType(TSI->getType());
+ NodeDumper.dumpType(TSI->getType());
} else {
OS << "default";
}
@@ -2353,9 +2067,8 @@ void ASTDumper::VisitGenericSelectionExpr(const GenericSelectionExpr *E) {
// GNU extensions.
void ASTDumper::VisitAddrLabelExpr(const AddrLabelExpr *Node) {
- VisitExpr(Node);
OS << " " << Node->getLabel()->getName();
- dumpPointer(Node->getLabel());
+ NodeDumper.dumpPointer(Node->getLabel());
}
//===----------------------------------------------------------------------===//
@@ -2363,7 +2076,6 @@ void ASTDumper::VisitAddrLabelExpr(const AddrLabelExpr *Node) {
//===----------------------------------------------------------------------===//
void ASTDumper::VisitCXXNamedCastExpr(const CXXNamedCastExpr *Node) {
- VisitExpr(Node);
OS << " " << Node->getCastName()
<< "<" << Node->getTypeAsWritten().getAsString() << ">"
<< " <" << Node->getCastKindName();
@@ -2372,33 +2084,28 @@ void ASTDumper::VisitCXXNamedCastExpr(const CXXNamedCastExpr *Node) {
}
void ASTDumper::VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *Node) {
- VisitExpr(Node);
OS << " " << (Node->getValue() ? "true" : "false");
}
void ASTDumper::VisitCXXThisExpr(const CXXThisExpr *Node) {
- VisitExpr(Node);
OS << " this";
}
void ASTDumper::VisitCXXFunctionalCastExpr(const CXXFunctionalCastExpr *Node) {
- VisitExpr(Node);
OS << " functional cast to " << Node->getTypeAsWritten().getAsString()
<< " <" << Node->getCastKindName() << ">";
}
void ASTDumper::VisitCXXUnresolvedConstructExpr(
const CXXUnresolvedConstructExpr *Node) {
- VisitExpr(Node);
- dumpType(Node->getTypeAsWritten());
+ NodeDumper.dumpType(Node->getTypeAsWritten());
if (Node->isListInitialization())
OS << " list";
}
void ASTDumper::VisitCXXConstructExpr(const CXXConstructExpr *Node) {
- VisitExpr(Node);
CXXConstructorDecl *Ctor = Node->getConstructor();
- dumpType(Ctor->getType());
+ NodeDumper.dumpType(Ctor->getType());
if (Node->isElidable())
OS << " elidable";
if (Node->isListInitialization())
@@ -2410,62 +2117,50 @@ void ASTDumper::VisitCXXConstructExpr(const CXXConstructExpr *Node) {
}
void ASTDumper::VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *Node) {
- VisitExpr(Node);
OS << " ";
- dumpCXXTemporary(Node->getTemporary());
+ NodeDumper.dumpCXXTemporary(Node->getTemporary());
}
void ASTDumper::VisitCXXNewExpr(const CXXNewExpr *Node) {
- VisitExpr(Node);
if (Node->isGlobalNew())
OS << " global";
if (Node->isArray())
OS << " array";
if (Node->getOperatorNew()) {
OS << ' ';
- dumpBareDeclRef(Node->getOperatorNew());
+ NodeDumper.dumpBareDeclRef(Node->getOperatorNew());
}
// We could dump the deallocation function used in case of error, but it's
// usually not that interesting.
}
void ASTDumper::VisitCXXDeleteExpr(const CXXDeleteExpr *Node) {
- VisitExpr(Node);
if (Node->isGlobalDelete())
OS << " global";
if (Node->isArrayForm())
OS << " array";
if (Node->getOperatorDelete()) {
OS << ' ';
- dumpBareDeclRef(Node->getOperatorDelete());
+ NodeDumper.dumpBareDeclRef(Node->getOperatorDelete());
}
}
void
ASTDumper::VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *Node) {
- VisitExpr(Node);
if (const ValueDecl *VD = Node->getExtendingDecl()) {
OS << " extended by ";
- dumpBareDeclRef(VD);
+ NodeDumper.dumpBareDeclRef(VD);
}
}
void ASTDumper::VisitExprWithCleanups(const ExprWithCleanups *Node) {
- VisitExpr(Node);
for (unsigned i = 0, e = Node->getNumObjects(); i != e; ++i)
dumpDeclRef(Node->getObject(i), "cleanup");
}
-void ASTDumper::dumpCXXTemporary(const CXXTemporary *Temporary) {
- OS << "(CXXTemporary";
- dumpPointer(Temporary);
- OS << ")";
-}
-
void ASTDumper::VisitSizeOfPackExpr(const SizeOfPackExpr *Node) {
- VisitExpr(Node);
- dumpPointer(Node->getPack());
- dumpName(Node->getPack());
+ NodeDumper.dumpPointer(Node->getPack());
+ NodeDumper.dumpName(Node->getPack());
if (Node->isPartiallySubstituted())
for (const auto &A : Node->getPartialArguments())
dumpTemplateArgument(A);
@@ -2473,7 +2168,6 @@ void ASTDumper::VisitSizeOfPackExpr(const SizeOfPackExpr *Node) {
void ASTDumper::VisitCXXDependentScopeMemberExpr(
const CXXDependentScopeMemberExpr *Node) {
- VisitExpr(Node);
OS << " " << (Node->isArrow() ? "->" : ".") << Node->getMember();
}
@@ -2482,7 +2176,6 @@ void ASTDumper::VisitCXXDependentScopeMemberExpr(
//===----------------------------------------------------------------------===//
void ASTDumper::VisitObjCMessageExpr(const ObjCMessageExpr *Node) {
- VisitExpr(Node);
OS << " selector=";
Node->getSelector().print(OS);
switch (Node->getReceiverKind()) {
@@ -2491,7 +2184,7 @@ void ASTDumper::VisitObjCMessageExpr(const ObjCMessageExpr *Node) {
case ObjCMessageExpr::Class:
OS << " class=";
- dumpBareType(Node->getClassReceiver());
+ NodeDumper.dumpBareType(Node->getClassReceiver());
break;
case ObjCMessageExpr::SuperInstance:
@@ -2505,7 +2198,6 @@ void ASTDumper::VisitObjCMessageExpr(const ObjCMessageExpr *Node) {
}
void ASTDumper::VisitObjCBoxedExpr(const ObjCBoxedExpr *Node) {
- VisitExpr(Node);
if (auto *BoxingMethod = Node->getBoxingMethod()) {
OS << " selector=";
BoxingMethod->getSelector().print(OS);
@@ -2513,7 +2205,6 @@ void ASTDumper::VisitObjCBoxedExpr(const ObjCBoxedExpr *Node) {
}
void ASTDumper::VisitObjCAtCatchStmt(const ObjCAtCatchStmt *Node) {
- VisitStmt(Node);
if (const VarDecl *CatchParam = Node->getCatchParamDecl())
dumpDecl(CatchParam);
else
@@ -2521,25 +2212,19 @@ void ASTDumper::VisitObjCAtCatchStmt(const ObjCAtCatchStmt *Node) {
}
void ASTDumper::VisitObjCEncodeExpr(const ObjCEncodeExpr *Node) {
- VisitExpr(Node);
- dumpType(Node->getEncodedType());
+ NodeDumper.dumpType(Node->getEncodedType());
}
void ASTDumper::VisitObjCSelectorExpr(const ObjCSelectorExpr *Node) {
- VisitExpr(Node);
-
OS << " ";
Node->getSelector().print(OS);
}
void ASTDumper::VisitObjCProtocolExpr(const ObjCProtocolExpr *Node) {
- VisitExpr(Node);
-
OS << ' ' << *Node->getProtocol();
}
void ASTDumper::VisitObjCPropertyRefExpr(const ObjCPropertyRefExpr *Node) {
- VisitExpr(Node);
if (Node->isImplicitProperty()) {
OS << " Kind=MethodRef Getter=\"";
if (Node->getImplicitPropertyGetter())
@@ -2570,7 +2255,6 @@ void ASTDumper::VisitObjCPropertyRefExpr(const ObjCPropertyRefExpr *Node) {
}
void ASTDumper::VisitObjCSubscriptRefExpr(const ObjCSubscriptRefExpr *Node) {
- VisitExpr(Node);
if (Node->isArraySubscriptRefExpr())
OS << " Kind=ArraySubscript GetterForArray=\"";
else
@@ -2591,7 +2275,6 @@ void ASTDumper::VisitObjCSubscriptRefExpr(const ObjCSubscriptRefExpr *Node) {
}
void ASTDumper::VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *Node) {
- VisitExpr(Node);
OS << " " << (Node->getValue() ? "__objc_yes" : "__objc_no");
}
@@ -2599,145 +2282,19 @@ void ASTDumper::VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *Node) {
// Comments
//===----------------------------------------------------------------------===//
-const char *ASTDumper::getCommandName(unsigned CommandID) {
- if (Traits)
- return Traits->getCommandInfo(CommandID)->Name;
- const CommandInfo *Info = CommandTraits::getBuiltinCommandInfo(CommandID);
- if (Info)
- return Info->Name;
- return "<not a builtin command>";
-}
-
-void ASTDumper::dumpFullComment(const FullComment *C) {
- if (!C)
- return;
-
- FC = C;
- dumpComment(C);
- FC = nullptr;
-}
-
-void ASTDumper::dumpComment(const Comment *C) {
+void ASTDumper::dumpComment(const Comment *C, const FullComment *FC) {
dumpChild([=] {
+ NodeDumper.Visit(C, FC);
if (!C) {
- ColorScope Color(*this, NullColor);
- OS << "<<<NULL>>>";
return;
}
-
- {
- ColorScope Color(*this, CommentColor);
- OS << C->getCommentKindName();
- }
- dumpPointer(C);
- dumpSourceRange(C->getSourceRange());
- ConstCommentVisitor<ASTDumper>::visit(C);
+ ConstCommentVisitor<ASTDumper, void, const FullComment *>::visit(C, FC);
for (Comment::child_iterator I = C->child_begin(), E = C->child_end();
I != E; ++I)
- dumpComment(*I);
+ dumpComment(*I, FC);
});
}
-void ASTDumper::visitTextComment(const TextComment *C) {
- OS << " Text=\"" << C->getText() << "\"";
-}
-
-void ASTDumper::visitInlineCommandComment(const InlineCommandComment *C) {
- OS << " Name=\"" << getCommandName(C->getCommandID()) << "\"";
- switch (C->getRenderKind()) {
- case InlineCommandComment::RenderNormal:
- OS << " RenderNormal";
- break;
- case InlineCommandComment::RenderBold:
- OS << " RenderBold";
- break;
- case InlineCommandComment::RenderMonospaced:
- OS << " RenderMonospaced";
- break;
- case InlineCommandComment::RenderEmphasized:
- OS << " RenderEmphasized";
- break;
- }
-
- for (unsigned i = 0, e = C->getNumArgs(); i != e; ++i)
- OS << " Arg[" << i << "]=\"" << C->getArgText(i) << "\"";
-}
-
-void ASTDumper::visitHTMLStartTagComment(const HTMLStartTagComment *C) {
- OS << " Name=\"" << C->getTagName() << "\"";
- if (C->getNumAttrs() != 0) {
- OS << " Attrs: ";
- for (unsigned i = 0, e = C->getNumAttrs(); i != e; ++i) {
- const HTMLStartTagComment::Attribute &Attr = C->getAttr(i);
- OS << " \"" << Attr.Name << "=\"" << Attr.Value << "\"";
- }
- }
- if (C->isSelfClosing())
- OS << " SelfClosing";
-}
-
-void ASTDumper::visitHTMLEndTagComment(const HTMLEndTagComment *C) {
- OS << " Name=\"" << C->getTagName() << "\"";
-}
-
-void ASTDumper::visitBlockCommandComment(const BlockCommandComment *C) {
- OS << " Name=\"" << getCommandName(C->getCommandID()) << "\"";
- for (unsigned i = 0, e = C->getNumArgs(); i != e; ++i)
- OS << " Arg[" << i << "]=\"" << C->getArgText(i) << "\"";
-}
-
-void ASTDumper::visitParamCommandComment(const ParamCommandComment *C) {
- OS << " " << ParamCommandComment::getDirectionAsString(C->getDirection());
-
- if (C->isDirectionExplicit())
- OS << " explicitly";
- else
- OS << " implicitly";
-
- if (C->hasParamName()) {
- if (C->isParamIndexValid())
- OS << " Param=\"" << C->getParamName(FC) << "\"";
- else
- OS << " Param=\"" << C->getParamNameAsWritten() << "\"";
- }
-
- if (C->isParamIndexValid() && !C->isVarArgParam())
- OS << " ParamIndex=" << C->getParamIndex();
-}
-
-void ASTDumper::visitTParamCommandComment(const TParamCommandComment *C) {
- if (C->hasParamName()) {
- if (C->isPositionValid())
- OS << " Param=\"" << C->getParamName(FC) << "\"";
- else
- OS << " Param=\"" << C->getParamNameAsWritten() << "\"";
- }
-
- if (C->isPositionValid()) {
- OS << " Position=<";
- for (unsigned i = 0, e = C->getDepth(); i != e; ++i) {
- OS << C->getIndex(i);
- if (i != e - 1)
- OS << ", ";
- }
- OS << ">";
- }
-}
-
-void ASTDumper::visitVerbatimBlockComment(const VerbatimBlockComment *C) {
- OS << " Name=\"" << getCommandName(C->getCommandID()) << "\""
- " CloseName=\"" << C->getCloseName() << "\"";
-}
-
-void ASTDumper::visitVerbatimBlockLineComment(
- const VerbatimBlockLineComment *C) {
- OS << " Text=\"" << C->getText() << "\"";
-}
-
-void ASTDumper::visitVerbatimLineComment(const VerbatimLineComment *C) {
- OS << " Text=\"" << C->getText() << "\"";
-}
-
//===----------------------------------------------------------------------===//
// Type method implementations
//===----------------------------------------------------------------------===//
@@ -2846,12 +2403,16 @@ LLVM_DUMP_METHOD void Comment::dump(const ASTContext &Context) const {
void Comment::dump(raw_ostream &OS, const CommandTraits *Traits,
const SourceManager *SM) const {
const FullComment *FC = dyn_cast<FullComment>(this);
+ if (!FC)
+ return;
ASTDumper D(OS, Traits, SM);
- D.dumpFullComment(FC);
+ D.dumpComment(FC, FC);
}
LLVM_DUMP_METHOD void Comment::dumpColor() const {
const FullComment *FC = dyn_cast<FullComment>(this);
+ if (!FC)
+ return;
ASTDumper D(llvm::errs(), nullptr, nullptr, /*ShowColors*/true);
- D.dumpFullComment(FC);
+ D.dumpComment(FC, FC);
}
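
The ASTImporter diff that follows converts the import entry points from null-checked return values to llvm::Expected<T> and llvm::Error. Below is a minimal, self-contained sketch of that propagation idiom for readers unfamiliar with it; the importName and importQualifiedName helpers are hypothetical illustrations written for this note, not functions from the patch.

#include "llvm/Support/Error.h"
#include <string>

using llvm::Expected;

// Hypothetical leaf import step that can fail (illustration only).
static Expected<std::string> importName(bool Succeed) {
  if (!Succeed)
    return llvm::make_error<llvm::StringError>(
        "name import failed", llvm::inconvertibleErrorCode());
  return std::string("Imported");
}

// Callers test the Expected and forward failures with takeError(), mirroring
// the "if (!XOrErr) return XOrErr.takeError();" checks that appear throughout
// the new visitor implementations in the hunks below.
static Expected<std::string> importQualifiedName(bool Succeed) {
  Expected<std::string> NameOrErr = importName(Succeed);
  if (!NameOrErr)
    return NameOrErr.takeError();
  return "clang::" + *NameOrErr;
}

A top-level call site would typically unwrap such a result with an explicit check, for example: if (auto R = importQualifiedName(true)) { /* use *R */ } else llvm::consumeError(R.takeError());
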
diff --git a/lib/AST/ASTImporter.cpp b/lib/AST/ASTImporter.cpp
index 884d60df76..6ba80cdaa7 100644
--- a/lib/AST/ASTImporter.cpp
+++ b/lib/AST/ASTImporter.cpp
@@ -71,32 +71,43 @@
namespace clang {
- unsigned ASTImporter::getFieldIndex(Decl *F) {
- assert(F && (isa<FieldDecl>(*F) || isa<IndirectFieldDecl>(*F)) &&
- "Try to get field index for non-field.");
-
- auto *Owner = dyn_cast<RecordDecl>(F->getDeclContext());
- if (!Owner)
- return 0;
-
- unsigned Index = 1;
- for (const auto *D : Owner->decls()) {
- if (D == F)
- return Index;
-
- if (isa<FieldDecl>(*D) || isa<IndirectFieldDecl>(*D))
- ++Index;
+ using llvm::make_error;
+ using llvm::Error;
+ using llvm::Expected;
+ using ExpectedType = llvm::Expected<QualType>;
+ using ExpectedStmt = llvm::Expected<Stmt *>;
+ using ExpectedExpr = llvm::Expected<Expr *>;
+ using ExpectedDecl = llvm::Expected<Decl *>;
+ using ExpectedSLoc = llvm::Expected<SourceLocation>;
+
+ std::string ImportError::toString() const {
+ // FIXME: Improve error texts.
+ switch (Error) {
+ case NameConflict:
+ return "NameConflict";
+ case UnsupportedConstruct:
+ return "UnsupportedConstruct";
+ case Unknown:
+ return "Unknown error";
}
+ llvm_unreachable("Invalid error code.");
+ return "Invalid error code.";
+ }
- llvm_unreachable("Field was not found in its parent context.");
+ void ImportError::log(raw_ostream &OS) const {
+ OS << toString();
+ }
- return 0;
+ std::error_code ImportError::convertToErrorCode() const {
+ llvm_unreachable("Function not implemented.");
}
+ char ImportError::ID;
+
template <class T>
- SmallVector<Decl*, 2>
+ SmallVector<Decl *, 2>
getCanonicalForwardRedeclChain(Redeclarable<T>* D) {
- SmallVector<Decl*, 2> Redecls;
+ SmallVector<Decl *, 2> Redecls;
for (auto *R : D->getFirstDecl()->redecls()) {
if (R != D->getFirstDecl())
Redecls.push_back(R);
@@ -121,12 +132,132 @@ namespace clang {
To->setIsUsed();
}
- class ASTNodeImporter : public TypeVisitor<ASTNodeImporter, QualType>,
- public DeclVisitor<ASTNodeImporter, Decl *>,
- public StmtVisitor<ASTNodeImporter, Stmt *> {
+ Optional<unsigned> ASTImporter::getFieldIndex(Decl *F) {
+ assert(F && (isa<FieldDecl>(*F) || isa<IndirectFieldDecl>(*F)) &&
+ "Try to get field index for non-field.");
+
+ auto *Owner = dyn_cast<RecordDecl>(F->getDeclContext());
+ if (!Owner)
+ return None;
+
+ unsigned Index = 0;
+ for (const auto *D : Owner->decls()) {
+ if (D == F)
+ return Index;
+
+ if (isa<FieldDecl>(*D) || isa<IndirectFieldDecl>(*D))
+ ++Index;
+ }
+
+ llvm_unreachable("Field was not found in its parent context.");
+
+ return None;
+ }
+
+ // FIXME: Temporary until every import returns Expected.
+ template <>
+ LLVM_NODISCARD Error
+ ASTImporter::importInto(SourceLocation &To, const SourceLocation &From) {
+ To = Import(From);
+ if (From.isValid() && To.isInvalid())
+ return llvm::make_error<ImportError>();
+ return Error::success();
+ }
+ // FIXME: Temporary until every import returns Expected.
+ template <>
+ LLVM_NODISCARD Error
+ ASTImporter::importInto(QualType &To, const QualType &From) {
+ To = Import(From);
+ if (!From.isNull() && To.isNull())
+ return llvm::make_error<ImportError>();
+ return Error::success();
+ }
+
+ class ASTNodeImporter : public TypeVisitor<ASTNodeImporter, ExpectedType>,
+ public DeclVisitor<ASTNodeImporter, ExpectedDecl>,
+ public StmtVisitor<ASTNodeImporter, ExpectedStmt> {
ASTImporter &Importer;
- // Wrapper for an overload set.
+ // Use this instead of Importer.importInto .
+ template <typename ImportT>
+ LLVM_NODISCARD Error importInto(ImportT &To, const ImportT &From) {
+ return Importer.importInto(To, From);
+ }
+
+ // Use this to import pointers of specific type.
+ template <typename ImportT>
+ LLVM_NODISCARD Error importInto(ImportT *&To, ImportT *From) {
+ auto ToI = Importer.Import(From);
+ if (!ToI && From)
+ return make_error<ImportError>();
+ To = cast_or_null<ImportT>(ToI);
+ return Error::success();
+ // FIXME: This should be the final code.
+ //auto ToOrErr = Importer.Import(From);
+ //if (ToOrErr) {
+ // To = cast_or_null<ImportT>(*ToOrErr);
+ //}
+ //return ToOrErr.takeError();
+ }
+
+ // Call the import function of ASTImporter for a baseclass of type `T` and
+ // cast the return value to `T`.
+ template <typename T>
+ Expected<T *> import(T *From) {
+ auto *To = Importer.Import(From);
+ if (!To && From)
+ return make_error<ImportError>();
+ return cast_or_null<T>(To);
+ // FIXME: This should be the final code.
+ //auto ToOrErr = Importer.Import(From);
+ //if (!ToOrErr)
+ // return ToOrErr.takeError();
+ //return cast_or_null<T>(*ToOrErr);
+ }
+
+ template <typename T>
+ Expected<T *> import(const T *From) {
+ return import(const_cast<T *>(From));
+ }
+
+ // Call the import function of ASTImporter for type `T`.
+ template <typename T>
+ Expected<T> import(const T &From) {
+ T To = Importer.Import(From);
+ T DefaultT;
+ if (To == DefaultT && !(From == DefaultT))
+ return make_error<ImportError>();
+ return To;
+ // FIXME: This should be the final code.
+ //return Importer.Import(From);
+ }
+
+ template <class T>
+ Expected<std::tuple<T>>
+ importSeq(const T &From) {
+ Expected<T> ToOrErr = import(From);
+ if (!ToOrErr)
+ return ToOrErr.takeError();
+ return std::make_tuple<T>(std::move(*ToOrErr));
+ }
+
+ // Import multiple objects with a single function call.
+ // This should work for every type for which a variant of `import` exists.
+ // The arguments are processed from left to right and import is stopped on
+ // first error.
+ template <class THead, class... TTail>
+ Expected<std::tuple<THead, TTail...>>
+ importSeq(const THead &FromHead, const TTail &...FromTail) {
+ Expected<std::tuple<THead>> ToHeadOrErr = importSeq(FromHead);
+ if (!ToHeadOrErr)
+ return ToHeadOrErr.takeError();
+ Expected<std::tuple<TTail...>> ToTailOrErr = importSeq(FromTail...);
+ if (!ToTailOrErr)
+ return ToTailOrErr.takeError();
+ return std::tuple_cat(*ToHeadOrErr, *ToTailOrErr);
+ }
+
+// Wrapper for an overload set.
template <typename ToDeclT> struct CallOverloadedCreateFun {
template <typename... Args>
auto operator()(Args &&... args)
@@ -171,6 +302,11 @@ namespace clang {
LLVM_NODISCARD bool
GetImportedOrCreateSpecialDecl(ToDeclT *&ToD, CreateFunT CreateFun,
FromDeclT *FromD, Args &&... args) {
+ // FIXME: This code is needed later.
+ //if (Importer.getImportDeclErrorIfAny(FromD)) {
+ // ToD = nullptr;
+ // return true; // Already imported but with error.
+ //}
ToD = cast_or_null<ToDeclT>(Importer.GetAlreadyImportedOrNull(FromD));
if (ToD)
return true; // Already imported.
@@ -194,84 +330,82 @@ namespace clang {
public:
explicit ASTNodeImporter(ASTImporter &Importer) : Importer(Importer) {}
- using TypeVisitor<ASTNodeImporter, QualType>::Visit;
- using DeclVisitor<ASTNodeImporter, Decl *>::Visit;
- using StmtVisitor<ASTNodeImporter, Stmt *>::Visit;
+ using TypeVisitor<ASTNodeImporter, ExpectedType>::Visit;
+ using DeclVisitor<ASTNodeImporter, ExpectedDecl>::Visit;
+ using StmtVisitor<ASTNodeImporter, ExpectedStmt>::Visit;
// Importing types
- QualType VisitType(const Type *T);
- QualType VisitAtomicType(const AtomicType *T);
- QualType VisitBuiltinType(const BuiltinType *T);
- QualType VisitDecayedType(const DecayedType *T);
- QualType VisitComplexType(const ComplexType *T);
- QualType VisitPointerType(const PointerType *T);
- QualType VisitBlockPointerType(const BlockPointerType *T);
- QualType VisitLValueReferenceType(const LValueReferenceType *T);
- QualType VisitRValueReferenceType(const RValueReferenceType *T);
- QualType VisitMemberPointerType(const MemberPointerType *T);
- QualType VisitConstantArrayType(const ConstantArrayType *T);
- QualType VisitIncompleteArrayType(const IncompleteArrayType *T);
- QualType VisitVariableArrayType(const VariableArrayType *T);
- QualType VisitDependentSizedArrayType(const DependentSizedArrayType *T);
+ ExpectedType VisitType(const Type *T);
+ ExpectedType VisitAtomicType(const AtomicType *T);
+ ExpectedType VisitBuiltinType(const BuiltinType *T);
+ ExpectedType VisitDecayedType(const DecayedType *T);
+ ExpectedType VisitComplexType(const ComplexType *T);
+ ExpectedType VisitPointerType(const PointerType *T);
+ ExpectedType VisitBlockPointerType(const BlockPointerType *T);
+ ExpectedType VisitLValueReferenceType(const LValueReferenceType *T);
+ ExpectedType VisitRValueReferenceType(const RValueReferenceType *T);
+ ExpectedType VisitMemberPointerType(const MemberPointerType *T);
+ ExpectedType VisitConstantArrayType(const ConstantArrayType *T);
+ ExpectedType VisitIncompleteArrayType(const IncompleteArrayType *T);
+ ExpectedType VisitVariableArrayType(const VariableArrayType *T);
+ ExpectedType VisitDependentSizedArrayType(const DependentSizedArrayType *T);
// FIXME: DependentSizedExtVectorType
- QualType VisitVectorType(const VectorType *T);
- QualType VisitExtVectorType(const ExtVectorType *T);
- QualType VisitFunctionNoProtoType(const FunctionNoProtoType *T);
- QualType VisitFunctionProtoType(const FunctionProtoType *T);
- QualType VisitUnresolvedUsingType(const UnresolvedUsingType *T);
- QualType VisitParenType(const ParenType *T);
- QualType VisitTypedefType(const TypedefType *T);
- QualType VisitTypeOfExprType(const TypeOfExprType *T);
+ ExpectedType VisitVectorType(const VectorType *T);
+ ExpectedType VisitExtVectorType(const ExtVectorType *T);
+ ExpectedType VisitFunctionNoProtoType(const FunctionNoProtoType *T);
+ ExpectedType VisitFunctionProtoType(const FunctionProtoType *T);
+ ExpectedType VisitUnresolvedUsingType(const UnresolvedUsingType *T);
+ ExpectedType VisitParenType(const ParenType *T);
+ ExpectedType VisitTypedefType(const TypedefType *T);
+ ExpectedType VisitTypeOfExprType(const TypeOfExprType *T);
// FIXME: DependentTypeOfExprType
- QualType VisitTypeOfType(const TypeOfType *T);
- QualType VisitDecltypeType(const DecltypeType *T);
- QualType VisitUnaryTransformType(const UnaryTransformType *T);
- QualType VisitAutoType(const AutoType *T);
- QualType VisitInjectedClassNameType(const InjectedClassNameType *T);
+ ExpectedType VisitTypeOfType(const TypeOfType *T);
+ ExpectedType VisitDecltypeType(const DecltypeType *T);
+ ExpectedType VisitUnaryTransformType(const UnaryTransformType *T);
+ ExpectedType VisitAutoType(const AutoType *T);
+ ExpectedType VisitInjectedClassNameType(const InjectedClassNameType *T);
// FIXME: DependentDecltypeType
- QualType VisitRecordType(const RecordType *T);
- QualType VisitEnumType(const EnumType *T);
- QualType VisitAttributedType(const AttributedType *T);
- QualType VisitTemplateTypeParmType(const TemplateTypeParmType *T);
- QualType VisitSubstTemplateTypeParmType(const SubstTemplateTypeParmType *T);
- QualType VisitTemplateSpecializationType(const TemplateSpecializationType *T);
- QualType VisitElaboratedType(const ElaboratedType *T);
- QualType VisitDependentNameType(const DependentNameType *T);
- QualType VisitPackExpansionType(const PackExpansionType *T);
- QualType VisitDependentTemplateSpecializationType(
+ ExpectedType VisitRecordType(const RecordType *T);
+ ExpectedType VisitEnumType(const EnumType *T);
+ ExpectedType VisitAttributedType(const AttributedType *T);
+ ExpectedType VisitTemplateTypeParmType(const TemplateTypeParmType *T);
+ ExpectedType VisitSubstTemplateTypeParmType(
+ const SubstTemplateTypeParmType *T);
+ ExpectedType VisitTemplateSpecializationType(
+ const TemplateSpecializationType *T);
+ ExpectedType VisitElaboratedType(const ElaboratedType *T);
+ ExpectedType VisitDependentNameType(const DependentNameType *T);
+ ExpectedType VisitPackExpansionType(const PackExpansionType *T);
+ ExpectedType VisitDependentTemplateSpecializationType(
const DependentTemplateSpecializationType *T);
- QualType VisitObjCInterfaceType(const ObjCInterfaceType *T);
- QualType VisitObjCObjectType(const ObjCObjectType *T);
- QualType VisitObjCObjectPointerType(const ObjCObjectPointerType *T);
+ ExpectedType VisitObjCInterfaceType(const ObjCInterfaceType *T);
+ ExpectedType VisitObjCObjectType(const ObjCObjectType *T);
+ ExpectedType VisitObjCObjectPointerType(const ObjCObjectPointerType *T);
// Importing declarations
- bool ImportDeclParts(NamedDecl *D, DeclContext *&DC,
- DeclContext *&LexicalDC, DeclarationName &Name,
- NamedDecl *&ToD, SourceLocation &Loc);
- void ImportDefinitionIfNeeded(Decl *FromD, Decl *ToD = nullptr);
- void ImportDeclarationNameLoc(const DeclarationNameInfo &From,
- DeclarationNameInfo& To);
- void ImportDeclContext(DeclContext *FromDC, bool ForceImport = false);
- void ImportImplicitMethods(const CXXRecordDecl *From, CXXRecordDecl *To);
-
- bool ImportCastPath(CastExpr *E, CXXCastPath &Path);
+ Error ImportDeclParts(
+ NamedDecl *D, DeclContext *&DC, DeclContext *&LexicalDC,
+ DeclarationName &Name, NamedDecl *&ToD, SourceLocation &Loc);
+ Error ImportDefinitionIfNeeded(Decl *FromD, Decl *ToD = nullptr);
+ Error ImportDeclarationNameLoc(
+ const DeclarationNameInfo &From, DeclarationNameInfo &To);
+ Error ImportDeclContext(DeclContext *FromDC, bool ForceImport = false);
+ Error ImportDeclContext(
+ Decl *From, DeclContext *&ToDC, DeclContext *&ToLexicalDC);
+ Error ImportImplicitMethods(const CXXRecordDecl *From, CXXRecordDecl *To);
+
+ Expected<CXXCastPath> ImportCastPath(CastExpr *E);
using Designator = DesignatedInitExpr::Designator;
- Designator ImportDesignator(const Designator &D);
-
- Optional<LambdaCapture> ImportLambdaCapture(const LambdaCapture &From);
-
/// What we should import from the definition.
enum ImportDefinitionKind {
/// Import the default subset of the definition, which might be
/// nothing (if minimal import is set) or might be everything (if minimal
/// import is not set).
IDK_Default,
-
/// Import everything.
IDK_Everything,
-
/// Import only the bare bones needed to establish a valid
/// DeclContext.
IDK_Basic
@@ -282,41 +416,44 @@ namespace clang {
(IDK == IDK_Default && !Importer.isMinimalImport());
}
- bool ImportInitializer(VarDecl *From, VarDecl *To);
- bool ImportDefinition(RecordDecl *From, RecordDecl *To,
- ImportDefinitionKind Kind = IDK_Default);
- bool ImportDefinition(EnumDecl *From, EnumDecl *To,
- ImportDefinitionKind Kind = IDK_Default);
- bool ImportDefinition(ObjCInterfaceDecl *From, ObjCInterfaceDecl *To,
- ImportDefinitionKind Kind = IDK_Default);
- bool ImportDefinition(ObjCProtocolDecl *From, ObjCProtocolDecl *To,
- ImportDefinitionKind Kind = IDK_Default);
- TemplateParameterList *ImportTemplateParameterList(
+ Error ImportInitializer(VarDecl *From, VarDecl *To);
+ Error ImportDefinition(
+ RecordDecl *From, RecordDecl *To,
+ ImportDefinitionKind Kind = IDK_Default);
+ Error ImportDefinition(
+ EnumDecl *From, EnumDecl *To,
+ ImportDefinitionKind Kind = IDK_Default);
+ Error ImportDefinition(
+ ObjCInterfaceDecl *From, ObjCInterfaceDecl *To,
+ ImportDefinitionKind Kind = IDK_Default);
+ Error ImportDefinition(
+ ObjCProtocolDecl *From, ObjCProtocolDecl *To,
+ ImportDefinitionKind Kind = IDK_Default);
+ Expected<TemplateParameterList *> ImportTemplateParameterList(
TemplateParameterList *Params);
- TemplateArgument ImportTemplateArgument(const TemplateArgument &From);
- Optional<TemplateArgumentLoc> ImportTemplateArgumentLoc(
- const TemplateArgumentLoc &TALoc);
- bool ImportTemplateArguments(const TemplateArgument *FromArgs,
- unsigned NumFromArgs,
- SmallVectorImpl<TemplateArgument> &ToArgs);
+ Error ImportTemplateArguments(
+ const TemplateArgument *FromArgs, unsigned NumFromArgs,
+ SmallVectorImpl<TemplateArgument> &ToArgs);
+ Expected<TemplateArgument>
+ ImportTemplateArgument(const TemplateArgument &From);
template <typename InContainerTy>
- bool ImportTemplateArgumentListInfo(const InContainerTy &Container,
- TemplateArgumentListInfo &ToTAInfo);
+ Error ImportTemplateArgumentListInfo(
+ const InContainerTy &Container, TemplateArgumentListInfo &ToTAInfo);
template<typename InContainerTy>
- bool ImportTemplateArgumentListInfo(SourceLocation FromLAngleLoc,
- SourceLocation FromRAngleLoc,
- const InContainerTy &Container,
- TemplateArgumentListInfo &Result);
+ Error ImportTemplateArgumentListInfo(
+ SourceLocation FromLAngleLoc, SourceLocation FromRAngleLoc,
+ const InContainerTy &Container, TemplateArgumentListInfo &Result);
using TemplateArgsTy = SmallVector<TemplateArgument, 8>;
- using OptionalTemplateArgsTy = Optional<TemplateArgsTy>;
- std::tuple<FunctionTemplateDecl *, OptionalTemplateArgsTy>
+ using FunctionTemplateAndArgsTy =
+ std::tuple<FunctionTemplateDecl *, TemplateArgsTy>;
+ Expected<FunctionTemplateAndArgsTy>
ImportFunctionTemplateWithTemplateArgsFromSpecialization(
FunctionDecl *FromFD);
- bool ImportTemplateInformation(FunctionDecl *FromFD, FunctionDecl *ToFD);
+ Error ImportTemplateInformation(FunctionDecl *FromFD, FunctionDecl *ToFD);
bool IsStructuralMatch(Decl *From, Decl *To, bool Complain);
bool IsStructuralMatch(RecordDecl *FromRecord, RecordDecl *ToRecord,
@@ -330,258 +467,498 @@ namespace clang {
bool IsStructuralMatch(FunctionDecl *From, FunctionDecl *To);
bool IsStructuralMatch(ClassTemplateDecl *From, ClassTemplateDecl *To);
bool IsStructuralMatch(VarTemplateDecl *From, VarTemplateDecl *To);
- Decl *VisitDecl(Decl *D);
- Decl *VisitEmptyDecl(EmptyDecl *D);
- Decl *VisitAccessSpecDecl(AccessSpecDecl *D);
- Decl *VisitStaticAssertDecl(StaticAssertDecl *D);
- Decl *VisitTranslationUnitDecl(TranslationUnitDecl *D);
- Decl *VisitNamespaceDecl(NamespaceDecl *D);
- Decl *VisitNamespaceAliasDecl(NamespaceAliasDecl *D);
- Decl *VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias);
- Decl *VisitTypedefDecl(TypedefDecl *D);
- Decl *VisitTypeAliasDecl(TypeAliasDecl *D);
- Decl *VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D);
- Decl *VisitLabelDecl(LabelDecl *D);
- Decl *VisitEnumDecl(EnumDecl *D);
- Decl *VisitRecordDecl(RecordDecl *D);
- Decl *VisitEnumConstantDecl(EnumConstantDecl *D);
- Decl *VisitFunctionDecl(FunctionDecl *D);
- Decl *VisitCXXMethodDecl(CXXMethodDecl *D);
- Decl *VisitCXXConstructorDecl(CXXConstructorDecl *D);
- Decl *VisitCXXDestructorDecl(CXXDestructorDecl *D);
- Decl *VisitCXXConversionDecl(CXXConversionDecl *D);
- Decl *VisitFieldDecl(FieldDecl *D);
- Decl *VisitIndirectFieldDecl(IndirectFieldDecl *D);
- Decl *VisitFriendDecl(FriendDecl *D);
- Decl *VisitObjCIvarDecl(ObjCIvarDecl *D);
- Decl *VisitVarDecl(VarDecl *D);
- Decl *VisitImplicitParamDecl(ImplicitParamDecl *D);
- Decl *VisitParmVarDecl(ParmVarDecl *D);
- Decl *VisitObjCMethodDecl(ObjCMethodDecl *D);
- Decl *VisitObjCTypeParamDecl(ObjCTypeParamDecl *D);
- Decl *VisitObjCCategoryDecl(ObjCCategoryDecl *D);
- Decl *VisitObjCProtocolDecl(ObjCProtocolDecl *D);
- Decl *VisitLinkageSpecDecl(LinkageSpecDecl *D);
- Decl *VisitUsingDecl(UsingDecl *D);
- Decl *VisitUsingShadowDecl(UsingShadowDecl *D);
- Decl *VisitUsingDirectiveDecl(UsingDirectiveDecl *D);
- Decl *VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D);
- Decl *VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D);
-
- ObjCTypeParamList *ImportObjCTypeParamList(ObjCTypeParamList *list);
- Decl *VisitObjCInterfaceDecl(ObjCInterfaceDecl *D);
- Decl *VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D);
- Decl *VisitObjCImplementationDecl(ObjCImplementationDecl *D);
- Decl *VisitObjCPropertyDecl(ObjCPropertyDecl *D);
- Decl *VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D);
- Decl *VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D);
- Decl *VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D);
- Decl *VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D);
- Decl *VisitClassTemplateDecl(ClassTemplateDecl *D);
- Decl *VisitClassTemplateSpecializationDecl(
+ ExpectedDecl VisitDecl(Decl *D);
+ ExpectedDecl VisitImportDecl(ImportDecl *D);
+ ExpectedDecl VisitEmptyDecl(EmptyDecl *D);
+ ExpectedDecl VisitAccessSpecDecl(AccessSpecDecl *D);
+ ExpectedDecl VisitStaticAssertDecl(StaticAssertDecl *D);
+ ExpectedDecl VisitTranslationUnitDecl(TranslationUnitDecl *D);
+ ExpectedDecl VisitNamespaceDecl(NamespaceDecl *D);
+ ExpectedDecl VisitNamespaceAliasDecl(NamespaceAliasDecl *D);
+ ExpectedDecl VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias);
+ ExpectedDecl VisitTypedefDecl(TypedefDecl *D);
+ ExpectedDecl VisitTypeAliasDecl(TypeAliasDecl *D);
+ ExpectedDecl VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D);
+ ExpectedDecl VisitLabelDecl(LabelDecl *D);
+ ExpectedDecl VisitEnumDecl(EnumDecl *D);
+ ExpectedDecl VisitRecordDecl(RecordDecl *D);
+ ExpectedDecl VisitEnumConstantDecl(EnumConstantDecl *D);
+ ExpectedDecl VisitFunctionDecl(FunctionDecl *D);
+ ExpectedDecl VisitCXXMethodDecl(CXXMethodDecl *D);
+ ExpectedDecl VisitCXXConstructorDecl(CXXConstructorDecl *D);
+ ExpectedDecl VisitCXXDestructorDecl(CXXDestructorDecl *D);
+ ExpectedDecl VisitCXXConversionDecl(CXXConversionDecl *D);
+ ExpectedDecl VisitFieldDecl(FieldDecl *D);
+ ExpectedDecl VisitIndirectFieldDecl(IndirectFieldDecl *D);
+ ExpectedDecl VisitFriendDecl(FriendDecl *D);
+ ExpectedDecl VisitObjCIvarDecl(ObjCIvarDecl *D);
+ ExpectedDecl VisitVarDecl(VarDecl *D);
+ ExpectedDecl VisitImplicitParamDecl(ImplicitParamDecl *D);
+ ExpectedDecl VisitParmVarDecl(ParmVarDecl *D);
+ ExpectedDecl VisitObjCMethodDecl(ObjCMethodDecl *D);
+ ExpectedDecl VisitObjCTypeParamDecl(ObjCTypeParamDecl *D);
+ ExpectedDecl VisitObjCCategoryDecl(ObjCCategoryDecl *D);
+ ExpectedDecl VisitObjCProtocolDecl(ObjCProtocolDecl *D);
+ ExpectedDecl VisitLinkageSpecDecl(LinkageSpecDecl *D);
+ ExpectedDecl VisitUsingDecl(UsingDecl *D);
+ ExpectedDecl VisitUsingShadowDecl(UsingShadowDecl *D);
+ ExpectedDecl VisitUsingDirectiveDecl(UsingDirectiveDecl *D);
+ ExpectedDecl VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D);
+ ExpectedDecl VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D);
+
+ Expected<ObjCTypeParamList *>
+ ImportObjCTypeParamList(ObjCTypeParamList *list);
+
+ ExpectedDecl VisitObjCInterfaceDecl(ObjCInterfaceDecl *D);
+ ExpectedDecl VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D);
+ ExpectedDecl VisitObjCImplementationDecl(ObjCImplementationDecl *D);
+ ExpectedDecl VisitObjCPropertyDecl(ObjCPropertyDecl *D);
+ ExpectedDecl VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D);
+ ExpectedDecl VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D);
+ ExpectedDecl VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D);
+ ExpectedDecl VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D);
+ ExpectedDecl VisitClassTemplateDecl(ClassTemplateDecl *D);
+ ExpectedDecl VisitClassTemplateSpecializationDecl(
ClassTemplateSpecializationDecl *D);
- Decl *VisitVarTemplateDecl(VarTemplateDecl *D);
- Decl *VisitVarTemplateSpecializationDecl(VarTemplateSpecializationDecl *D);
- Decl *VisitFunctionTemplateDecl(FunctionTemplateDecl *D);
+ ExpectedDecl VisitVarTemplateDecl(VarTemplateDecl *D);
+ ExpectedDecl VisitVarTemplateSpecializationDecl(VarTemplateSpecializationDecl *D);
+ ExpectedDecl VisitFunctionTemplateDecl(FunctionTemplateDecl *D);
// Importing statements
- DeclGroupRef ImportDeclGroup(DeclGroupRef DG);
-
- Stmt *VisitStmt(Stmt *S);
- Stmt *VisitGCCAsmStmt(GCCAsmStmt *S);
- Stmt *VisitDeclStmt(DeclStmt *S);
- Stmt *VisitNullStmt(NullStmt *S);
- Stmt *VisitCompoundStmt(CompoundStmt *S);
- Stmt *VisitCaseStmt(CaseStmt *S);
- Stmt *VisitDefaultStmt(DefaultStmt *S);
- Stmt *VisitLabelStmt(LabelStmt *S);
- Stmt *VisitAttributedStmt(AttributedStmt *S);
- Stmt *VisitIfStmt(IfStmt *S);
- Stmt *VisitSwitchStmt(SwitchStmt *S);
- Stmt *VisitWhileStmt(WhileStmt *S);
- Stmt *VisitDoStmt(DoStmt *S);
- Stmt *VisitForStmt(ForStmt *S);
- Stmt *VisitGotoStmt(GotoStmt *S);
- Stmt *VisitIndirectGotoStmt(IndirectGotoStmt *S);
- Stmt *VisitContinueStmt(ContinueStmt *S);
- Stmt *VisitBreakStmt(BreakStmt *S);
- Stmt *VisitReturnStmt(ReturnStmt *S);
+ ExpectedStmt VisitStmt(Stmt *S);
+ ExpectedStmt VisitGCCAsmStmt(GCCAsmStmt *S);
+ ExpectedStmt VisitDeclStmt(DeclStmt *S);
+ ExpectedStmt VisitNullStmt(NullStmt *S);
+ ExpectedStmt VisitCompoundStmt(CompoundStmt *S);
+ ExpectedStmt VisitCaseStmt(CaseStmt *S);
+ ExpectedStmt VisitDefaultStmt(DefaultStmt *S);
+ ExpectedStmt VisitLabelStmt(LabelStmt *S);
+ ExpectedStmt VisitAttributedStmt(AttributedStmt *S);
+ ExpectedStmt VisitIfStmt(IfStmt *S);
+ ExpectedStmt VisitSwitchStmt(SwitchStmt *S);
+ ExpectedStmt VisitWhileStmt(WhileStmt *S);
+ ExpectedStmt VisitDoStmt(DoStmt *S);
+ ExpectedStmt VisitForStmt(ForStmt *S);
+ ExpectedStmt VisitGotoStmt(GotoStmt *S);
+ ExpectedStmt VisitIndirectGotoStmt(IndirectGotoStmt *S);
+ ExpectedStmt VisitContinueStmt(ContinueStmt *S);
+ ExpectedStmt VisitBreakStmt(BreakStmt *S);
+ ExpectedStmt VisitReturnStmt(ReturnStmt *S);
// FIXME: MSAsmStmt
// FIXME: SEHExceptStmt
// FIXME: SEHFinallyStmt
// FIXME: SEHTryStmt
// FIXME: SEHLeaveStmt
// FIXME: CapturedStmt
- Stmt *VisitCXXCatchStmt(CXXCatchStmt *S);
- Stmt *VisitCXXTryStmt(CXXTryStmt *S);
- Stmt *VisitCXXForRangeStmt(CXXForRangeStmt *S);
+ ExpectedStmt VisitCXXCatchStmt(CXXCatchStmt *S);
+ ExpectedStmt VisitCXXTryStmt(CXXTryStmt *S);
+ ExpectedStmt VisitCXXForRangeStmt(CXXForRangeStmt *S);
// FIXME: MSDependentExistsStmt
- Stmt *VisitObjCForCollectionStmt(ObjCForCollectionStmt *S);
- Stmt *VisitObjCAtCatchStmt(ObjCAtCatchStmt *S);
- Stmt *VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S);
- Stmt *VisitObjCAtTryStmt(ObjCAtTryStmt *S);
- Stmt *VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S);
- Stmt *VisitObjCAtThrowStmt(ObjCAtThrowStmt *S);
- Stmt *VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S);
+ ExpectedStmt VisitObjCForCollectionStmt(ObjCForCollectionStmt *S);
+ ExpectedStmt VisitObjCAtCatchStmt(ObjCAtCatchStmt *S);
+ ExpectedStmt VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S);
+ ExpectedStmt VisitObjCAtTryStmt(ObjCAtTryStmt *S);
+ ExpectedStmt VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S);
+ ExpectedStmt VisitObjCAtThrowStmt(ObjCAtThrowStmt *S);
+ ExpectedStmt VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S);
// Importing expressions
- Expr *VisitExpr(Expr *E);
- Expr *VisitVAArgExpr(VAArgExpr *E);
- Expr *VisitGNUNullExpr(GNUNullExpr *E);
- Expr *VisitPredefinedExpr(PredefinedExpr *E);
- Expr *VisitDeclRefExpr(DeclRefExpr *E);
- Expr *VisitImplicitValueInitExpr(ImplicitValueInitExpr *ILE);
- Expr *VisitDesignatedInitExpr(DesignatedInitExpr *E);
- Expr *VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E);
- Expr *VisitIntegerLiteral(IntegerLiteral *E);
- Expr *VisitFloatingLiteral(FloatingLiteral *E);
- Expr *VisitImaginaryLiteral(ImaginaryLiteral *E);
- Expr *VisitCharacterLiteral(CharacterLiteral *E);
- Expr *VisitStringLiteral(StringLiteral *E);
- Expr *VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
- Expr *VisitAtomicExpr(AtomicExpr *E);
- Expr *VisitAddrLabelExpr(AddrLabelExpr *E);
- Expr *VisitParenExpr(ParenExpr *E);
- Expr *VisitParenListExpr(ParenListExpr *E);
- Expr *VisitStmtExpr(StmtExpr *E);
- Expr *VisitUnaryOperator(UnaryOperator *E);
- Expr *VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E);
- Expr *VisitBinaryOperator(BinaryOperator *E);
- Expr *VisitConditionalOperator(ConditionalOperator *E);
- Expr *VisitBinaryConditionalOperator(BinaryConditionalOperator *E);
- Expr *VisitOpaqueValueExpr(OpaqueValueExpr *E);
- Expr *VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E);
- Expr *VisitExpressionTraitExpr(ExpressionTraitExpr *E);
- Expr *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
- Expr *VisitCompoundAssignOperator(CompoundAssignOperator *E);
- Expr *VisitImplicitCastExpr(ImplicitCastExpr *E);
- Expr *VisitExplicitCastExpr(ExplicitCastExpr *E);
- Expr *VisitOffsetOfExpr(OffsetOfExpr *OE);
- Expr *VisitCXXThrowExpr(CXXThrowExpr *E);
- Expr *VisitCXXNoexceptExpr(CXXNoexceptExpr *E);
- Expr *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E);
- Expr *VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
- Expr *VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
- Expr *VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *CE);
- Expr *VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
- Expr *VisitPackExpansionExpr(PackExpansionExpr *E);
- Expr *VisitSizeOfPackExpr(SizeOfPackExpr *E);
- Expr *VisitCXXNewExpr(CXXNewExpr *CE);
- Expr *VisitCXXDeleteExpr(CXXDeleteExpr *E);
- Expr *VisitCXXConstructExpr(CXXConstructExpr *E);
- Expr *VisitCXXMemberCallExpr(CXXMemberCallExpr *E);
- Expr *VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E);
- Expr *VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E);
- Expr *VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *CE);
- Expr *VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E);
- Expr *VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E);
- Expr *VisitExprWithCleanups(ExprWithCleanups *EWC);
- Expr *VisitCXXThisExpr(CXXThisExpr *E);
- Expr *VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E);
- Expr *VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E);
- Expr *VisitMemberExpr(MemberExpr *E);
- Expr *VisitCallExpr(CallExpr *E);
- Expr *VisitLambdaExpr(LambdaExpr *LE);
- Expr *VisitInitListExpr(InitListExpr *E);
- Expr *VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
- Expr *VisitCXXInheritedCtorInitExpr(CXXInheritedCtorInitExpr *E);
- Expr *VisitArrayInitLoopExpr(ArrayInitLoopExpr *E);
- Expr *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E);
- Expr *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *E);
- Expr *VisitCXXNamedCastExpr(CXXNamedCastExpr *E);
- Expr *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E);
- Expr *VisitTypeTraitExpr(TypeTraitExpr *E);
- Expr *VisitCXXTypeidExpr(CXXTypeidExpr *E);
+ ExpectedStmt VisitExpr(Expr *E);
+ ExpectedStmt VisitVAArgExpr(VAArgExpr *E);
+ ExpectedStmt VisitGNUNullExpr(GNUNullExpr *E);
+ ExpectedStmt VisitPredefinedExpr(PredefinedExpr *E);
+ ExpectedStmt VisitDeclRefExpr(DeclRefExpr *E);
+ ExpectedStmt VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
+ ExpectedStmt VisitDesignatedInitExpr(DesignatedInitExpr *E);
+ ExpectedStmt VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E);
+ ExpectedStmt VisitIntegerLiteral(IntegerLiteral *E);
+ ExpectedStmt VisitFloatingLiteral(FloatingLiteral *E);
+ ExpectedStmt VisitImaginaryLiteral(ImaginaryLiteral *E);
+ ExpectedStmt VisitCharacterLiteral(CharacterLiteral *E);
+ ExpectedStmt VisitStringLiteral(StringLiteral *E);
+ ExpectedStmt VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
+ ExpectedStmt VisitAtomicExpr(AtomicExpr *E);
+ ExpectedStmt VisitAddrLabelExpr(AddrLabelExpr *E);
+ ExpectedStmt VisitConstantExpr(ConstantExpr *E);
+ ExpectedStmt VisitParenExpr(ParenExpr *E);
+ ExpectedStmt VisitParenListExpr(ParenListExpr *E);
+ ExpectedStmt VisitStmtExpr(StmtExpr *E);
+ ExpectedStmt VisitUnaryOperator(UnaryOperator *E);
+ ExpectedStmt VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E);
+ ExpectedStmt VisitBinaryOperator(BinaryOperator *E);
+ ExpectedStmt VisitConditionalOperator(ConditionalOperator *E);
+ ExpectedStmt VisitBinaryConditionalOperator(BinaryConditionalOperator *E);
+ ExpectedStmt VisitOpaqueValueExpr(OpaqueValueExpr *E);
+ ExpectedStmt VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E);
+ ExpectedStmt VisitExpressionTraitExpr(ExpressionTraitExpr *E);
+ ExpectedStmt VisitArraySubscriptExpr(ArraySubscriptExpr *E);
+ ExpectedStmt VisitCompoundAssignOperator(CompoundAssignOperator *E);
+ ExpectedStmt VisitImplicitCastExpr(ImplicitCastExpr *E);
+ ExpectedStmt VisitExplicitCastExpr(ExplicitCastExpr *E);
+ ExpectedStmt VisitOffsetOfExpr(OffsetOfExpr *OE);
+ ExpectedStmt VisitCXXThrowExpr(CXXThrowExpr *E);
+ ExpectedStmt VisitCXXNoexceptExpr(CXXNoexceptExpr *E);
+ ExpectedStmt VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E);
+ ExpectedStmt VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
+ ExpectedStmt VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
+ ExpectedStmt VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *E);
+ ExpectedStmt VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
+ ExpectedStmt VisitPackExpansionExpr(PackExpansionExpr *E);
+ ExpectedStmt VisitSizeOfPackExpr(SizeOfPackExpr *E);
+ ExpectedStmt VisitCXXNewExpr(CXXNewExpr *E);
+ ExpectedStmt VisitCXXDeleteExpr(CXXDeleteExpr *E);
+ ExpectedStmt VisitCXXConstructExpr(CXXConstructExpr *E);
+ ExpectedStmt VisitCXXMemberCallExpr(CXXMemberCallExpr *E);
+ ExpectedStmt VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E);
+ ExpectedStmt VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E);
+ ExpectedStmt VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *E);
+ ExpectedStmt VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E);
+ ExpectedStmt VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E);
+ ExpectedStmt VisitExprWithCleanups(ExprWithCleanups *E);
+ ExpectedStmt VisitCXXThisExpr(CXXThisExpr *E);
+ ExpectedStmt VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E);
+ ExpectedStmt VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E);
+ ExpectedStmt VisitMemberExpr(MemberExpr *E);
+ ExpectedStmt VisitCallExpr(CallExpr *E);
+ ExpectedStmt VisitLambdaExpr(LambdaExpr *LE);
+ ExpectedStmt VisitInitListExpr(InitListExpr *E);
+ ExpectedStmt VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
+ ExpectedStmt VisitCXXInheritedCtorInitExpr(CXXInheritedCtorInitExpr *E);
+ ExpectedStmt VisitArrayInitLoopExpr(ArrayInitLoopExpr *E);
+ ExpectedStmt VisitArrayInitIndexExpr(ArrayInitIndexExpr *E);
+ ExpectedStmt VisitCXXDefaultInitExpr(CXXDefaultInitExpr *E);
+ ExpectedStmt VisitCXXNamedCastExpr(CXXNamedCastExpr *E);
+ ExpectedStmt VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E);
+ ExpectedStmt VisitTypeTraitExpr(TypeTraitExpr *E);
+ ExpectedStmt VisitCXXTypeidExpr(CXXTypeidExpr *E);
template<typename IIter, typename OIter>
- void ImportArray(IIter Ibegin, IIter Iend, OIter Obegin) {
+ Error ImportArrayChecked(IIter Ibegin, IIter Iend, OIter Obegin) {
using ItemT = typename std::remove_reference<decltype(*Obegin)>::type;
-
- ASTImporter &ImporterRef = Importer;
- std::transform(Ibegin, Iend, Obegin,
- [&ImporterRef](ItemT From) -> ItemT {
- return ImporterRef.Import(From);
- });
- }
-
- template<typename IIter, typename OIter>
- bool ImportArrayChecked(IIter Ibegin, IIter Iend, OIter Obegin) {
- using ItemT = typename std::remove_reference<decltype(**Obegin)>::type;
-
- ASTImporter &ImporterRef = Importer;
- bool Failed = false;
- std::transform(Ibegin, Iend, Obegin,
- [&ImporterRef, &Failed](ItemT *From) -> ItemT * {
- auto *To = cast_or_null<ItemT>(ImporterRef.Import(From));
- if (!To && From)
- Failed = true;
- return To;
- });
- return Failed;
+ for (; Ibegin != Iend; ++Ibegin, ++Obegin) {
+ Expected<ItemT> ToOrErr = import(*Ibegin);
+ if (!ToOrErr)
+ return ToOrErr.takeError();
+ *Obegin = *ToOrErr;
+ }
+ return Error::success();
}
+ // Import every item from a container structure into an output container.
+ // If error occurs, stops at first error and returns the error.
+ // The output container should have space for all needed elements (it is not
+ // expanded, new items are put into from the beginning).
template<typename InContainerTy, typename OutContainerTy>
- bool ImportContainerChecked(const InContainerTy &InContainer,
- OutContainerTy &OutContainer) {
- return ImportArrayChecked(InContainer.begin(), InContainer.end(),
- OutContainer.begin());
+ Error ImportContainerChecked(
+ const InContainerTy &InContainer, OutContainerTy &OutContainer) {
+ return ImportArrayChecked(
+ InContainer.begin(), InContainer.end(), OutContainer.begin());
}
template<typename InContainerTy, typename OIter>
- bool ImportArrayChecked(const InContainerTy &InContainer, OIter Obegin) {
+ Error ImportArrayChecked(const InContainerTy &InContainer, OIter Obegin) {
return ImportArrayChecked(InContainer.begin(), InContainer.end(), Obegin);
}
- // Importing overrides.
void ImportOverrides(CXXMethodDecl *ToMethod, CXXMethodDecl *FromMethod);
- FunctionDecl *FindFunctionTemplateSpecialization(FunctionDecl *FromFD);
+ Expected<FunctionDecl *> FindFunctionTemplateSpecialization(
+ FunctionDecl *FromFD);
};
+// FIXME: Temporary until every import returns Expected.
+template <>
+Expected<TemplateName> ASTNodeImporter::import(const TemplateName &From) {
+ TemplateName To = Importer.Import(From);
+ if (To.isNull() && !From.isNull())
+ return make_error<ImportError>();
+ return To;
+}
+
template <typename InContainerTy>
-bool ASTNodeImporter::ImportTemplateArgumentListInfo(
+Error ASTNodeImporter::ImportTemplateArgumentListInfo(
SourceLocation FromLAngleLoc, SourceLocation FromRAngleLoc,
const InContainerTy &Container, TemplateArgumentListInfo &Result) {
- TemplateArgumentListInfo ToTAInfo(Importer.Import(FromLAngleLoc),
- Importer.Import(FromRAngleLoc));
- if (ImportTemplateArgumentListInfo(Container, ToTAInfo))
- return true;
+ auto ToLAngleLocOrErr = import(FromLAngleLoc);
+ if (!ToLAngleLocOrErr)
+ return ToLAngleLocOrErr.takeError();
+ auto ToRAngleLocOrErr = import(FromRAngleLoc);
+ if (!ToRAngleLocOrErr)
+ return ToRAngleLocOrErr.takeError();
+
+ TemplateArgumentListInfo ToTAInfo(*ToLAngleLocOrErr, *ToRAngleLocOrErr);
+ if (auto Err = ImportTemplateArgumentListInfo(Container, ToTAInfo))
+ return Err;
Result = ToTAInfo;
- return false;
+ return Error::success();
}
template <>
-bool ASTNodeImporter::ImportTemplateArgumentListInfo<TemplateArgumentListInfo>(
+Error ASTNodeImporter::ImportTemplateArgumentListInfo<TemplateArgumentListInfo>(
const TemplateArgumentListInfo &From, TemplateArgumentListInfo &Result) {
return ImportTemplateArgumentListInfo(
From.getLAngleLoc(), From.getRAngleLoc(), From.arguments(), Result);
}
template <>
-bool ASTNodeImporter::ImportTemplateArgumentListInfo<
- ASTTemplateArgumentListInfo>(const ASTTemplateArgumentListInfo &From,
- TemplateArgumentListInfo &Result) {
- return ImportTemplateArgumentListInfo(From.LAngleLoc, From.RAngleLoc,
- From.arguments(), Result);
+Error ASTNodeImporter::ImportTemplateArgumentListInfo<
+ ASTTemplateArgumentListInfo>(
+ const ASTTemplateArgumentListInfo &From,
+ TemplateArgumentListInfo &Result) {
+ return ImportTemplateArgumentListInfo(
+ From.LAngleLoc, From.RAngleLoc, From.arguments(), Result);
}
-std::tuple<FunctionTemplateDecl *, ASTNodeImporter::OptionalTemplateArgsTy>
+Expected<ASTNodeImporter::FunctionTemplateAndArgsTy>
ASTNodeImporter::ImportFunctionTemplateWithTemplateArgsFromSpecialization(
FunctionDecl *FromFD) {
assert(FromFD->getTemplatedKind() ==
- FunctionDecl::TK_FunctionTemplateSpecialization);
+ FunctionDecl::TK_FunctionTemplateSpecialization);
+
+ FunctionTemplateAndArgsTy Result;
+
auto *FTSInfo = FromFD->getTemplateSpecializationInfo();
- auto *Template = cast_or_null<FunctionTemplateDecl>(
- Importer.Import(FTSInfo->getTemplate()));
+ if (Error Err = importInto(std::get<0>(Result), FTSInfo->getTemplate()))
+ return std::move(Err);
// Import template arguments.
auto TemplArgs = FTSInfo->TemplateArguments->asArray();
- TemplateArgsTy ToTemplArgs;
- if (ImportTemplateArguments(TemplArgs.data(), TemplArgs.size(),
- ToTemplArgs)) // Error during import.
- return std::make_tuple(Template, OptionalTemplateArgsTy());
+ if (Error Err = ImportTemplateArguments(TemplArgs.data(), TemplArgs.size(),
+ std::get<1>(Result)))
+ return std::move(Err);
+
+ return Result;
+}
+
+template <>
+Expected<TemplateParameterList *>
+ASTNodeImporter::import(TemplateParameterList *From) {
+ SmallVector<NamedDecl *, 4> To(From->size());
+ if (Error Err = ImportContainerChecked(*From, To))
+ return std::move(Err);
+
+ ExpectedExpr ToRequiresClause = import(From->getRequiresClause());
+ if (!ToRequiresClause)
+ return ToRequiresClause.takeError();
+
+ auto ToTemplateLocOrErr = import(From->getTemplateLoc());
+ if (!ToTemplateLocOrErr)
+ return ToTemplateLocOrErr.takeError();
+ auto ToLAngleLocOrErr = import(From->getLAngleLoc());
+ if (!ToLAngleLocOrErr)
+ return ToLAngleLocOrErr.takeError();
+ auto ToRAngleLocOrErr = import(From->getRAngleLoc());
+ if (!ToRAngleLocOrErr)
+ return ToRAngleLocOrErr.takeError();
+
+ return TemplateParameterList::Create(
+ Importer.getToContext(),
+ *ToTemplateLocOrErr,
+ *ToLAngleLocOrErr,
+ To,
+ *ToRAngleLocOrErr,
+ *ToRequiresClause);
+}
+
+template <>
+Expected<TemplateArgument>
+ASTNodeImporter::import(const TemplateArgument &From) {
+ switch (From.getKind()) {
+ case TemplateArgument::Null:
+ return TemplateArgument();
- return std::make_tuple(Template, ToTemplArgs);
+ case TemplateArgument::Type: {
+ ExpectedType ToTypeOrErr = import(From.getAsType());
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
+ return TemplateArgument(*ToTypeOrErr);
+ }
+
+ case TemplateArgument::Integral: {
+ ExpectedType ToTypeOrErr = import(From.getIntegralType());
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
+ return TemplateArgument(From, *ToTypeOrErr);
+ }
+
+ case TemplateArgument::Declaration: {
+ Expected<ValueDecl *> ToOrErr = import(From.getAsDecl());
+ if (!ToOrErr)
+ return ToOrErr.takeError();
+ ExpectedType ToTypeOrErr = import(From.getParamTypeForDecl());
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
+ return TemplateArgument(*ToOrErr, *ToTypeOrErr);
+ }
+
+ case TemplateArgument::NullPtr: {
+ ExpectedType ToTypeOrErr = import(From.getNullPtrType());
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
+ return TemplateArgument(*ToTypeOrErr, /*isNullPtr*/true);
+ }
+
+ case TemplateArgument::Template: {
+ Expected<TemplateName> ToTemplateOrErr = import(From.getAsTemplate());
+ if (!ToTemplateOrErr)
+ return ToTemplateOrErr.takeError();
+
+ return TemplateArgument(*ToTemplateOrErr);
+ }
+
+ case TemplateArgument::TemplateExpansion: {
+ Expected<TemplateName> ToTemplateOrErr =
+ import(From.getAsTemplateOrTemplatePattern());
+ if (!ToTemplateOrErr)
+ return ToTemplateOrErr.takeError();
+
+ return TemplateArgument(
+ *ToTemplateOrErr, From.getNumTemplateExpansions());
+ }
+
+ case TemplateArgument::Expression:
+ if (ExpectedExpr ToExpr = import(From.getAsExpr()))
+ return TemplateArgument(*ToExpr);
+ else
+ return ToExpr.takeError();
+
+ case TemplateArgument::Pack: {
+ SmallVector<TemplateArgument, 2> ToPack;
+ ToPack.reserve(From.pack_size());
+ if (Error Err = ImportTemplateArguments(
+ From.pack_begin(), From.pack_size(), ToPack))
+ return std::move(Err);
+
+ return TemplateArgument(
+ llvm::makeArrayRef(ToPack).copy(Importer.getToContext()));
+ }
+ }
+
+ llvm_unreachable("Invalid template argument kind");
+}
+
+template <>
+Expected<TemplateArgumentLoc>
+ASTNodeImporter::import(const TemplateArgumentLoc &TALoc) {
+ Expected<TemplateArgument> ArgOrErr = import(TALoc.getArgument());
+ if (!ArgOrErr)
+ return ArgOrErr.takeError();
+ TemplateArgument Arg = *ArgOrErr;
+
+ TemplateArgumentLocInfo FromInfo = TALoc.getLocInfo();
+
+ TemplateArgumentLocInfo ToInfo;
+ if (Arg.getKind() == TemplateArgument::Expression) {
+ ExpectedExpr E = import(FromInfo.getAsExpr());
+ if (!E)
+ return E.takeError();
+ ToInfo = TemplateArgumentLocInfo(*E);
+ } else if (Arg.getKind() == TemplateArgument::Type) {
+ if (auto TSIOrErr = import(FromInfo.getAsTypeSourceInfo()))
+ ToInfo = TemplateArgumentLocInfo(*TSIOrErr);
+ else
+ return TSIOrErr.takeError();
+ } else {
+ auto ToTemplateQualifierLocOrErr =
+ import(FromInfo.getTemplateQualifierLoc());
+ if (!ToTemplateQualifierLocOrErr)
+ return ToTemplateQualifierLocOrErr.takeError();
+ auto ToTemplateNameLocOrErr = import(FromInfo.getTemplateNameLoc());
+ if (!ToTemplateNameLocOrErr)
+ return ToTemplateNameLocOrErr.takeError();
+ auto ToTemplateEllipsisLocOrErr =
+ import(FromInfo.getTemplateEllipsisLoc());
+ if (!ToTemplateEllipsisLocOrErr)
+ return ToTemplateEllipsisLocOrErr.takeError();
+
+ ToInfo = TemplateArgumentLocInfo(
+ *ToTemplateQualifierLocOrErr,
+ *ToTemplateNameLocOrErr,
+ *ToTemplateEllipsisLocOrErr);
+ }
+
+ return TemplateArgumentLoc(Arg, ToInfo);
+}
+
+template <>
+Expected<DeclGroupRef> ASTNodeImporter::import(const DeclGroupRef &DG) {
+ if (DG.isNull())
+ return DeclGroupRef::Create(Importer.getToContext(), nullptr, 0);
+ size_t NumDecls = DG.end() - DG.begin();
+ SmallVector<Decl *, 1> ToDecls;
+ ToDecls.reserve(NumDecls);
+ for (Decl *FromD : DG) {
+ if (auto ToDOrErr = import(FromD))
+ ToDecls.push_back(*ToDOrErr);
+ else
+ return ToDOrErr.takeError();
+ }
+ return DeclGroupRef::Create(Importer.getToContext(),
+ ToDecls.begin(),
+ NumDecls);
+}
+
+template <>
+Expected<ASTNodeImporter::Designator>
+ASTNodeImporter::import(const Designator &D) {
+ if (D.isFieldDesignator()) {
+ IdentifierInfo *ToFieldName = Importer.Import(D.getFieldName());
+
+ ExpectedSLoc ToDotLocOrErr = import(D.getDotLoc());
+ if (!ToDotLocOrErr)
+ return ToDotLocOrErr.takeError();
+
+ ExpectedSLoc ToFieldLocOrErr = import(D.getFieldLoc());
+ if (!ToFieldLocOrErr)
+ return ToFieldLocOrErr.takeError();
+
+ return Designator(ToFieldName, *ToDotLocOrErr, *ToFieldLocOrErr);
+ }
+
+ ExpectedSLoc ToLBracketLocOrErr = import(D.getLBracketLoc());
+ if (!ToLBracketLocOrErr)
+ return ToLBracketLocOrErr.takeError();
+
+ ExpectedSLoc ToRBracketLocOrErr = import(D.getRBracketLoc());
+ if (!ToRBracketLocOrErr)
+ return ToRBracketLocOrErr.takeError();
+
+ if (D.isArrayDesignator())
+ return Designator(D.getFirstExprIndex(),
+ *ToLBracketLocOrErr, *ToRBracketLocOrErr);
+
+ ExpectedSLoc ToEllipsisLocOrErr = import(D.getEllipsisLoc());
+ if (!ToEllipsisLocOrErr)
+ return ToEllipsisLocOrErr.takeError();
+
+ assert(D.isArrayRangeDesignator());
+ return Designator(
+ D.getFirstExprIndex(), *ToLBracketLocOrErr, *ToEllipsisLocOrErr,
+ *ToRBracketLocOrErr);
+}
+
+template <>
+Expected<LambdaCapture> ASTNodeImporter::import(const LambdaCapture &From) {
+ VarDecl *Var = nullptr;
+ if (From.capturesVariable()) {
+ if (auto VarOrErr = import(From.getCapturedVar()))
+ Var = *VarOrErr;
+ else
+ return VarOrErr.takeError();
+ }
+
+ auto LocationOrErr = import(From.getLocation());
+ if (!LocationOrErr)
+ return LocationOrErr.takeError();
+
+ SourceLocation EllipsisLoc;
+ if (From.isPackExpansion())
+ if (Error Err = importInto(EllipsisLoc, From.getEllipsisLoc()))
+ return std::move(Err);
+
+ return LambdaCapture(
+ *LocationOrErr, From.isImplicit(), From.getCaptureKind(), Var,
+ EllipsisLoc);
}
} // namespace clang
@@ -592,26 +969,30 @@ ASTNodeImporter::ImportFunctionTemplateWithTemplateArgsFromSpecialization(
using namespace clang;
-QualType ASTNodeImporter::VisitType(const Type *T) {
+ExpectedType ASTNodeImporter::VisitType(const Type *T) {
Importer.FromDiag(SourceLocation(), diag::err_unsupported_ast_node)
<< T->getTypeClassName();
- return {};
+ return make_error<ImportError>(ImportError::UnsupportedConstruct);
}
-QualType ASTNodeImporter::VisitAtomicType(const AtomicType *T){
- QualType UnderlyingType = Importer.Import(T->getValueType());
- if(UnderlyingType.isNull())
- return {};
+ExpectedType ASTNodeImporter::VisitAtomicType(const AtomicType *T){
+ ExpectedType UnderlyingTypeOrErr = import(T->getValueType());
+ if (!UnderlyingTypeOrErr)
+ return UnderlyingTypeOrErr.takeError();
- return Importer.getToContext().getAtomicType(UnderlyingType);
+ return Importer.getToContext().getAtomicType(*UnderlyingTypeOrErr);
}
-QualType ASTNodeImporter::VisitBuiltinType(const BuiltinType *T) {
+ExpectedType ASTNodeImporter::VisitBuiltinType(const BuiltinType *T) {
switch (T->getKind()) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
case BuiltinType::Id: \
return Importer.getToContext().SingletonId;
#include "clang/Basic/OpenCLImageTypes.def"
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id: \
+ return Importer.getToContext().Id##Ty;
+#include "clang/Basic/OpenCLExtensionTypes.def"
#define SHARED_SINGLETON_TYPE(Expansion)
#define BUILTIN_TYPE(Id, SingletonId) \
case BuiltinType::Id: return Importer.getToContext().SingletonId;
@@ -651,183 +1032,196 @@ QualType ASTNodeImporter::VisitBuiltinType(const BuiltinType *T) {
llvm_unreachable("Invalid BuiltinType Kind!");
}
-QualType ASTNodeImporter::VisitDecayedType(const DecayedType *T) {
- QualType OrigT = Importer.Import(T->getOriginalType());
- if (OrigT.isNull())
- return {};
+ExpectedType ASTNodeImporter::VisitDecayedType(const DecayedType *T) {
+ ExpectedType ToOriginalTypeOrErr = import(T->getOriginalType());
+ if (!ToOriginalTypeOrErr)
+ return ToOriginalTypeOrErr.takeError();
- return Importer.getToContext().getDecayedType(OrigT);
+ return Importer.getToContext().getDecayedType(*ToOriginalTypeOrErr);
}
-QualType ASTNodeImporter::VisitComplexType(const ComplexType *T) {
- QualType ToElementType = Importer.Import(T->getElementType());
- if (ToElementType.isNull())
- return {};
+ExpectedType ASTNodeImporter::VisitComplexType(const ComplexType *T) {
+ ExpectedType ToElementTypeOrErr = import(T->getElementType());
+ if (!ToElementTypeOrErr)
+ return ToElementTypeOrErr.takeError();
- return Importer.getToContext().getComplexType(ToElementType);
+ return Importer.getToContext().getComplexType(*ToElementTypeOrErr);
}
-QualType ASTNodeImporter::VisitPointerType(const PointerType *T) {
- QualType ToPointeeType = Importer.Import(T->getPointeeType());
- if (ToPointeeType.isNull())
- return {};
+ExpectedType ASTNodeImporter::VisitPointerType(const PointerType *T) {
+ ExpectedType ToPointeeTypeOrErr = import(T->getPointeeType());
+ if (!ToPointeeTypeOrErr)
+ return ToPointeeTypeOrErr.takeError();
- return Importer.getToContext().getPointerType(ToPointeeType);
+ return Importer.getToContext().getPointerType(*ToPointeeTypeOrErr);
}
-QualType ASTNodeImporter::VisitBlockPointerType(const BlockPointerType *T) {
+ExpectedType ASTNodeImporter::VisitBlockPointerType(const BlockPointerType *T) {
// FIXME: Check for blocks support in "to" context.
- QualType ToPointeeType = Importer.Import(T->getPointeeType());
- if (ToPointeeType.isNull())
- return {};
+ ExpectedType ToPointeeTypeOrErr = import(T->getPointeeType());
+ if (!ToPointeeTypeOrErr)
+ return ToPointeeTypeOrErr.takeError();
- return Importer.getToContext().getBlockPointerType(ToPointeeType);
+ return Importer.getToContext().getBlockPointerType(*ToPointeeTypeOrErr);
}
-QualType
+ExpectedType
ASTNodeImporter::VisitLValueReferenceType(const LValueReferenceType *T) {
// FIXME: Check for C++ support in "to" context.
- QualType ToPointeeType = Importer.Import(T->getPointeeTypeAsWritten());
- if (ToPointeeType.isNull())
- return {};
+ ExpectedType ToPointeeTypeOrErr = import(T->getPointeeTypeAsWritten());
+ if (!ToPointeeTypeOrErr)
+ return ToPointeeTypeOrErr.takeError();
- return Importer.getToContext().getLValueReferenceType(ToPointeeType);
+ return Importer.getToContext().getLValueReferenceType(*ToPointeeTypeOrErr);
}
-QualType
+ExpectedType
ASTNodeImporter::VisitRValueReferenceType(const RValueReferenceType *T) {
// FIXME: Check for C++0x support in "to" context.
- QualType ToPointeeType = Importer.Import(T->getPointeeTypeAsWritten());
- if (ToPointeeType.isNull())
- return {};
+ ExpectedType ToPointeeTypeOrErr = import(T->getPointeeTypeAsWritten());
+ if (!ToPointeeTypeOrErr)
+ return ToPointeeTypeOrErr.takeError();
- return Importer.getToContext().getRValueReferenceType(ToPointeeType);
+ return Importer.getToContext().getRValueReferenceType(*ToPointeeTypeOrErr);
}
-QualType ASTNodeImporter::VisitMemberPointerType(const MemberPointerType *T) {
+ExpectedType
+ASTNodeImporter::VisitMemberPointerType(const MemberPointerType *T) {
// FIXME: Check for C++ support in "to" context.
- QualType ToPointeeType = Importer.Import(T->getPointeeType());
- if (ToPointeeType.isNull())
- return {};
+ ExpectedType ToPointeeTypeOrErr = import(T->getPointeeType());
+ if (!ToPointeeTypeOrErr)
+ return ToPointeeTypeOrErr.takeError();
+
+ ExpectedType ClassTypeOrErr = import(QualType(T->getClass(), 0));
+ if (!ClassTypeOrErr)
+ return ClassTypeOrErr.takeError();
- QualType ClassType = Importer.Import(QualType(T->getClass(), 0));
- return Importer.getToContext().getMemberPointerType(ToPointeeType,
- ClassType.getTypePtr());
+ return Importer.getToContext().getMemberPointerType(
+ *ToPointeeTypeOrErr, (*ClassTypeOrErr).getTypePtr());
}
-QualType ASTNodeImporter::VisitConstantArrayType(const ConstantArrayType *T) {
- QualType ToElementType = Importer.Import(T->getElementType());
- if (ToElementType.isNull())
- return {};
+ExpectedType
+ASTNodeImporter::VisitConstantArrayType(const ConstantArrayType *T) {
+ ExpectedType ToElementTypeOrErr = import(T->getElementType());
+ if (!ToElementTypeOrErr)
+ return ToElementTypeOrErr.takeError();
- return Importer.getToContext().getConstantArrayType(ToElementType,
+ return Importer.getToContext().getConstantArrayType(*ToElementTypeOrErr,
T->getSize(),
T->getSizeModifier(),
T->getIndexTypeCVRQualifiers());
}
-QualType
+ExpectedType
ASTNodeImporter::VisitIncompleteArrayType(const IncompleteArrayType *T) {
- QualType ToElementType = Importer.Import(T->getElementType());
- if (ToElementType.isNull())
- return {};
+ ExpectedType ToElementTypeOrErr = import(T->getElementType());
+ if (!ToElementTypeOrErr)
+ return ToElementTypeOrErr.takeError();
- return Importer.getToContext().getIncompleteArrayType(ToElementType,
+ return Importer.getToContext().getIncompleteArrayType(*ToElementTypeOrErr,
T->getSizeModifier(),
T->getIndexTypeCVRQualifiers());
}
-QualType ASTNodeImporter::VisitVariableArrayType(const VariableArrayType *T) {
- QualType ToElementType = Importer.Import(T->getElementType());
- if (ToElementType.isNull())
- return {};
-
- Expr *Size = Importer.Import(T->getSizeExpr());
- if (!Size)
- return {};
+ExpectedType
+ASTNodeImporter::VisitVariableArrayType(const VariableArrayType *T) {
+ QualType ToElementType;
+ Expr *ToSizeExpr;
+ SourceRange ToBracketsRange;
+ if (auto Imp = importSeq(
+ T->getElementType(), T->getSizeExpr(), T->getBracketsRange()))
+ std::tie(ToElementType, ToSizeExpr, ToBracketsRange) = *Imp;
+ else
+ return Imp.takeError();
- SourceRange Brackets = Importer.Import(T->getBracketsRange());
- return Importer.getToContext().getVariableArrayType(ToElementType, Size,
- T->getSizeModifier(),
- T->getIndexTypeCVRQualifiers(),
- Brackets);
+ return Importer.getToContext().getVariableArrayType(
+ ToElementType, ToSizeExpr, T->getSizeModifier(),
+ T->getIndexTypeCVRQualifiers(), ToBracketsRange);
}
-QualType ASTNodeImporter::VisitDependentSizedArrayType(
+ExpectedType ASTNodeImporter::VisitDependentSizedArrayType(
const DependentSizedArrayType *T) {
- QualType ToElementType = Importer.Import(T->getElementType());
- if (ToElementType.isNull())
- return {};
-
+ QualType ToElementType;
+ Expr *ToSizeExpr;
+ SourceRange ToBracketsRange;
+ if (auto Imp = importSeq(
+ T->getElementType(), T->getSizeExpr(), T->getBracketsRange()))
+ std::tie(ToElementType, ToSizeExpr, ToBracketsRange) = *Imp;
+ else
+ return Imp.takeError();
// SizeExpr may be null if size is not specified directly.
// For example, 'int a[]'.
- Expr *Size = Importer.Import(T->getSizeExpr());
- if (!Size && T->getSizeExpr())
- return {};
- SourceRange Brackets = Importer.Import(T->getBracketsRange());
return Importer.getToContext().getDependentSizedArrayType(
- ToElementType, Size, T->getSizeModifier(), T->getIndexTypeCVRQualifiers(),
- Brackets);
+ ToElementType, ToSizeExpr, T->getSizeModifier(),
+ T->getIndexTypeCVRQualifiers(), ToBracketsRange);
}
-QualType ASTNodeImporter::VisitVectorType(const VectorType *T) {
- QualType ToElementType = Importer.Import(T->getElementType());
- if (ToElementType.isNull())
- return {};
+ExpectedType ASTNodeImporter::VisitVectorType(const VectorType *T) {
+ ExpectedType ToElementTypeOrErr = import(T->getElementType());
+ if (!ToElementTypeOrErr)
+ return ToElementTypeOrErr.takeError();
- return Importer.getToContext().getVectorType(ToElementType,
+ return Importer.getToContext().getVectorType(*ToElementTypeOrErr,
T->getNumElements(),
T->getVectorKind());
}
-QualType ASTNodeImporter::VisitExtVectorType(const ExtVectorType *T) {
- QualType ToElementType = Importer.Import(T->getElementType());
- if (ToElementType.isNull())
- return {};
+ExpectedType ASTNodeImporter::VisitExtVectorType(const ExtVectorType *T) {
+ ExpectedType ToElementTypeOrErr = import(T->getElementType());
+ if (!ToElementTypeOrErr)
+ return ToElementTypeOrErr.takeError();
- return Importer.getToContext().getExtVectorType(ToElementType,
+ return Importer.getToContext().getExtVectorType(*ToElementTypeOrErr,
T->getNumElements());
}
-QualType
+ExpectedType
ASTNodeImporter::VisitFunctionNoProtoType(const FunctionNoProtoType *T) {
// FIXME: What happens if we're importing a function without a prototype
// into C++? Should we make it variadic?
- QualType ToResultType = Importer.Import(T->getReturnType());
- if (ToResultType.isNull())
- return {};
+ ExpectedType ToReturnTypeOrErr = import(T->getReturnType());
+ if (!ToReturnTypeOrErr)
+ return ToReturnTypeOrErr.takeError();
- return Importer.getToContext().getFunctionNoProtoType(ToResultType,
+ return Importer.getToContext().getFunctionNoProtoType(*ToReturnTypeOrErr,
T->getExtInfo());
}
-QualType ASTNodeImporter::VisitFunctionProtoType(const FunctionProtoType *T) {
- QualType ToResultType = Importer.Import(T->getReturnType());
- if (ToResultType.isNull())
- return {};
+ExpectedType
+ASTNodeImporter::VisitFunctionProtoType(const FunctionProtoType *T) {
+ ExpectedType ToReturnTypeOrErr = import(T->getReturnType());
+ if (!ToReturnTypeOrErr)
+ return ToReturnTypeOrErr.takeError();
// Import argument types
SmallVector<QualType, 4> ArgTypes;
for (const auto &A : T->param_types()) {
- QualType ArgType = Importer.Import(A);
- if (ArgType.isNull())
- return {};
- ArgTypes.push_back(ArgType);
+ ExpectedType TyOrErr = import(A);
+ if (!TyOrErr)
+ return TyOrErr.takeError();
+ ArgTypes.push_back(*TyOrErr);
}
// Import exception types
SmallVector<QualType, 4> ExceptionTypes;
for (const auto &E : T->exceptions()) {
- QualType ExceptionType = Importer.Import(E);
- if (ExceptionType.isNull())
- return {};
- ExceptionTypes.push_back(ExceptionType);
+ ExpectedType TyOrErr = import(E);
+ if (!TyOrErr)
+ return TyOrErr.takeError();
+ ExceptionTypes.push_back(*TyOrErr);
}
FunctionProtoType::ExtProtoInfo FromEPI = T->getExtProtoInfo();
FunctionProtoType::ExtProtoInfo ToEPI;
+ auto Imp = importSeq(
+ FromEPI.ExceptionSpec.NoexceptExpr,
+ FromEPI.ExceptionSpec.SourceDecl,
+ FromEPI.ExceptionSpec.SourceTemplate);
+ if (!Imp)
+ return Imp.takeError();
+
ToEPI.ExtInfo = FromEPI.ExtInfo;
ToEPI.Variadic = FromEPI.Variadic;
ToEPI.HasTrailingReturn = FromEPI.HasTrailingReturn;
@@ -835,112 +1229,108 @@ QualType ASTNodeImporter::VisitFunctionProtoType(const FunctionProtoType *T) {
ToEPI.RefQualifier = FromEPI.RefQualifier;
ToEPI.ExceptionSpec.Type = FromEPI.ExceptionSpec.Type;
ToEPI.ExceptionSpec.Exceptions = ExceptionTypes;
- ToEPI.ExceptionSpec.NoexceptExpr =
- Importer.Import(FromEPI.ExceptionSpec.NoexceptExpr);
- ToEPI.ExceptionSpec.SourceDecl = cast_or_null<FunctionDecl>(
- Importer.Import(FromEPI.ExceptionSpec.SourceDecl));
- ToEPI.ExceptionSpec.SourceTemplate = cast_or_null<FunctionDecl>(
- Importer.Import(FromEPI.ExceptionSpec.SourceTemplate));
+ std::tie(
+ ToEPI.ExceptionSpec.NoexceptExpr,
+ ToEPI.ExceptionSpec.SourceDecl,
+ ToEPI.ExceptionSpec.SourceTemplate) = *Imp;
- return Importer.getToContext().getFunctionType(ToResultType, ArgTypes, ToEPI);
+ return Importer.getToContext().getFunctionType(
+ *ToReturnTypeOrErr, ArgTypes, ToEPI);
}
-QualType ASTNodeImporter::VisitUnresolvedUsingType(
+ExpectedType ASTNodeImporter::VisitUnresolvedUsingType(
const UnresolvedUsingType *T) {
- const auto *ToD =
- cast_or_null<UnresolvedUsingTypenameDecl>(Importer.Import(T->getDecl()));
- if (!ToD)
- return {};
-
- auto *ToPrevD =
- cast_or_null<UnresolvedUsingTypenameDecl>(
- Importer.Import(T->getDecl()->getPreviousDecl()));
- if (!ToPrevD && T->getDecl()->getPreviousDecl())
- return {};
+ UnresolvedUsingTypenameDecl *ToD;
+ Decl *ToPrevD;
+ if (auto Imp = importSeq(T->getDecl(), T->getDecl()->getPreviousDecl()))
+ std::tie(ToD, ToPrevD) = *Imp;
+ else
+ return Imp.takeError();
- return Importer.getToContext().getTypeDeclType(ToD, ToPrevD);
+ return Importer.getToContext().getTypeDeclType(
+ ToD, cast_or_null<TypeDecl>(ToPrevD));
}
-QualType ASTNodeImporter::VisitParenType(const ParenType *T) {
- QualType ToInnerType = Importer.Import(T->getInnerType());
- if (ToInnerType.isNull())
- return {};
+ExpectedType ASTNodeImporter::VisitParenType(const ParenType *T) {
+ ExpectedType ToInnerTypeOrErr = import(T->getInnerType());
+ if (!ToInnerTypeOrErr)
+ return ToInnerTypeOrErr.takeError();
- return Importer.getToContext().getParenType(ToInnerType);
+ return Importer.getToContext().getParenType(*ToInnerTypeOrErr);
}
-QualType ASTNodeImporter::VisitTypedefType(const TypedefType *T) {
- auto *ToDecl =
- dyn_cast_or_null<TypedefNameDecl>(Importer.Import(T->getDecl()));
- if (!ToDecl)
- return {};
+ExpectedType ASTNodeImporter::VisitTypedefType(const TypedefType *T) {
+ Expected<TypedefNameDecl *> ToDeclOrErr = import(T->getDecl());
+ if (!ToDeclOrErr)
+ return ToDeclOrErr.takeError();
- return Importer.getToContext().getTypeDeclType(ToDecl);
+ return Importer.getToContext().getTypeDeclType(*ToDeclOrErr);
}
-QualType ASTNodeImporter::VisitTypeOfExprType(const TypeOfExprType *T) {
- Expr *ToExpr = Importer.Import(T->getUnderlyingExpr());
- if (!ToExpr)
- return {};
+ExpectedType ASTNodeImporter::VisitTypeOfExprType(const TypeOfExprType *T) {
+ ExpectedExpr ToExprOrErr = import(T->getUnderlyingExpr());
+ if (!ToExprOrErr)
+ return ToExprOrErr.takeError();
- return Importer.getToContext().getTypeOfExprType(ToExpr);
+ return Importer.getToContext().getTypeOfExprType(*ToExprOrErr);
}
-QualType ASTNodeImporter::VisitTypeOfType(const TypeOfType *T) {
- QualType ToUnderlyingType = Importer.Import(T->getUnderlyingType());
- if (ToUnderlyingType.isNull())
- return {};
+ExpectedType ASTNodeImporter::VisitTypeOfType(const TypeOfType *T) {
+ ExpectedType ToUnderlyingTypeOrErr = import(T->getUnderlyingType());
+ if (!ToUnderlyingTypeOrErr)
+ return ToUnderlyingTypeOrErr.takeError();
- return Importer.getToContext().getTypeOfType(ToUnderlyingType);
+ return Importer.getToContext().getTypeOfType(*ToUnderlyingTypeOrErr);
}
-QualType ASTNodeImporter::VisitDecltypeType(const DecltypeType *T) {
+ExpectedType ASTNodeImporter::VisitDecltypeType(const DecltypeType *T) {
// FIXME: Make sure that the "to" context supports C++0x!
- Expr *ToExpr = Importer.Import(T->getUnderlyingExpr());
- if (!ToExpr)
- return {};
+ ExpectedExpr ToExprOrErr = import(T->getUnderlyingExpr());
+ if (!ToExprOrErr)
+ return ToExprOrErr.takeError();
- QualType UnderlyingType = Importer.Import(T->getUnderlyingType());
- if (UnderlyingType.isNull())
- return {};
+ ExpectedType ToUnderlyingTypeOrErr = import(T->getUnderlyingType());
+ if (!ToUnderlyingTypeOrErr)
+ return ToUnderlyingTypeOrErr.takeError();
- return Importer.getToContext().getDecltypeType(ToExpr, UnderlyingType);
+ return Importer.getToContext().getDecltypeType(
+ *ToExprOrErr, *ToUnderlyingTypeOrErr);
}
-QualType ASTNodeImporter::VisitUnaryTransformType(const UnaryTransformType *T) {
- QualType ToBaseType = Importer.Import(T->getBaseType());
- QualType ToUnderlyingType = Importer.Import(T->getUnderlyingType());
- if (ToBaseType.isNull() || ToUnderlyingType.isNull())
- return {};
+ExpectedType
+ASTNodeImporter::VisitUnaryTransformType(const UnaryTransformType *T) {
+ ExpectedType ToBaseTypeOrErr = import(T->getBaseType());
+ if (!ToBaseTypeOrErr)
+ return ToBaseTypeOrErr.takeError();
+
+ ExpectedType ToUnderlyingTypeOrErr = import(T->getUnderlyingType());
+ if (!ToUnderlyingTypeOrErr)
+ return ToUnderlyingTypeOrErr.takeError();
- return Importer.getToContext().getUnaryTransformType(ToBaseType,
- ToUnderlyingType,
- T->getUTTKind());
+ return Importer.getToContext().getUnaryTransformType(
+ *ToBaseTypeOrErr, *ToUnderlyingTypeOrErr, T->getUTTKind());
}
-QualType ASTNodeImporter::VisitAutoType(const AutoType *T) {
+ExpectedType ASTNodeImporter::VisitAutoType(const AutoType *T) {
// FIXME: Make sure that the "to" context supports C++11!
- QualType FromDeduced = T->getDeducedType();
- QualType ToDeduced;
- if (!FromDeduced.isNull()) {
- ToDeduced = Importer.Import(FromDeduced);
- if (ToDeduced.isNull())
- return {};
- }
+ ExpectedType ToDeducedTypeOrErr = import(T->getDeducedType());
+ if (!ToDeducedTypeOrErr)
+ return ToDeducedTypeOrErr.takeError();
- return Importer.getToContext().getAutoType(ToDeduced, T->getKeyword(),
+ return Importer.getToContext().getAutoType(*ToDeducedTypeOrErr,
+ T->getKeyword(),
/*IsDependent*/false);
}
-QualType ASTNodeImporter::VisitInjectedClassNameType(
+ExpectedType ASTNodeImporter::VisitInjectedClassNameType(
const InjectedClassNameType *T) {
- auto *D = cast_or_null<CXXRecordDecl>(Importer.Import(T->getDecl()));
- if (!D)
- return {};
+ Expected<CXXRecordDecl *> ToDeclOrErr = import(T->getDecl());
+ if (!ToDeclOrErr)
+ return ToDeclOrErr.takeError();
- QualType InjType = Importer.Import(T->getInjectedSpecializationType());
- if (InjType.isNull())
- return {};
+ ExpectedType ToInjTypeOrErr = import(T->getInjectedSpecializationType());
+ if (!ToInjTypeOrErr)
+ return ToInjTypeOrErr.takeError();
// FIXME: ASTContext::getInjectedClassNameType is not suitable for AST reading
// See comments in InjectedClassNameType definition for details
@@ -951,220 +1341,208 @@ QualType ASTNodeImporter::VisitInjectedClassNameType(
};
return QualType(new (Importer.getToContext(), TypeAlignment)
- InjectedClassNameType(D, InjType), 0);
+ InjectedClassNameType(*ToDeclOrErr, *ToInjTypeOrErr), 0);
}
-QualType ASTNodeImporter::VisitRecordType(const RecordType *T) {
- auto *ToDecl = dyn_cast_or_null<RecordDecl>(Importer.Import(T->getDecl()));
- if (!ToDecl)
- return {};
+ExpectedType ASTNodeImporter::VisitRecordType(const RecordType *T) {
+ Expected<RecordDecl *> ToDeclOrErr = import(T->getDecl());
+ if (!ToDeclOrErr)
+ return ToDeclOrErr.takeError();
- return Importer.getToContext().getTagDeclType(ToDecl);
+ return Importer.getToContext().getTagDeclType(*ToDeclOrErr);
}
-QualType ASTNodeImporter::VisitEnumType(const EnumType *T) {
- auto *ToDecl = dyn_cast_or_null<EnumDecl>(Importer.Import(T->getDecl()));
- if (!ToDecl)
- return {};
+ExpectedType ASTNodeImporter::VisitEnumType(const EnumType *T) {
+ Expected<EnumDecl *> ToDeclOrErr = import(T->getDecl());
+ if (!ToDeclOrErr)
+ return ToDeclOrErr.takeError();
- return Importer.getToContext().getTagDeclType(ToDecl);
+ return Importer.getToContext().getTagDeclType(*ToDeclOrErr);
}
-QualType ASTNodeImporter::VisitAttributedType(const AttributedType *T) {
- QualType FromModifiedType = T->getModifiedType();
- QualType FromEquivalentType = T->getEquivalentType();
- QualType ToModifiedType;
- QualType ToEquivalentType;
-
- if (!FromModifiedType.isNull()) {
- ToModifiedType = Importer.Import(FromModifiedType);
- if (ToModifiedType.isNull())
- return {};
- }
- if (!FromEquivalentType.isNull()) {
- ToEquivalentType = Importer.Import(FromEquivalentType);
- if (ToEquivalentType.isNull())
- return {};
- }
+ExpectedType ASTNodeImporter::VisitAttributedType(const AttributedType *T) {
+ ExpectedType ToModifiedTypeOrErr = import(T->getModifiedType());
+ if (!ToModifiedTypeOrErr)
+ return ToModifiedTypeOrErr.takeError();
+ ExpectedType ToEquivalentTypeOrErr = import(T->getEquivalentType());
+ if (!ToEquivalentTypeOrErr)
+ return ToEquivalentTypeOrErr.takeError();
return Importer.getToContext().getAttributedType(T->getAttrKind(),
- ToModifiedType, ToEquivalentType);
+ *ToModifiedTypeOrErr, *ToEquivalentTypeOrErr);
}
-QualType ASTNodeImporter::VisitTemplateTypeParmType(
+ExpectedType ASTNodeImporter::VisitTemplateTypeParmType(
const TemplateTypeParmType *T) {
- auto *ParmDecl =
- cast_or_null<TemplateTypeParmDecl>(Importer.Import(T->getDecl()));
- if (!ParmDecl && T->getDecl())
- return {};
+ Expected<TemplateTypeParmDecl *> ToDeclOrErr = import(T->getDecl());
+ if (!ToDeclOrErr)
+ return ToDeclOrErr.takeError();
return Importer.getToContext().getTemplateTypeParmType(
- T->getDepth(), T->getIndex(), T->isParameterPack(), ParmDecl);
+ T->getDepth(), T->getIndex(), T->isParameterPack(), *ToDeclOrErr);
}
-QualType ASTNodeImporter::VisitSubstTemplateTypeParmType(
+ExpectedType ASTNodeImporter::VisitSubstTemplateTypeParmType(
const SubstTemplateTypeParmType *T) {
- const auto *Replaced =
- cast_or_null<TemplateTypeParmType>(Importer.Import(
- QualType(T->getReplacedParameter(), 0)).getTypePtr());
- if (!Replaced)
- return {};
+ ExpectedType ReplacedOrErr = import(QualType(T->getReplacedParameter(), 0));
+ if (!ReplacedOrErr)
+ return ReplacedOrErr.takeError();
+ const TemplateTypeParmType *Replaced =
+ cast<TemplateTypeParmType>((*ReplacedOrErr).getTypePtr());
- QualType Replacement = Importer.Import(T->getReplacementType());
- if (Replacement.isNull())
- return {};
- Replacement = Replacement.getCanonicalType();
+ ExpectedType ToReplacementTypeOrErr = import(T->getReplacementType());
+ if (!ToReplacementTypeOrErr)
+ return ToReplacementTypeOrErr.takeError();
return Importer.getToContext().getSubstTemplateTypeParmType(
- Replaced, Replacement);
+ Replaced, (*ToReplacementTypeOrErr).getCanonicalType());
}
-QualType ASTNodeImporter::VisitTemplateSpecializationType(
+ExpectedType ASTNodeImporter::VisitTemplateSpecializationType(
const TemplateSpecializationType *T) {
- TemplateName ToTemplate = Importer.Import(T->getTemplateName());
- if (ToTemplate.isNull())
- return {};
+ auto ToTemplateOrErr = import(T->getTemplateName());
+ if (!ToTemplateOrErr)
+ return ToTemplateOrErr.takeError();
SmallVector<TemplateArgument, 2> ToTemplateArgs;
- if (ImportTemplateArguments(T->getArgs(), T->getNumArgs(), ToTemplateArgs))
- return {};
+ if (Error Err = ImportTemplateArguments(
+ T->getArgs(), T->getNumArgs(), ToTemplateArgs))
+ return std::move(Err);
QualType ToCanonType;
if (!QualType(T, 0).isCanonical()) {
QualType FromCanonType
= Importer.getFromContext().getCanonicalType(QualType(T, 0));
- ToCanonType =Importer.Import(FromCanonType);
- if (ToCanonType.isNull())
- return {};
+ if (ExpectedType TyOrErr = import(FromCanonType))
+ ToCanonType = *TyOrErr;
+ else
+ return TyOrErr.takeError();
}
- return Importer.getToContext().getTemplateSpecializationType(ToTemplate,
+ return Importer.getToContext().getTemplateSpecializationType(*ToTemplateOrErr,
ToTemplateArgs,
ToCanonType);
}
-QualType ASTNodeImporter::VisitElaboratedType(const ElaboratedType *T) {
- NestedNameSpecifier *ToQualifier = nullptr;
+ExpectedType ASTNodeImporter::VisitElaboratedType(const ElaboratedType *T) {
// Note: the qualifier in an ElaboratedType is optional.
- if (T->getQualifier()) {
- ToQualifier = Importer.Import(T->getQualifier());
- if (!ToQualifier)
- return {};
- }
+ auto ToQualifierOrErr = import(T->getQualifier());
+ if (!ToQualifierOrErr)
+ return ToQualifierOrErr.takeError();
- QualType ToNamedType = Importer.Import(T->getNamedType());
- if (ToNamedType.isNull())
- return {};
+ ExpectedType ToNamedTypeOrErr = import(T->getNamedType());
+ if (!ToNamedTypeOrErr)
+ return ToNamedTypeOrErr.takeError();
- TagDecl *OwnedTagDecl =
- cast_or_null<TagDecl>(Importer.Import(T->getOwnedTagDecl()));
- if (!OwnedTagDecl && T->getOwnedTagDecl())
- return {};
+ Expected<TagDecl *> ToOwnedTagDeclOrErr = import(T->getOwnedTagDecl());
+ if (!ToOwnedTagDeclOrErr)
+ return ToOwnedTagDeclOrErr.takeError();
return Importer.getToContext().getElaboratedType(T->getKeyword(),
- ToQualifier, ToNamedType,
- OwnedTagDecl);
+ *ToQualifierOrErr,
+ *ToNamedTypeOrErr,
+ *ToOwnedTagDeclOrErr);
}
-QualType ASTNodeImporter::VisitPackExpansionType(const PackExpansionType *T) {
- QualType Pattern = Importer.Import(T->getPattern());
- if (Pattern.isNull())
- return {};
+ExpectedType
+ASTNodeImporter::VisitPackExpansionType(const PackExpansionType *T) {
+ ExpectedType ToPatternOrErr = import(T->getPattern());
+ if (!ToPatternOrErr)
+ return ToPatternOrErr.takeError();
- return Importer.getToContext().getPackExpansionType(Pattern,
+ return Importer.getToContext().getPackExpansionType(*ToPatternOrErr,
T->getNumExpansions());
}
-QualType ASTNodeImporter::VisitDependentTemplateSpecializationType(
+ExpectedType ASTNodeImporter::VisitDependentTemplateSpecializationType(
const DependentTemplateSpecializationType *T) {
- NestedNameSpecifier *Qualifier = Importer.Import(T->getQualifier());
- if (!Qualifier && T->getQualifier())
- return {};
+ auto ToQualifierOrErr = import(T->getQualifier());
+ if (!ToQualifierOrErr)
+ return ToQualifierOrErr.takeError();
- IdentifierInfo *Name = Importer.Import(T->getIdentifier());
- if (!Name && T->getIdentifier())
- return {};
+ IdentifierInfo *ToName = Importer.Import(T->getIdentifier());
SmallVector<TemplateArgument, 2> ToPack;
ToPack.reserve(T->getNumArgs());
- if (ImportTemplateArguments(T->getArgs(), T->getNumArgs(), ToPack))
- return {};
+ if (Error Err = ImportTemplateArguments(
+ T->getArgs(), T->getNumArgs(), ToPack))
+ return std::move(Err);
return Importer.getToContext().getDependentTemplateSpecializationType(
- T->getKeyword(), Qualifier, Name, ToPack);
+ T->getKeyword(), *ToQualifierOrErr, ToName, ToPack);
}
-QualType ASTNodeImporter::VisitDependentNameType(const DependentNameType *T) {
- NestedNameSpecifier *NNS = Importer.Import(T->getQualifier());
- if (!NNS && T->getQualifier())
- return QualType();
+ExpectedType
+ASTNodeImporter::VisitDependentNameType(const DependentNameType *T) {
+ auto ToQualifierOrErr = import(T->getQualifier());
+ if (!ToQualifierOrErr)
+ return ToQualifierOrErr.takeError();
IdentifierInfo *Name = Importer.Import(T->getIdentifier());
- if (!Name && T->getIdentifier())
- return QualType();
- QualType Canon = (T == T->getCanonicalTypeInternal().getTypePtr())
- ? QualType()
- : Importer.Import(T->getCanonicalTypeInternal());
- if (!Canon.isNull())
- Canon = Canon.getCanonicalType();
+ QualType Canon;
+ if (T != T->getCanonicalTypeInternal().getTypePtr()) {
+ if (ExpectedType TyOrErr = import(T->getCanonicalTypeInternal()))
+ Canon = (*TyOrErr).getCanonicalType();
+ else
+ return TyOrErr.takeError();
+ }
- return Importer.getToContext().getDependentNameType(T->getKeyword(), NNS,
+ return Importer.getToContext().getDependentNameType(T->getKeyword(),
+ *ToQualifierOrErr,
Name, Canon);
}
-QualType ASTNodeImporter::VisitObjCInterfaceType(const ObjCInterfaceType *T) {
- auto *Class =
- dyn_cast_or_null<ObjCInterfaceDecl>(Importer.Import(T->getDecl()));
- if (!Class)
- return {};
+ExpectedType
+ASTNodeImporter::VisitObjCInterfaceType(const ObjCInterfaceType *T) {
+ Expected<ObjCInterfaceDecl *> ToDeclOrErr = import(T->getDecl());
+ if (!ToDeclOrErr)
+ return ToDeclOrErr.takeError();
- return Importer.getToContext().getObjCInterfaceType(Class);
+ return Importer.getToContext().getObjCInterfaceType(*ToDeclOrErr);
}
-QualType ASTNodeImporter::VisitObjCObjectType(const ObjCObjectType *T) {
- QualType ToBaseType = Importer.Import(T->getBaseType());
- if (ToBaseType.isNull())
- return {};
+ExpectedType ASTNodeImporter::VisitObjCObjectType(const ObjCObjectType *T) {
+ ExpectedType ToBaseTypeOrErr = import(T->getBaseType());
+ if (!ToBaseTypeOrErr)
+ return ToBaseTypeOrErr.takeError();
SmallVector<QualType, 4> TypeArgs;
for (auto TypeArg : T->getTypeArgsAsWritten()) {
- QualType ImportedTypeArg = Importer.Import(TypeArg);
- if (ImportedTypeArg.isNull())
- return {};
-
- TypeArgs.push_back(ImportedTypeArg);
+ if (ExpectedType TyOrErr = import(TypeArg))
+ TypeArgs.push_back(*TyOrErr);
+ else
+ return TyOrErr.takeError();
}
SmallVector<ObjCProtocolDecl *, 4> Protocols;
for (auto *P : T->quals()) {
- auto *Protocol = dyn_cast_or_null<ObjCProtocolDecl>(Importer.Import(P));
- if (!Protocol)
- return {};
- Protocols.push_back(Protocol);
+ if (Expected<ObjCProtocolDecl *> ProtocolOrErr = import(P))
+ Protocols.push_back(*ProtocolOrErr);
+ else
+ return ProtocolOrErr.takeError();
+
}
- return Importer.getToContext().getObjCObjectType(ToBaseType, TypeArgs,
+ return Importer.getToContext().getObjCObjectType(*ToBaseTypeOrErr, TypeArgs,
Protocols,
T->isKindOfTypeAsWritten());
}
-QualType
+ExpectedType
ASTNodeImporter::VisitObjCObjectPointerType(const ObjCObjectPointerType *T) {
- QualType ToPointeeType = Importer.Import(T->getPointeeType());
- if (ToPointeeType.isNull())
- return {};
+ ExpectedType ToPointeeTypeOrErr = import(T->getPointeeType());
+ if (!ToPointeeTypeOrErr)
+ return ToPointeeTypeOrErr.takeError();
- return Importer.getToContext().getObjCObjectPointerType(ToPointeeType);
+ return Importer.getToContext().getObjCObjectPointerType(*ToPointeeTypeOrErr);
}
//----------------------------------------------------------------------------
// Import Declarations
//----------------------------------------------------------------------------
-bool ASTNodeImporter::ImportDeclParts(NamedDecl *D, DeclContext *&DC,
- DeclContext *&LexicalDC,
- DeclarationName &Name,
- NamedDecl *&ToD,
- SourceLocation &Loc) {
+Error ASTNodeImporter::ImportDeclParts(
+ NamedDecl *D, DeclContext *&DC, DeclContext *&LexicalDC,
+ DeclarationName &Name, NamedDecl *&ToD, SourceLocation &Loc) {
// Check if RecordDecl is in FunctionDecl parameters to avoid infinite loop.
// example: int struct_in_proto(struct data_t{int a;int b;} *d);
DeclContext *OrigDC = D->getDeclContext();
@@ -1184,66 +1562,66 @@ bool ASTNodeImporter::ImportDeclParts(NamedDecl *D, DeclContext *&DC,
if (RT && RT->getDecl() == D) {
Importer.FromDiag(D->getLocation(), diag::err_unsupported_ast_node)
<< D->getDeclKindName();
- return true;
+ return make_error<ImportError>(ImportError::UnsupportedConstruct);
}
}
}
// Import the context of this declaration.
- DC = Importer.ImportContext(OrigDC);
- if (!DC)
- return true;
-
- LexicalDC = DC;
- if (D->getDeclContext() != D->getLexicalDeclContext()) {
- LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
- if (!LexicalDC)
- return true;
- }
+ if (Error Err = ImportDeclContext(D, DC, LexicalDC))
+ return Err;
// Import the name of this declaration.
- Name = Importer.Import(D->getDeclName());
- if (D->getDeclName() && !Name)
- return true;
+ if (Error Err = importInto(Name, D->getDeclName()))
+ return Err;
// Import the location of this declaration.
- Loc = Importer.Import(D->getLocation());
+ if (Error Err = importInto(Loc, D->getLocation()))
+ return Err;
+
ToD = cast_or_null<NamedDecl>(Importer.GetAlreadyImportedOrNull(D));
- return false;
+ if (ToD)
+ if (Error Err = ASTNodeImporter(*this).ImportDefinitionIfNeeded(D, ToD))
+ return Err;
+
+ return Error::success();
}
-void ASTNodeImporter::ImportDefinitionIfNeeded(Decl *FromD, Decl *ToD) {
+Error ASTNodeImporter::ImportDefinitionIfNeeded(Decl *FromD, Decl *ToD) {
if (!FromD)
- return;
-
- if (!ToD) {
- ToD = Importer.Import(FromD);
- if (!ToD)
- return;
- }
+ return Error::success();
- if (auto *FromRecord = dyn_cast<RecordDecl>(FromD)) {
- if (auto *ToRecord = cast_or_null<RecordDecl>(ToD)) {
- if (FromRecord->getDefinition() && FromRecord->isCompleteDefinition() && !ToRecord->getDefinition()) {
- ImportDefinition(FromRecord, ToRecord);
+ if (!ToD)
+ if (Error Err = importInto(ToD, FromD))
+ return Err;
+
+ if (RecordDecl *FromRecord = dyn_cast<RecordDecl>(FromD)) {
+ if (RecordDecl *ToRecord = cast<RecordDecl>(ToD)) {
+ if (FromRecord->getDefinition() && FromRecord->isCompleteDefinition() &&
+ !ToRecord->getDefinition()) {
+ if (Error Err = ImportDefinition(FromRecord, ToRecord))
+ return Err;
}
}
- return;
+ return Error::success();
}
- if (auto *FromEnum = dyn_cast<EnumDecl>(FromD)) {
- if (auto *ToEnum = cast_or_null<EnumDecl>(ToD)) {
+ if (EnumDecl *FromEnum = dyn_cast<EnumDecl>(FromD)) {
+ if (EnumDecl *ToEnum = cast<EnumDecl>(ToD)) {
if (FromEnum->getDefinition() && !ToEnum->getDefinition()) {
- ImportDefinition(FromEnum, ToEnum);
+ if (Error Err = ImportDefinition(FromEnum, ToEnum))
+ return Err;
}
}
- return;
+ return Error::success();
}
+
+ return Error::success();
}
-void
-ASTNodeImporter::ImportDeclarationNameLoc(const DeclarationNameInfo &From,
- DeclarationNameInfo& To) {
+Error
+ASTNodeImporter::ImportDeclarationNameLoc(
+ const DeclarationNameInfo &From, DeclarationNameInfo& To) {
// NOTE: To.Name and To.Loc are already imported.
// We only have to import To.LocInfo.
switch (To.getName().getNameKind()) {
@@ -1253,76 +1631,121 @@ ASTNodeImporter::ImportDeclarationNameLoc(const DeclarationNameInfo &From,
case DeclarationName::ObjCMultiArgSelector:
case DeclarationName::CXXUsingDirective:
case DeclarationName::CXXDeductionGuideName:
- return;
+ return Error::success();
case DeclarationName::CXXOperatorName: {
- SourceRange Range = From.getCXXOperatorNameRange();
- To.setCXXOperatorNameRange(Importer.Import(Range));
- return;
+ if (auto ToRangeOrErr = import(From.getCXXOperatorNameRange()))
+ To.setCXXOperatorNameRange(*ToRangeOrErr);
+ else
+ return ToRangeOrErr.takeError();
+ return Error::success();
}
case DeclarationName::CXXLiteralOperatorName: {
- SourceLocation Loc = From.getCXXLiteralOperatorNameLoc();
- To.setCXXLiteralOperatorNameLoc(Importer.Import(Loc));
- return;
+ if (ExpectedSLoc LocOrErr = import(From.getCXXLiteralOperatorNameLoc()))
+ To.setCXXLiteralOperatorNameLoc(*LocOrErr);
+ else
+ return LocOrErr.takeError();
+ return Error::success();
}
case DeclarationName::CXXConstructorName:
case DeclarationName::CXXDestructorName:
case DeclarationName::CXXConversionFunctionName: {
- TypeSourceInfo *FromTInfo = From.getNamedTypeInfo();
- To.setNamedTypeInfo(Importer.Import(FromTInfo));
- return;
+ if (auto ToTInfoOrErr = import(From.getNamedTypeInfo()))
+ To.setNamedTypeInfo(*ToTInfoOrErr);
+ else
+ return ToTInfoOrErr.takeError();
+ return Error::success();
}
}
llvm_unreachable("Unknown name kind.");
}
-void ASTNodeImporter::ImportDeclContext(DeclContext *FromDC, bool ForceImport) {
+Error
+ASTNodeImporter::ImportDeclContext(DeclContext *FromDC, bool ForceImport) {
if (Importer.isMinimalImport() && !ForceImport) {
- Importer.ImportContext(FromDC);
- return;
+ auto ToDCOrErr = Importer.ImportContext(FromDC);
+ return ToDCOrErr.takeError();
}
+ llvm::SmallVector<Decl *, 8> ImportedDecls;
+ for (auto *From : FromDC->decls()) {
+ ExpectedDecl ImportedOrErr = import(From);
+ if (!ImportedOrErr)
+      // Ignore the error and continue with the next Decl.
+      // FIXME: Handle this case in a better way.
+ consumeError(ImportedOrErr.takeError());
+ }
+
+ return Error::success();
+}
+
+Error ASTNodeImporter::ImportDeclContext(
+ Decl *FromD, DeclContext *&ToDC, DeclContext *&ToLexicalDC) {
+ auto ToDCOrErr = Importer.ImportContext(FromD->getDeclContext());
+ if (!ToDCOrErr)
+ return ToDCOrErr.takeError();
+ ToDC = *ToDCOrErr;
+
+ if (FromD->getDeclContext() != FromD->getLexicalDeclContext()) {
+ auto ToLexicalDCOrErr = Importer.ImportContext(
+ FromD->getLexicalDeclContext());
+ if (!ToLexicalDCOrErr)
+ return ToLexicalDCOrErr.takeError();
+ ToLexicalDC = *ToLexicalDCOrErr;
+ } else
+ ToLexicalDC = ToDC;
- for (auto *From : FromDC->decls())
- Importer.Import(From);
+ return Error::success();
}
-void ASTNodeImporter::ImportImplicitMethods(
+Error ASTNodeImporter::ImportImplicitMethods(
const CXXRecordDecl *From, CXXRecordDecl *To) {
assert(From->isCompleteDefinition() && To->getDefinition() == To &&
"Import implicit methods to or from non-definition");
for (CXXMethodDecl *FromM : From->methods())
- if (FromM->isImplicit())
- Importer.Import(FromM);
+ if (FromM->isImplicit()) {
+ Expected<CXXMethodDecl *> ToMOrErr = import(FromM);
+ if (!ToMOrErr)
+ return ToMOrErr.takeError();
+ }
+
+ return Error::success();
}
-static void setTypedefNameForAnonDecl(TagDecl *From, TagDecl *To,
- ASTImporter &Importer) {
+static Error setTypedefNameForAnonDecl(TagDecl *From, TagDecl *To,
+ ASTImporter &Importer) {
if (TypedefNameDecl *FromTypedef = From->getTypedefNameForAnonDecl()) {
- auto *ToTypedef =
- cast_or_null<TypedefNameDecl>(Importer.Import(FromTypedef));
- assert (ToTypedef && "Failed to import typedef of an anonymous structure");
-
- To->setTypedefNameForAnonDecl(ToTypedef);
+ Decl *ToTypedef = Importer.Import(FromTypedef);
+ if (!ToTypedef)
+ return make_error<ImportError>();
+ To->setTypedefNameForAnonDecl(cast<TypedefNameDecl>(ToTypedef));
+ // FIXME: This should be the final code.
+ //if (Expected<Decl *> ToTypedefOrErr = Importer.Import(FromTypedef))
+ // To->setTypedefNameForAnonDecl(cast<TypedefNameDecl>(*ToTypedefOrErr));
+ //else
+ // return ToTypedefOrErr.takeError();
}
+ return Error::success();
}
-bool ASTNodeImporter::ImportDefinition(RecordDecl *From, RecordDecl *To,
- ImportDefinitionKind Kind) {
+Error ASTNodeImporter::ImportDefinition(
+ RecordDecl *From, RecordDecl *To, ImportDefinitionKind Kind) {
if (To->getDefinition() || To->isBeingDefined()) {
if (Kind == IDK_Everything)
- ImportDeclContext(From, /*ForceImport=*/true);
+ return ImportDeclContext(From, /*ForceImport=*/true);
- return false;
+ return Error::success();
}
To->startDefinition();
- setTypedefNameForAnonDecl(From, To, Importer);
+ if (Error Err = setTypedefNameForAnonDecl(From, To, Importer))
+ return Err;
// Add base classes.
- if (auto *ToCXX = dyn_cast<CXXRecordDecl>(To)) {
- auto *FromCXX = cast<CXXRecordDecl>(From);
+ auto *ToCXX = dyn_cast<CXXRecordDecl>(To);
+ auto *FromCXX = dyn_cast<CXXRecordDecl>(From);
+ if (ToCXX && FromCXX && ToCXX->dataPtr() && FromCXX->dataPtr()) {
struct CXXRecordDecl::DefinitionData &ToData = ToCXX->data();
struct CXXRecordDecl::DefinitionData &FromData = FromCXX->data();
@@ -1394,50 +1817,65 @@ bool ASTNodeImporter::ImportDefinition(RecordDecl *From, RecordDecl *To,
SmallVector<CXXBaseSpecifier *, 4> Bases;
for (const auto &Base1 : FromCXX->bases()) {
- QualType T = Importer.Import(Base1.getType());
- if (T.isNull())
- return true;
+ ExpectedType TyOrErr = import(Base1.getType());
+ if (!TyOrErr)
+ return TyOrErr.takeError();
SourceLocation EllipsisLoc;
- if (Base1.isPackExpansion())
- EllipsisLoc = Importer.Import(Base1.getEllipsisLoc());
+ if (Base1.isPackExpansion()) {
+ if (ExpectedSLoc LocOrErr = import(Base1.getEllipsisLoc()))
+ EllipsisLoc = *LocOrErr;
+ else
+ return LocOrErr.takeError();
+ }
// Ensure that we have a definition for the base.
- ImportDefinitionIfNeeded(Base1.getType()->getAsCXXRecordDecl());
+ if (Error Err =
+ ImportDefinitionIfNeeded(Base1.getType()->getAsCXXRecordDecl()))
+ return Err;
+
+ auto RangeOrErr = import(Base1.getSourceRange());
+ if (!RangeOrErr)
+ return RangeOrErr.takeError();
+
+ auto TSIOrErr = import(Base1.getTypeSourceInfo());
+ if (!TSIOrErr)
+ return TSIOrErr.takeError();
Bases.push_back(
- new (Importer.getToContext())
- CXXBaseSpecifier(Importer.Import(Base1.getSourceRange()),
- Base1.isVirtual(),
- Base1.isBaseOfClass(),
- Base1.getAccessSpecifierAsWritten(),
- Importer.Import(Base1.getTypeSourceInfo()),
- EllipsisLoc));
+ new (Importer.getToContext()) CXXBaseSpecifier(
+ *RangeOrErr,
+ Base1.isVirtual(),
+ Base1.isBaseOfClass(),
+ Base1.getAccessSpecifierAsWritten(),
+ *TSIOrErr,
+ EllipsisLoc));
}
if (!Bases.empty())
ToCXX->setBases(Bases.data(), Bases.size());
}
if (shouldForceImportDeclContext(Kind))
- ImportDeclContext(From, /*ForceImport=*/true);
+ if (Error Err = ImportDeclContext(From, /*ForceImport=*/true))
+ return Err;
To->completeDefinition();
- return false;
+ return Error::success();
}
-bool ASTNodeImporter::ImportInitializer(VarDecl *From, VarDecl *To) {
+Error ASTNodeImporter::ImportInitializer(VarDecl *From, VarDecl *To) {
if (To->getAnyInitializer())
- return false;
+ return Error::success();
Expr *FromInit = From->getInit();
if (!FromInit)
- return false;
+ return Error::success();
- Expr *ToInit = Importer.Import(const_cast<Expr *>(FromInit));
- if (!ToInit)
- return true;
+ ExpectedExpr ToInitOrErr = import(FromInit);
+ if (!ToInitOrErr)
+ return ToInitOrErr.takeError();
- To->setInit(ToInit);
+ To->setInit(*ToInitOrErr);
if (From->isInitKnownICE()) {
EvaluatedStmt *Eval = To->ensureEvaluatedStmt();
Eval->CheckedICE = true;
@@ -1445,185 +1883,106 @@ bool ASTNodeImporter::ImportInitializer(VarDecl *From, VarDecl *To) {
}
// FIXME: Other bits to merge?
- return false;
+ return Error::success();
}
-bool ASTNodeImporter::ImportDefinition(EnumDecl *From, EnumDecl *To,
- ImportDefinitionKind Kind) {
+Error ASTNodeImporter::ImportDefinition(
+ EnumDecl *From, EnumDecl *To, ImportDefinitionKind Kind) {
if (To->getDefinition() || To->isBeingDefined()) {
if (Kind == IDK_Everything)
- ImportDeclContext(From, /*ForceImport=*/true);
- return false;
+ return ImportDeclContext(From, /*ForceImport=*/true);
+ return Error::success();
}
To->startDefinition();
- setTypedefNameForAnonDecl(From, To, Importer);
+ if (Error Err = setTypedefNameForAnonDecl(From, To, Importer))
+ return Err;
- QualType T = Importer.Import(Importer.getFromContext().getTypeDeclType(From));
- if (T.isNull())
- return true;
+ ExpectedType ToTypeOrErr =
+ import(Importer.getFromContext().getTypeDeclType(From));
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
- QualType ToPromotionType = Importer.Import(From->getPromotionType());
- if (ToPromotionType.isNull())
- return true;
+ ExpectedType ToPromotionTypeOrErr = import(From->getPromotionType());
+ if (!ToPromotionTypeOrErr)
+ return ToPromotionTypeOrErr.takeError();
if (shouldForceImportDeclContext(Kind))
- ImportDeclContext(From, /*ForceImport=*/true);
+ if (Error Err = ImportDeclContext(From, /*ForceImport=*/true))
+ return Err;
// FIXME: we might need to merge the number of positive or negative bits
// if the enumerator lists don't match.
- To->completeDefinition(T, ToPromotionType,
+ To->completeDefinition(*ToTypeOrErr, *ToPromotionTypeOrErr,
From->getNumPositiveBits(),
From->getNumNegativeBits());
- return false;
+ return Error::success();
}
-TemplateParameterList *ASTNodeImporter::ImportTemplateParameterList(
- TemplateParameterList *Params) {
+// FIXME: Remove this, use `import` instead.
+Expected<TemplateParameterList *> ASTNodeImporter::ImportTemplateParameterList(
+ TemplateParameterList *Params) {
SmallVector<NamedDecl *, 4> ToParams(Params->size());
- if (ImportContainerChecked(*Params, ToParams))
- return nullptr;
+ if (Error Err = ImportContainerChecked(*Params, ToParams))
+ return std::move(Err);
Expr *ToRequiresClause;
if (Expr *const R = Params->getRequiresClause()) {
- ToRequiresClause = Importer.Import(R);
- if (!ToRequiresClause)
- return nullptr;
+ if (Error Err = importInto(ToRequiresClause, R))
+ return std::move(Err);
} else {
ToRequiresClause = nullptr;
}
- return TemplateParameterList::Create(Importer.getToContext(),
- Importer.Import(Params->getTemplateLoc()),
- Importer.Import(Params->getLAngleLoc()),
- ToParams,
- Importer.Import(Params->getRAngleLoc()),
- ToRequiresClause);
-}
-
-TemplateArgument
-ASTNodeImporter::ImportTemplateArgument(const TemplateArgument &From) {
- switch (From.getKind()) {
- case TemplateArgument::Null:
- return TemplateArgument();
-
- case TemplateArgument::Type: {
- QualType ToType = Importer.Import(From.getAsType());
- if (ToType.isNull())
- return {};
- return TemplateArgument(ToType);
- }
-
- case TemplateArgument::Integral: {
- QualType ToType = Importer.Import(From.getIntegralType());
- if (ToType.isNull())
- return {};
- return TemplateArgument(From, ToType);
- }
-
- case TemplateArgument::Declaration: {
- auto *To = cast_or_null<ValueDecl>(Importer.Import(From.getAsDecl()));
- QualType ToType = Importer.Import(From.getParamTypeForDecl());
- if (!To || ToType.isNull())
- return {};
- return TemplateArgument(To, ToType);
- }
-
- case TemplateArgument::NullPtr: {
- QualType ToType = Importer.Import(From.getNullPtrType());
- if (ToType.isNull())
- return {};
- return TemplateArgument(ToType, /*isNullPtr*/true);
- }
-
- case TemplateArgument::Template: {
- TemplateName ToTemplate = Importer.Import(From.getAsTemplate());
- if (ToTemplate.isNull())
- return {};
-
- return TemplateArgument(ToTemplate);
- }
-
- case TemplateArgument::TemplateExpansion: {
- TemplateName ToTemplate
- = Importer.Import(From.getAsTemplateOrTemplatePattern());
- if (ToTemplate.isNull())
- return {};
-
- return TemplateArgument(ToTemplate, From.getNumTemplateExpansions());
- }
-
- case TemplateArgument::Expression:
- if (Expr *ToExpr = Importer.Import(From.getAsExpr()))
- return TemplateArgument(ToExpr);
- return TemplateArgument();
-
- case TemplateArgument::Pack: {
- SmallVector<TemplateArgument, 2> ToPack;
- ToPack.reserve(From.pack_size());
- if (ImportTemplateArguments(From.pack_begin(), From.pack_size(), ToPack))
- return {};
-
- return TemplateArgument(
- llvm::makeArrayRef(ToPack).copy(Importer.getToContext()));
- }
- }
-
- llvm_unreachable("Invalid template argument kind");
-}
-
-Optional<TemplateArgumentLoc>
-ASTNodeImporter::ImportTemplateArgumentLoc(const TemplateArgumentLoc &TALoc) {
- TemplateArgument Arg = ImportTemplateArgument(TALoc.getArgument());
- TemplateArgumentLocInfo FromInfo = TALoc.getLocInfo();
- TemplateArgumentLocInfo ToInfo;
- if (Arg.getKind() == TemplateArgument::Expression) {
- Expr *E = Importer.Import(FromInfo.getAsExpr());
- ToInfo = TemplateArgumentLocInfo(E);
- if (!E)
- return None;
- } else if (Arg.getKind() == TemplateArgument::Type) {
- if (TypeSourceInfo *TSI = Importer.Import(FromInfo.getAsTypeSourceInfo()))
- ToInfo = TemplateArgumentLocInfo(TSI);
+ auto ToTemplateLocOrErr = import(Params->getTemplateLoc());
+ if (!ToTemplateLocOrErr)
+ return ToTemplateLocOrErr.takeError();
+ auto ToLAngleLocOrErr = import(Params->getLAngleLoc());
+ if (!ToLAngleLocOrErr)
+ return ToLAngleLocOrErr.takeError();
+ auto ToRAngleLocOrErr = import(Params->getRAngleLoc());
+ if (!ToRAngleLocOrErr)
+ return ToRAngleLocOrErr.takeError();
+
+ return TemplateParameterList::Create(
+ Importer.getToContext(),
+ *ToTemplateLocOrErr,
+ *ToLAngleLocOrErr,
+ ToParams,
+ *ToRAngleLocOrErr,
+ ToRequiresClause);
+}
+
+Error ASTNodeImporter::ImportTemplateArguments(
+ const TemplateArgument *FromArgs, unsigned NumFromArgs,
+ SmallVectorImpl<TemplateArgument> &ToArgs) {
+ for (unsigned I = 0; I != NumFromArgs; ++I) {
+ if (auto ToOrErr = import(FromArgs[I]))
+ ToArgs.push_back(*ToOrErr);
else
- return None;
- } else {
- ToInfo = TemplateArgumentLocInfo(
- Importer.Import(FromInfo.getTemplateQualifierLoc()),
- Importer.Import(FromInfo.getTemplateNameLoc()),
- Importer.Import(FromInfo.getTemplateEllipsisLoc()));
+ return ToOrErr.takeError();
}
- return TemplateArgumentLoc(Arg, ToInfo);
-}
-bool ASTNodeImporter::ImportTemplateArguments(const TemplateArgument *FromArgs,
- unsigned NumFromArgs,
- SmallVectorImpl<TemplateArgument> &ToArgs) {
- for (unsigned I = 0; I != NumFromArgs; ++I) {
- TemplateArgument To = ImportTemplateArgument(FromArgs[I]);
- if (To.isNull() && !FromArgs[I].isNull())
- return true;
-
- ToArgs.push_back(To);
- }
+ return Error::success();
+}
- return false;
+// FIXME: Do not forget to remove this and use only 'import'.
+Expected<TemplateArgument>
+ASTNodeImporter::ImportTemplateArgument(const TemplateArgument &From) {
+ return import(From);
}
-// We cannot use Optional<> pattern here and below because
-// TemplateArgumentListInfo's operator new is declared as deleted so it cannot
-// be stored in Optional.
template <typename InContainerTy>
-bool ASTNodeImporter::ImportTemplateArgumentListInfo(
+Error ASTNodeImporter::ImportTemplateArgumentListInfo(
const InContainerTy &Container, TemplateArgumentListInfo &ToTAInfo) {
for (const auto &FromLoc : Container) {
- if (auto ToLoc = ImportTemplateArgumentLoc(FromLoc))
- ToTAInfo.addArgument(*ToLoc);
+ if (auto ToLocOrErr = import(FromLoc))
+ ToTAInfo.addArgument(*ToLocOrErr);
else
- return true;
+ return ToLocOrErr.takeError();
}
- return false;
+ return Error::success();
}
static StructuralEquivalenceKind
@@ -1720,30 +2079,31 @@ bool ASTNodeImporter::IsStructuralMatch(VarTemplateDecl *From,
return Ctx.IsEquivalent(From, To);
}
-Decl *ASTNodeImporter::VisitDecl(Decl *D) {
+ExpectedDecl ASTNodeImporter::VisitDecl(Decl *D) {
Importer.FromDiag(D->getLocation(), diag::err_unsupported_ast_node)
<< D->getDeclKindName();
- return nullptr;
+ return make_error<ImportError>(ImportError::UnsupportedConstruct);
}
-Decl *ASTNodeImporter::VisitEmptyDecl(EmptyDecl *D) {
- // Import the context of this declaration.
- DeclContext *DC = Importer.ImportContext(D->getDeclContext());
- if (!DC)
- return nullptr;
+ExpectedDecl ASTNodeImporter::VisitImportDecl(ImportDecl *D) {
+ Importer.FromDiag(D->getLocation(), diag::err_unsupported_ast_node)
+ << D->getDeclKindName();
+ return make_error<ImportError>(ImportError::UnsupportedConstruct);
+}
- DeclContext *LexicalDC = DC;
- if (D->getDeclContext() != D->getLexicalDeclContext()) {
- LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
- if (!LexicalDC)
- return nullptr;
- }
+ExpectedDecl ASTNodeImporter::VisitEmptyDecl(EmptyDecl *D) {
+ // Import the context of this declaration.
+ DeclContext *DC, *LexicalDC;
+ if (Error Err = ImportDeclContext(D, DC, LexicalDC))
+ return std::move(Err);
// Import the location of this declaration.
- SourceLocation Loc = Importer.Import(D->getLocation());
+ ExpectedSLoc LocOrErr = import(D->getLocation());
+ if (!LocOrErr)
+ return LocOrErr.takeError();
EmptyDecl *ToD;
- if (GetImportedOrCreateDecl(ToD, D, Importer.getToContext(), DC, Loc))
+ if (GetImportedOrCreateDecl(ToD, D, Importer.getToContext(), DC, *LocOrErr))
return ToD;
ToD->setLexicalDeclContext(LexicalDC);
@@ -1751,7 +2111,7 @@ Decl *ASTNodeImporter::VisitEmptyDecl(EmptyDecl *D) {
return ToD;
}
-Decl *ASTNodeImporter::VisitTranslationUnitDecl(TranslationUnitDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitTranslationUnitDecl(TranslationUnitDecl *D) {
TranslationUnitDecl *ToD =
Importer.getToContext().getTranslationUnitDecl();
@@ -1760,18 +2120,23 @@ Decl *ASTNodeImporter::VisitTranslationUnitDecl(TranslationUnitDecl *D) {
return ToD;
}
-Decl *ASTNodeImporter::VisitAccessSpecDecl(AccessSpecDecl *D) {
- SourceLocation Loc = Importer.Import(D->getLocation());
- SourceLocation ColonLoc = Importer.Import(D->getColonLoc());
+ExpectedDecl ASTNodeImporter::VisitAccessSpecDecl(AccessSpecDecl *D) {
+ ExpectedSLoc LocOrErr = import(D->getLocation());
+ if (!LocOrErr)
+ return LocOrErr.takeError();
+ auto ColonLocOrErr = import(D->getColonLoc());
+ if (!ColonLocOrErr)
+ return ColonLocOrErr.takeError();
// Import the context of this declaration.
- DeclContext *DC = Importer.ImportContext(D->getDeclContext());
- if (!DC)
- return nullptr;
+ auto DCOrErr = Importer.ImportContext(D->getDeclContext());
+ if (!DCOrErr)
+ return DCOrErr.takeError();
+ DeclContext *DC = *DCOrErr;
AccessSpecDecl *ToD;
if (GetImportedOrCreateDecl(ToD, D, Importer.getToContext(), D->getAccess(),
- DC, Loc, ColonLoc))
+ DC, *LocOrErr, *ColonLocOrErr))
return ToD;
// Lexical DeclContext and Semantic DeclContext
@@ -1782,29 +2147,26 @@ Decl *ASTNodeImporter::VisitAccessSpecDecl(AccessSpecDecl *D) {
return ToD;
}
-Decl *ASTNodeImporter::VisitStaticAssertDecl(StaticAssertDecl *D) {
- DeclContext *DC = Importer.ImportContext(D->getDeclContext());
- if (!DC)
- return nullptr;
-
+ExpectedDecl ASTNodeImporter::VisitStaticAssertDecl(StaticAssertDecl *D) {
+ auto DCOrErr = Importer.ImportContext(D->getDeclContext());
+ if (!DCOrErr)
+ return DCOrErr.takeError();
+ DeclContext *DC = *DCOrErr;
DeclContext *LexicalDC = DC;
- // Import the location of this declaration.
- SourceLocation Loc = Importer.Import(D->getLocation());
-
- Expr *AssertExpr = Importer.Import(D->getAssertExpr());
- if (!AssertExpr)
- return nullptr;
-
- StringLiteral *FromMsg = D->getMessage();
- auto *ToMsg = cast_or_null<StringLiteral>(Importer.Import(FromMsg));
- if (!ToMsg && FromMsg)
- return nullptr;
+ SourceLocation ToLocation, ToRParenLoc;
+ Expr *ToAssertExpr;
+ StringLiteral *ToMessage;
+ if (auto Imp = importSeq(
+ D->getLocation(), D->getAssertExpr(), D->getMessage(), D->getRParenLoc()))
+ std::tie(ToLocation, ToAssertExpr, ToMessage, ToRParenLoc) = *Imp;
+ else
+ return Imp.takeError();
StaticAssertDecl *ToD;
if (GetImportedOrCreateDecl(
- ToD, D, Importer.getToContext(), DC, Loc, AssertExpr, ToMsg,
- Importer.Import(D->getRParenLoc()), D->isFailed()))
+ ToD, D, Importer.getToContext(), DC, ToLocation, ToAssertExpr, ToMessage,
+ ToRParenLoc, D->isFailed()))
return ToD;
ToD->setLexicalDeclContext(LexicalDC);
@@ -1812,14 +2174,14 @@ Decl *ASTNodeImporter::VisitStaticAssertDecl(StaticAssertDecl *D) {
return ToD;
}
-Decl *ASTNodeImporter::VisitNamespaceDecl(NamespaceDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitNamespaceDecl(NamespaceDecl *D) {
// Import the major distinguishing characteristics of this namespace.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
@@ -1853,15 +2215,21 @@ Decl *ASTNodeImporter::VisitNamespaceDecl(NamespaceDecl *D) {
Name = Importer.HandleNameConflict(Name, DC, Decl::IDNS_Namespace,
ConflictingDecls.data(),
ConflictingDecls.size());
+ if (!Name)
+ return make_error<ImportError>(ImportError::NameConflict);
}
}
+ ExpectedSLoc BeginLocOrErr = import(D->getBeginLoc());
+ if (!BeginLocOrErr)
+ return BeginLocOrErr.takeError();
+
// Create the "to" namespace, if needed.
NamespaceDecl *ToNamespace = MergeWithNamespace;
if (!ToNamespace) {
if (GetImportedOrCreateDecl(
ToNamespace, D, Importer.getToContext(), DC, D->isInline(),
- Importer.Import(D->getBeginLoc()), Loc, Name.getAsIdentifierInfo(),
+ *BeginLocOrErr, Loc, Name.getAsIdentifierInfo(),
/*PrevDecl=*/nullptr))
return ToNamespace;
ToNamespace->setLexicalDeclContext(LexicalDC);
@@ -1878,43 +2246,42 @@ Decl *ASTNodeImporter::VisitNamespaceDecl(NamespaceDecl *D) {
}
Importer.MapImported(D, ToNamespace);
- ImportDeclContext(D);
+ if (Error Err = ImportDeclContext(D))
+ return std::move(Err);
return ToNamespace;
}
-Decl *ASTNodeImporter::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) {
// Import the major distinguishing characteristics of this namespace.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *LookupD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, LookupD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, LookupD, Loc))
+ return std::move(Err);
if (LookupD)
return LookupD;
// NOTE: No conflict resolution is done for namespace aliases now.
- auto *TargetDecl = cast_or_null<NamespaceDecl>(
- Importer.Import(D->getNamespace()));
- if (!TargetDecl)
- return nullptr;
-
- IdentifierInfo *ToII = Importer.Import(D->getIdentifier());
- if (!ToII)
- return nullptr;
-
- NestedNameSpecifierLoc ToQLoc = Importer.Import(D->getQualifierLoc());
- if (D->getQualifierLoc() && !ToQLoc)
- return nullptr;
+ SourceLocation ToNamespaceLoc, ToAliasLoc, ToTargetNameLoc;
+ NestedNameSpecifierLoc ToQualifierLoc;
+ NamespaceDecl *ToNamespace;
+ if (auto Imp = importSeq(
+ D->getNamespaceLoc(), D->getAliasLoc(), D->getQualifierLoc(),
+ D->getTargetNameLoc(), D->getNamespace()))
+ std::tie(
+ ToNamespaceLoc, ToAliasLoc, ToQualifierLoc, ToTargetNameLoc,
+ ToNamespace) = *Imp;
+ else
+ return Imp.takeError();
+ IdentifierInfo *ToIdentifier = Importer.Import(D->getIdentifier());
NamespaceAliasDecl *ToD;
- if (GetImportedOrCreateDecl(ToD, D, Importer.getToContext(), DC,
- Importer.Import(D->getNamespaceLoc()),
- Importer.Import(D->getAliasLoc()), ToII, ToQLoc,
- Importer.Import(D->getTargetNameLoc()),
- TargetDecl))
+ if (GetImportedOrCreateDecl(
+ ToD, D, Importer.getToContext(), DC, ToNamespaceLoc, ToAliasLoc,
+ ToIdentifier, ToQualifierLoc, ToTargetNameLoc, ToNamespace))
return ToD;
ToD->setLexicalDeclContext(LexicalDC);
@@ -1923,14 +2290,15 @@ Decl *ASTNodeImporter::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) {
return ToD;
}
-Decl *ASTNodeImporter::VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias) {
+ExpectedDecl
+ASTNodeImporter::VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias) {
// Import the major distinguishing characteristics of this typedef.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
@@ -1946,9 +2314,16 @@ Decl *ASTNodeImporter::VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias) {
if (!FoundDecl->isInIdentifierNamespace(IDNS))
continue;
if (auto *FoundTypedef = dyn_cast<TypedefNameDecl>(FoundDecl)) {
- if (Importer.IsStructurallyEquivalent(D->getUnderlyingType(),
- FoundTypedef->getUnderlyingType()))
- return Importer.MapImported(D, FoundTypedef);
+ QualType FromUT = D->getUnderlyingType();
+ QualType FoundUT = FoundTypedef->getUnderlyingType();
+ if (Importer.IsStructurallyEquivalent(FromUT, FoundUT)) {
+ // If the "From" context has a complete underlying type but we
+ // already have a complete underlying type then return with that.
+ if (!FromUT->isIncompleteType() && !FoundUT->isIncompleteType())
+ return Importer.MapImported(D, FoundTypedef);
+ }
+ // FIXME Handle redecl chain.
+ break;
}
ConflictingDecls.push_back(FoundDecl);
@@ -1959,28 +2334,30 @@ Decl *ASTNodeImporter::VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias) {
ConflictingDecls.data(),
ConflictingDecls.size());
if (!Name)
- return nullptr;
+ return make_error<ImportError>(ImportError::NameConflict);
}
}
- // Import the underlying type of this typedef;
- QualType T = Importer.Import(D->getUnderlyingType());
- if (T.isNull())
- return nullptr;
+ QualType ToUnderlyingType;
+ TypeSourceInfo *ToTypeSourceInfo;
+ SourceLocation ToBeginLoc;
+ if (auto Imp = importSeq(
+ D->getUnderlyingType(), D->getTypeSourceInfo(), D->getBeginLoc()))
+ std::tie(ToUnderlyingType, ToTypeSourceInfo, ToBeginLoc) = *Imp;
+ else
+ return Imp.takeError();
// Create the new typedef node.
- TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
- SourceLocation StartL = Importer.Import(D->getBeginLoc());
-
+ // FIXME: ToUnderlyingType is not used.
TypedefNameDecl *ToTypedef;
if (IsAlias) {
if (GetImportedOrCreateDecl<TypeAliasDecl>(
- ToTypedef, D, Importer.getToContext(), DC, StartL, Loc,
- Name.getAsIdentifierInfo(), TInfo))
+ ToTypedef, D, Importer.getToContext(), DC, ToBeginLoc, Loc,
+ Name.getAsIdentifierInfo(), ToTypeSourceInfo))
return ToTypedef;
} else if (GetImportedOrCreateDecl<TypedefDecl>(
- ToTypedef, D, Importer.getToContext(), DC, StartL, Loc,
- Name.getAsIdentifierInfo(), TInfo))
+ ToTypedef, D, Importer.getToContext(), DC, ToBeginLoc, Loc,
+ Name.getAsIdentifierInfo(), ToTypeSourceInfo))
return ToTypedef;
ToTypedef->setAccess(D->getAccess());
@@ -1994,22 +2371,23 @@ Decl *ASTNodeImporter::VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias) {
return ToTypedef;
}
-Decl *ASTNodeImporter::VisitTypedefDecl(TypedefDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitTypedefDecl(TypedefDecl *D) {
return VisitTypedefNameDecl(D, /*IsAlias=*/false);
}
-Decl *ASTNodeImporter::VisitTypeAliasDecl(TypeAliasDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitTypeAliasDecl(TypeAliasDecl *D) {
return VisitTypedefNameDecl(D, /*IsAlias=*/true);
}
-Decl *ASTNodeImporter::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
+ExpectedDecl
+ASTNodeImporter::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
// Import the major distinguishing characteristics of this typedef.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *FoundD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, FoundD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, FoundD, Loc))
+ return std::move(Err);
if (FoundD)
return FoundD;
@@ -2034,26 +2412,23 @@ Decl *ASTNodeImporter::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
ConflictingDecls.data(),
ConflictingDecls.size());
if (!Name)
- return nullptr;
+ return make_error<ImportError>(ImportError::NameConflict);
}
}
- TemplateParameterList *Params = ImportTemplateParameterList(
- D->getTemplateParameters());
- if (!Params)
- return nullptr;
-
- auto *TemplDecl = cast_or_null<TypeAliasDecl>(
- Importer.Import(D->getTemplatedDecl()));
- if (!TemplDecl)
- return nullptr;
+ TemplateParameterList *ToTemplateParameters;
+ TypeAliasDecl *ToTemplatedDecl;
+ if (auto Imp = importSeq(D->getTemplateParameters(), D->getTemplatedDecl()))
+ std::tie(ToTemplateParameters, ToTemplatedDecl) = *Imp;
+ else
+ return Imp.takeError();
TypeAliasTemplateDecl *ToAlias;
if (GetImportedOrCreateDecl(ToAlias, D, Importer.getToContext(), DC, Loc,
- Name, Params, TemplDecl))
+ Name, ToTemplateParameters, ToTemplatedDecl))
return ToAlias;
- TemplDecl->setDescribedAliasTemplate(ToAlias);
+ ToTemplatedDecl->setDescribedAliasTemplate(ToAlias);
ToAlias->setAccess(D->getAccess());
ToAlias->setLexicalDeclContext(LexicalDC);
@@ -2061,48 +2436,53 @@ Decl *ASTNodeImporter::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
return ToAlias;
}
-Decl *ASTNodeImporter::VisitLabelDecl(LabelDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitLabelDecl(LabelDecl *D) {
// Import the major distinguishing characteristics of this label.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
assert(LexicalDC->isFunctionOrMethod());
LabelDecl *ToLabel;
- if (D->isGnuLocal()
- ? GetImportedOrCreateDecl(ToLabel, D, Importer.getToContext(), DC,
- Importer.Import(D->getLocation()),
- Name.getAsIdentifierInfo(),
- Importer.Import(D->getBeginLoc()))
- : GetImportedOrCreateDecl(ToLabel, D, Importer.getToContext(), DC,
- Importer.Import(D->getLocation()),
- Name.getAsIdentifierInfo()))
- return ToLabel;
-
- auto *Label = cast_or_null<LabelStmt>(Importer.Import(D->getStmt()));
- if (!Label)
- return nullptr;
+ if (D->isGnuLocal()) {
+ ExpectedSLoc BeginLocOrErr = import(D->getBeginLoc());
+ if (!BeginLocOrErr)
+ return BeginLocOrErr.takeError();
+ if (GetImportedOrCreateDecl(ToLabel, D, Importer.getToContext(), DC, Loc,
+ Name.getAsIdentifierInfo(), *BeginLocOrErr))
+ return ToLabel;
- ToLabel->setStmt(Label);
+ } else {
+ if (GetImportedOrCreateDecl(ToLabel, D, Importer.getToContext(), DC, Loc,
+ Name.getAsIdentifierInfo()))
+ return ToLabel;
+
+ }
+
+ Expected<LabelStmt *> ToStmtOrErr = import(D->getStmt());
+ if (!ToStmtOrErr)
+ return ToStmtOrErr.takeError();
+
+ ToLabel->setStmt(*ToStmtOrErr);
ToLabel->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(ToLabel);
return ToLabel;
}
-Decl *ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
// Import the major distinguishing characteristics of this enum.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
@@ -2110,7 +2490,9 @@ Decl *ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
unsigned IDNS = Decl::IDNS_Tag;
DeclarationName SearchName = Name;
if (!SearchName && D->getTypedefNameForAnonDecl()) {
- SearchName = Importer.Import(D->getTypedefNameForAnonDecl()->getDeclName());
+ if (Error Err = importInto(
+ SearchName, D->getTypedefNameForAnonDecl()->getDeclName()))
+ return std::move(Err);
IDNS = Decl::IDNS_Ordinary;
} else if (Importer.getToContext().getLangOpts().CPlusPlus)
IDNS |= Decl::IDNS_Ordinary;
@@ -2124,13 +2506,12 @@ Decl *ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
if (!FoundDecl->isInIdentifierNamespace(IDNS))
continue;
- Decl *Found = FoundDecl;
- if (auto *Typedef = dyn_cast<TypedefNameDecl>(Found)) {
+ if (auto *Typedef = dyn_cast<TypedefNameDecl>(FoundDecl)) {
if (const auto *Tag = Typedef->getUnderlyingType()->getAs<TagType>())
- Found = Tag->getDecl();
+ FoundDecl = Tag->getDecl();
}
- if (auto *FoundEnum = dyn_cast<EnumDecl>(Found)) {
+ if (auto *FoundEnum = dyn_cast<EnumDecl>(FoundDecl)) {
if (IsStructuralMatch(D, FoundEnum))
return Importer.MapImported(D, FoundEnum);
}
@@ -2142,37 +2523,43 @@ Decl *ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
Name = Importer.HandleNameConflict(Name, DC, IDNS,
ConflictingDecls.data(),
ConflictingDecls.size());
+ if (!Name)
+ return make_error<ImportError>(ImportError::NameConflict);
}
}
+ SourceLocation ToBeginLoc;
+ NestedNameSpecifierLoc ToQualifierLoc;
+ QualType ToIntegerType;
+ if (auto Imp = importSeq(
+ D->getBeginLoc(), D->getQualifierLoc(), D->getIntegerType()))
+ std::tie(ToBeginLoc, ToQualifierLoc, ToIntegerType) = *Imp;
+ else
+ return Imp.takeError();
+
// Create the enum declaration.
EnumDecl *D2;
if (GetImportedOrCreateDecl(
- D2, D, Importer.getToContext(), DC, Importer.Import(D->getBeginLoc()),
+ D2, D, Importer.getToContext(), DC, ToBeginLoc,
Loc, Name.getAsIdentifierInfo(), nullptr, D->isScoped(),
D->isScopedUsingClassTag(), D->isFixed()))
return D2;
- // Import the qualifier, if any.
- D2->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
+ D2->setQualifierInfo(ToQualifierLoc);
+ D2->setIntegerType(ToIntegerType);
D2->setAccess(D->getAccess());
D2->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(D2);
- // Import the integer type.
- QualType ToIntegerType = Importer.Import(D->getIntegerType());
- if (ToIntegerType.isNull())
- return nullptr;
- D2->setIntegerType(ToIntegerType);
-
// Import the definition
- if (D->isCompleteDefinition() && ImportDefinition(D, D2))
- return nullptr;
+ if (D->isCompleteDefinition())
+ if (Error Err = ImportDefinition(D, D2))
+ return std::move(Err);
return D2;
}
-Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
bool IsFriendTemplate = false;
if (auto *DCXX = dyn_cast<CXXRecordDecl>(D)) {
IsFriendTemplate =
@@ -2194,11 +2581,11 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
// ClassTemplateSpecializationDecl itself. Thus, we start with an extra
  // condition in order to be able to import the implicit Decl.
!D->isImplicit()) {
- Decl *ImportedDef = Importer.Import(Definition);
- if (!ImportedDef)
- return nullptr;
+ ExpectedDecl ImportedDefOrErr = import(Definition);
+ if (!ImportedDefOrErr)
+ return ImportedDefOrErr.takeError();
- return Importer.MapImported(D, ImportedDef);
+ return Importer.MapImported(D, *ImportedDefOrErr);
}
// Import the major distinguishing characteristics of this record.
@@ -2206,8 +2593,8 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
@@ -2215,7 +2602,9 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
unsigned IDNS = Decl::IDNS_Tag;
DeclarationName SearchName = Name;
if (!SearchName && D->getTypedefNameForAnonDecl()) {
- SearchName = Importer.Import(D->getTypedefNameForAnonDecl()->getDeclName());
+ if (Error Err = importInto(
+ SearchName, D->getTypedefNameForAnonDecl()->getDeclName()))
+ return std::move(Err);
IDNS = Decl::IDNS_Ordinary;
} else if (Importer.getToContext().getLangOpts().CPlusPlus)
IDNS |= Decl::IDNS_Ordinary;
@@ -2245,16 +2634,23 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
}
if (D->getDescribedTemplate()) {
- if (auto *Template = dyn_cast<ClassTemplateDecl>(Found))
+ if (auto *Template = dyn_cast<ClassTemplateDecl>(Found)) {
Found = Template->getTemplatedDecl();
- else
+ } else {
+ ConflictingDecls.push_back(FoundDecl);
continue;
+ }
}
if (auto *FoundRecord = dyn_cast<RecordDecl>(Found)) {
if (!SearchName) {
if (!IsStructuralMatch(D, FoundRecord, false))
continue;
+ } else {
+ if (!IsStructuralMatch(D, FoundRecord)) {
+ ConflictingDecls.push_back(FoundDecl);
+ continue;
+ }
}
PrevDecl = FoundRecord;
@@ -2263,8 +2659,7 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
if ((SearchName && !D->isCompleteDefinition() && !IsFriendTemplate)
|| (D->isCompleteDefinition() &&
D->isAnonymousStructOrUnion()
- == FoundDef->isAnonymousStructOrUnion() &&
- IsStructuralMatch(D, FoundDef))) {
+ == FoundDef->isAnonymousStructOrUnion())) {
// The record types structurally match, or the "from" translation
// unit only had a forward declaration anyway; call it the same
// declaration.
@@ -2278,10 +2673,13 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
if (D->isCompleteDefinition() && !Importer.isMinimalImport())
// FoundDef may not have every implicit method that D has
// because implicit methods are created only if they are used.
- ImportImplicitMethods(DCXX, FoundCXX);
+ if (Error Err = ImportImplicitMethods(DCXX, FoundCXX))
+ return std::move(Err);
}
return FoundDef;
}
+ if (IsFriendTemplate)
+ continue;
} else if (!D->isCompleteDefinition()) {
// We have a forward declaration of this type, so adopt that forward
// declaration rather than building a new one.
@@ -2297,18 +2695,18 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
if (FoundRecord->isCompleteDefinition() &&
D->isCompleteDefinition() &&
- !IsStructuralMatch(D, FoundRecord))
- continue;
-
- if (IsFriendTemplate)
+ !IsStructuralMatch(D, FoundRecord)) {
+ ConflictingDecls.push_back(FoundDecl);
continue;
+ }
AdoptDecl = FoundRecord;
continue;
- } else if (!SearchName) {
- continue;
}
- }
+
+ continue;
+ } else if (isa<ValueDecl>(Found))
+ continue;
ConflictingDecls.push_back(FoundDecl);
}
@@ -2317,33 +2715,40 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
Name = Importer.HandleNameConflict(Name, DC, IDNS,
ConflictingDecls.data(),
ConflictingDecls.size());
+ if (!Name)
+ return make_error<ImportError>(ImportError::NameConflict);
}
}
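+  // Import the begin location early; several creation paths below use it.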
+ ExpectedSLoc BeginLocOrErr = import(D->getBeginLoc());
+ if (!BeginLocOrErr)
+ return BeginLocOrErr.takeError();
+
// Create the record declaration.
RecordDecl *D2 = AdoptDecl;
- SourceLocation StartLoc = Importer.Import(D->getBeginLoc());
if (!D2) {
CXXRecordDecl *D2CXX = nullptr;
if (auto *DCXX = dyn_cast<CXXRecordDecl>(D)) {
if (DCXX->isLambda()) {
- TypeSourceInfo *TInfo = Importer.Import(DCXX->getLambdaTypeInfo());
+ auto TInfoOrErr = import(DCXX->getLambdaTypeInfo());
+ if (!TInfoOrErr)
+ return TInfoOrErr.takeError();
if (GetImportedOrCreateSpecialDecl(
D2CXX, CXXRecordDecl::CreateLambda, D, Importer.getToContext(),
- DC, TInfo, Loc, DCXX->isDependentLambda(),
+ DC, *TInfoOrErr, Loc, DCXX->isDependentLambda(),
DCXX->isGenericLambda(), DCXX->getLambdaCaptureDefault()))
return D2CXX;
- Decl *CDecl = Importer.Import(DCXX->getLambdaContextDecl());
- if (DCXX->getLambdaContextDecl() && !CDecl)
- return nullptr;
- D2CXX->setLambdaMangling(DCXX->getLambdaManglingNumber(), CDecl);
+ ExpectedDecl CDeclOrErr = import(DCXX->getLambdaContextDecl());
+ if (!CDeclOrErr)
+ return CDeclOrErr.takeError();
+ D2CXX->setLambdaMangling(DCXX->getLambdaManglingNumber(), *CDeclOrErr);
} else if (DCXX->isInjectedClassName()) {
// We have to be careful to do a similar dance to the one in
// Sema::ActOnStartCXXMemberDeclarations
CXXRecordDecl *const PrevDecl = nullptr;
const bool DelayTypeCreation = true;
if (GetImportedOrCreateDecl(D2CXX, D, Importer.getToContext(),
- D->getTagKind(), DC, StartLoc, Loc,
+ D->getTagKind(), DC, *BeginLocOrErr, Loc,
Name.getAsIdentifierInfo(), PrevDecl,
DelayTypeCreation))
return D2CXX;
@@ -2351,7 +2756,7 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
D2CXX, dyn_cast<CXXRecordDecl>(DC));
} else {
if (GetImportedOrCreateDecl(D2CXX, D, Importer.getToContext(),
- D->getTagKind(), DC, StartLoc, Loc,
+ D->getTagKind(), DC, *BeginLocOrErr, Loc,
Name.getAsIdentifierInfo(),
cast_or_null<CXXRecordDecl>(PrevDecl)))
return D2CXX;
@@ -2365,10 +2770,9 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
if (ClassTemplateDecl *FromDescribed =
DCXX->getDescribedClassTemplate()) {
- auto *ToDescribed = cast_or_null<ClassTemplateDecl>(
- Importer.Import(FromDescribed));
- if (!ToDescribed)
- return nullptr;
+ ClassTemplateDecl *ToDescribed;
+ if (Error Err = importInto(ToDescribed, FromDescribed))
+ return std::move(Err);
D2CXX->setDescribedClassTemplate(ToDescribed);
if (!DCXX->isInjectedClassName() && !IsFriendTemplate) {
// In a record describing a template the type should be an
@@ -2398,51 +2802,58 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
TemplateSpecializationKind SK =
MemberInfo->getTemplateSpecializationKind();
CXXRecordDecl *FromInst = DCXX->getInstantiatedFromMemberClass();
- auto *ToInst =
- cast_or_null<CXXRecordDecl>(Importer.Import(FromInst));
- if (FromInst && !ToInst)
- return nullptr;
- D2CXX->setInstantiationOfMemberClass(ToInst, SK);
- D2CXX->getMemberSpecializationInfo()->setPointOfInstantiation(
- Importer.Import(MemberInfo->getPointOfInstantiation()));
+
+ if (Expected<CXXRecordDecl *> ToInstOrErr = import(FromInst))
+ D2CXX->setInstantiationOfMemberClass(*ToInstOrErr, SK);
+ else
+ return ToInstOrErr.takeError();
+
+ if (ExpectedSLoc POIOrErr =
+ import(MemberInfo->getPointOfInstantiation()))
+ D2CXX->getMemberSpecializationInfo()->setPointOfInstantiation(
+ *POIOrErr);
+ else
+ return POIOrErr.takeError();
}
+
} else {
if (GetImportedOrCreateDecl(D2, D, Importer.getToContext(),
- D->getTagKind(), DC, StartLoc, Loc,
+ D->getTagKind(), DC, *BeginLocOrErr, Loc,
Name.getAsIdentifierInfo(), PrevDecl))
return D2;
D2->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(D2);
}
- D2->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
+ if (auto QualifierLocOrErr = import(D->getQualifierLoc()))
+ D2->setQualifierInfo(*QualifierLocOrErr);
+ else
+ return QualifierLocOrErr.takeError();
+
if (D->isAnonymousStructOrUnion())
D2->setAnonymousStructOrUnion(true);
}
Importer.MapImported(D, D2);
- if (D->isCompleteDefinition() && ImportDefinition(D, D2, IDK_Default))
- return nullptr;
+ if (D->isCompleteDefinition())
+ if (Error Err = ImportDefinition(D, D2, IDK_Default))
+ return std::move(Err);
return D2;
}
-Decl *ASTNodeImporter::VisitEnumConstantDecl(EnumConstantDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitEnumConstantDecl(EnumConstantDecl *D) {
// Import the major distinguishing characteristics of this enumerator.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
- QualType T = Importer.Import(D->getType());
- if (T.isNull())
- return nullptr;
-
// Determine whether there are any other declarations with the same name and
// in the same context.
if (!LexicalDC->isFunctionOrMethod()) {
@@ -2467,18 +2878,22 @@ Decl *ASTNodeImporter::VisitEnumConstantDecl(EnumConstantDecl *D) {
ConflictingDecls.data(),
ConflictingDecls.size());
if (!Name)
- return nullptr;
+ return make_error<ImportError>(ImportError::NameConflict);
}
}
- Expr *Init = Importer.Import(D->getInitExpr());
- if (D->getInitExpr() && !Init)
- return nullptr;
+ ExpectedType TypeOrErr = import(D->getType());
+ if (!TypeOrErr)
+ return TypeOrErr.takeError();
+
+ ExpectedExpr InitOrErr = import(D->getInitExpr());
+ if (!InitOrErr)
+ return InitOrErr.takeError();
EnumConstantDecl *ToEnumerator;
if (GetImportedOrCreateDecl(
ToEnumerator, D, Importer.getToContext(), cast<EnumDecl>(DC), Loc,
- Name.getAsIdentifierInfo(), T, Init, D->getInitVal()))
+ Name.getAsIdentifierInfo(), *TypeOrErr, *InitOrErr, D->getInitVal()))
return ToEnumerator;
ToEnumerator->setAccess(D->getAccess());
@@ -2487,52 +2902,57 @@ Decl *ASTNodeImporter::VisitEnumConstantDecl(EnumConstantDecl *D) {
return ToEnumerator;
}
-bool ASTNodeImporter::ImportTemplateInformation(FunctionDecl *FromFD,
- FunctionDecl *ToFD) {
+Error ASTNodeImporter::ImportTemplateInformation(
+ FunctionDecl *FromFD, FunctionDecl *ToFD) {
switch (FromFD->getTemplatedKind()) {
case FunctionDecl::TK_NonTemplate:
case FunctionDecl::TK_FunctionTemplate:
- return false;
+ return Error::success();
case FunctionDecl::TK_MemberSpecialization: {
- auto *InstFD = cast_or_null<FunctionDecl>(
- Importer.Import(FromFD->getInstantiatedFromMemberFunction()));
- if (!InstFD)
- return true;
-
TemplateSpecializationKind TSK = FromFD->getTemplateSpecializationKind();
- SourceLocation POI = Importer.Import(
- FromFD->getMemberSpecializationInfo()->getPointOfInstantiation());
- ToFD->setInstantiationOfMemberFunction(InstFD, TSK);
- ToFD->getMemberSpecializationInfo()->setPointOfInstantiation(POI);
- return false;
+
+ if (Expected<FunctionDecl *> InstFDOrErr =
+ import(FromFD->getInstantiatedFromMemberFunction()))
+ ToFD->setInstantiationOfMemberFunction(*InstFDOrErr, TSK);
+ else
+ return InstFDOrErr.takeError();
+
+ if (ExpectedSLoc POIOrErr = import(
+ FromFD->getMemberSpecializationInfo()->getPointOfInstantiation()))
+ ToFD->getMemberSpecializationInfo()->setPointOfInstantiation(*POIOrErr);
+ else
+ return POIOrErr.takeError();
+
+ return Error::success();
}
case FunctionDecl::TK_FunctionTemplateSpecialization: {
- FunctionTemplateDecl* Template;
- OptionalTemplateArgsTy ToTemplArgs;
- std::tie(Template, ToTemplArgs) =
+ auto FunctionAndArgsOrErr =
ImportFunctionTemplateWithTemplateArgsFromSpecialization(FromFD);
- if (!Template || !ToTemplArgs)
- return true;
+ if (!FunctionAndArgsOrErr)
+ return FunctionAndArgsOrErr.takeError();
TemplateArgumentList *ToTAList = TemplateArgumentList::CreateCopy(
- Importer.getToContext(), *ToTemplArgs);
+ Importer.getToContext(), std::get<1>(*FunctionAndArgsOrErr));
auto *FTSInfo = FromFD->getTemplateSpecializationInfo();
TemplateArgumentListInfo ToTAInfo;
const auto *FromTAArgsAsWritten = FTSInfo->TemplateArgumentsAsWritten;
if (FromTAArgsAsWritten)
- if (ImportTemplateArgumentListInfo(*FromTAArgsAsWritten, ToTAInfo))
- return true;
+ if (Error Err = ImportTemplateArgumentListInfo(
+ *FromTAArgsAsWritten, ToTAInfo))
+ return Err;
- SourceLocation POI = Importer.Import(FTSInfo->getPointOfInstantiation());
+ ExpectedSLoc POIOrErr = import(FTSInfo->getPointOfInstantiation());
+ if (!POIOrErr)
+ return POIOrErr.takeError();
TemplateSpecializationKind TSK = FTSInfo->getTemplateSpecializationKind();
ToFD->setFunctionTemplateSpecialization(
- Template, ToTAList, /* InsertPos= */ nullptr,
- TSK, FromTAArgsAsWritten ? &ToTAInfo : nullptr, POI);
- return false;
+ std::get<0>(*FunctionAndArgsOrErr), ToTAList, /* InsertPos= */ nullptr,
+ TSK, FromTAArgsAsWritten ? &ToTAInfo : nullptr, *POIOrErr);
+ return Error::success();
}
case FunctionDecl::TK_DependentFunctionTemplateSpecialization: {
@@ -2540,53 +2960,56 @@ bool ASTNodeImporter::ImportTemplateInformation(FunctionDecl *FromFD,
UnresolvedSet<8> TemplDecls;
unsigned NumTemplates = FromInfo->getNumTemplates();
for (unsigned I = 0; I < NumTemplates; I++) {
- if (auto *ToFTD = cast_or_null<FunctionTemplateDecl>(
- Importer.Import(FromInfo->getTemplate(I))))
- TemplDecls.addDecl(ToFTD);
+ if (Expected<FunctionTemplateDecl *> ToFTDOrErr =
+ import(FromInfo->getTemplate(I)))
+ TemplDecls.addDecl(*ToFTDOrErr);
else
- return true;
+ return ToFTDOrErr.takeError();
}
// Import TemplateArgumentListInfo.
TemplateArgumentListInfo ToTAInfo;
- if (ImportTemplateArgumentListInfo(
- FromInfo->getLAngleLoc(), FromInfo->getRAngleLoc(),
- llvm::makeArrayRef(FromInfo->getTemplateArgs(),
- FromInfo->getNumTemplateArgs()),
- ToTAInfo))
- return true;
+ if (Error Err = ImportTemplateArgumentListInfo(
+ FromInfo->getLAngleLoc(), FromInfo->getRAngleLoc(),
+ llvm::makeArrayRef(
+ FromInfo->getTemplateArgs(), FromInfo->getNumTemplateArgs()),
+ ToTAInfo))
+ return Err;
ToFD->setDependentTemplateSpecialization(Importer.getToContext(),
TemplDecls, ToTAInfo);
- return false;
+ return Error::success();
}
}
llvm_unreachable("All cases should be covered!");
}
-FunctionDecl *
+Expected<FunctionDecl *>
ASTNodeImporter::FindFunctionTemplateSpecialization(FunctionDecl *FromFD) {
- FunctionTemplateDecl* Template;
- OptionalTemplateArgsTy ToTemplArgs;
- std::tie(Template, ToTemplArgs) =
+ auto FunctionAndArgsOrErr =
ImportFunctionTemplateWithTemplateArgsFromSpecialization(FromFD);
- if (!Template || !ToTemplArgs)
- return nullptr;
+ if (!FunctionAndArgsOrErr)
+ return FunctionAndArgsOrErr.takeError();
+ FunctionTemplateDecl *Template;
+ TemplateArgsTy ToTemplArgs;
+ std::tie(Template, ToTemplArgs) = *FunctionAndArgsOrErr;
void *InsertPos = nullptr;
- auto *FoundSpec = Template->findSpecialization(*ToTemplArgs, InsertPos);
+ auto *FoundSpec = Template->findSpecialization(ToTemplArgs, InsertPos);
return FoundSpec;
}
-Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
- SmallVector<Decl*, 2> Redecls = getCanonicalForwardRedeclChain(D);
+ SmallVector<Decl *, 2> Redecls = getCanonicalForwardRedeclChain(D);
auto RedeclIt = Redecls.begin();
// Import the first part of the decl chain. I.e. import all previous
// declarations starting from the canonical decl.
- for (; RedeclIt != Redecls.end() && *RedeclIt != D; ++RedeclIt)
- if (!Importer.Import(*RedeclIt))
- return nullptr;
+ for (; RedeclIt != Redecls.end() && *RedeclIt != D; ++RedeclIt) {
+ ExpectedDecl ToRedeclOrErr = import(*RedeclIt);
+ if (!ToRedeclOrErr)
+ return ToRedeclOrErr.takeError();
+ }
assert(*RedeclIt == D);
// Import the major distinguishing characteristics of this function.
@@ -2594,8 +3017,8 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
@@ -2609,10 +3032,12 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
// FIXME handle member function templates (TK_MemberSpecialization) similarly?
if (D->getTemplatedKind() ==
FunctionDecl::TK_FunctionTemplateSpecialization) {
- if (FunctionDecl *FoundFunction = FindFunctionTemplateSpecialization(D)) {
- if (D->doesThisDeclarationHaveABody() &&
- FoundFunction->hasBody())
- return Importer.Imported(D, FoundFunction);
+ auto FoundFunctionOrErr = FindFunctionTemplateSpecialization(D);
+ if (!FoundFunctionOrErr)
+ return FoundFunctionOrErr.takeError();
+ if (FunctionDecl *FoundFunction = *FoundFunctionOrErr) {
+ if (D->doesThisDeclarationHaveABody() && FoundFunction->hasBody())
+ return Importer.MapImported(D, FoundFunction);
FoundByLookup = FoundFunction;
}
}
@@ -2673,13 +3098,14 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
ConflictingDecls.data(),
ConflictingDecls.size());
if (!Name)
- return nullptr;
+ return make_error<ImportError>(ImportError::NameConflict);
}
}
DeclarationNameInfo NameInfo(Name, Loc);
// Import additional name location/type info.
- ImportDeclarationNameLoc(D->getNameInfo(), NameInfo);
+ if (Error Err = ImportDeclarationNameLoc(D->getNameInfo(), NameInfo))
+ return std::move(Err);
QualType FromTy = D->getType();
bool usedDifferentExceptionSpec = false;
@@ -2700,84 +3126,93 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
}
}
- // Import the type.
- QualType T = Importer.Import(FromTy);
- if (T.isNull())
- return nullptr;
+ QualType T;
+ TypeSourceInfo *TInfo;
+ SourceLocation ToInnerLocStart, ToEndLoc;
+ NestedNameSpecifierLoc ToQualifierLoc;
+ if (auto Imp = importSeq(
+ FromTy, D->getTypeSourceInfo(), D->getInnerLocStart(),
+ D->getQualifierLoc(), D->getEndLoc()))
+ std::tie(T, TInfo, ToInnerLocStart, ToQualifierLoc, ToEndLoc) = *Imp;
+ else
+ return Imp.takeError();
// Import the function parameters.
SmallVector<ParmVarDecl *, 8> Parameters;
for (auto P : D->parameters()) {
- auto *ToP = cast_or_null<ParmVarDecl>(Importer.Import(P));
- if (!ToP)
- return nullptr;
-
- Parameters.push_back(ToP);
+ if (Expected<ParmVarDecl *> ToPOrErr = import(P))
+ Parameters.push_back(*ToPOrErr);
+ else
+ return ToPOrErr.takeError();
}
- TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
- if (D->getTypeSourceInfo() && !TInfo)
- return nullptr;
-
// Create the imported function.
FunctionDecl *ToFunction = nullptr;
- SourceLocation InnerLocStart = Importer.Import(D->getInnerLocStart());
if (auto *FromConstructor = dyn_cast<CXXConstructorDecl>(D)) {
if (GetImportedOrCreateDecl<CXXConstructorDecl>(
- ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC),
- InnerLocStart, NameInfo, T, TInfo, FromConstructor->isExplicit(),
- D->isInlineSpecified(), D->isImplicit(), D->isConstexpr()))
+ ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC),
+ ToInnerLocStart, NameInfo, T, TInfo,
+ FromConstructor->isExplicit(),
+ D->isInlineSpecified(), D->isImplicit(), D->isConstexpr()))
return ToFunction;
- if (unsigned NumInitializers = FromConstructor->getNumCtorInitializers()) {
- SmallVector<CXXCtorInitializer *, 4> CtorInitializers;
- for (auto *I : FromConstructor->inits()) {
- auto *ToI = cast_or_null<CXXCtorInitializer>(Importer.Import(I));
- if (!ToI && I)
- return nullptr;
- CtorInitializers.push_back(ToI);
- }
- auto **Memory =
- new (Importer.getToContext()) CXXCtorInitializer *[NumInitializers];
- std::copy(CtorInitializers.begin(), CtorInitializers.end(), Memory);
- auto *ToCtor = cast<CXXConstructorDecl>(ToFunction);
- ToCtor->setCtorInitializers(Memory);
- ToCtor->setNumCtorInitializers(NumInitializers);
- }
} else if (isa<CXXDestructorDecl>(D)) {
if (GetImportedOrCreateDecl<CXXDestructorDecl>(
- ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC),
- InnerLocStart, NameInfo, T, TInfo, D->isInlineSpecified(),
- D->isImplicit()))
+ ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC),
+ ToInnerLocStart, NameInfo, T, TInfo, D->isInlineSpecified(),
+ D->isImplicit()))
return ToFunction;
} else if (CXXConversionDecl *FromConversion =
dyn_cast<CXXConversionDecl>(D)) {
if (GetImportedOrCreateDecl<CXXConversionDecl>(
ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC),
- InnerLocStart, NameInfo, T, TInfo, D->isInlineSpecified(),
+ ToInnerLocStart, NameInfo, T, TInfo, D->isInlineSpecified(),
FromConversion->isExplicit(), D->isConstexpr(), SourceLocation()))
return ToFunction;
} else if (auto *Method = dyn_cast<CXXMethodDecl>(D)) {
if (GetImportedOrCreateDecl<CXXMethodDecl>(
ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC),
- InnerLocStart, NameInfo, T, TInfo, Method->getStorageClass(),
+ ToInnerLocStart, NameInfo, T, TInfo, Method->getStorageClass(),
Method->isInlineSpecified(), D->isConstexpr(), SourceLocation()))
return ToFunction;
} else {
if (GetImportedOrCreateDecl(ToFunction, D, Importer.getToContext(), DC,
- InnerLocStart, NameInfo, T, TInfo,
+ ToInnerLocStart, NameInfo, T, TInfo,
D->getStorageClass(), D->isInlineSpecified(),
D->hasWrittenPrototype(), D->isConstexpr()))
return ToFunction;
}
- // Import the qualifier, if any.
- ToFunction->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
+ // Connect the redecl chain.
+ if (FoundByLookup) {
+ auto *Recent = const_cast<FunctionDecl *>(
+ FoundByLookup->getMostRecentDecl());
+ ToFunction->setPreviousDecl(Recent);
+ }
+
+ // Import Ctor initializers.
+ if (auto *FromConstructor = dyn_cast<CXXConstructorDecl>(D)) {
+ if (unsigned NumInitializers = FromConstructor->getNumCtorInitializers()) {
+ SmallVector<CXXCtorInitializer *, 4> CtorInitializers(NumInitializers);
+ // Import first, then allocate memory and copy if there was no error.
+ if (Error Err = ImportContainerChecked(
+ FromConstructor->inits(), CtorInitializers))
+ return std::move(Err);
+ auto **Memory =
+ new (Importer.getToContext()) CXXCtorInitializer *[NumInitializers];
+ std::copy(CtorInitializers.begin(), CtorInitializers.end(), Memory);
+ auto *ToCtor = cast<CXXConstructorDecl>(ToFunction);
+ ToCtor->setCtorInitializers(Memory);
+ ToCtor->setNumCtorInitializers(NumInitializers);
+ }
+ }
+
+ ToFunction->setQualifierInfo(ToQualifierLoc);
ToFunction->setAccess(D->getAccess());
ToFunction->setLexicalDeclContext(LexicalDC);
ToFunction->setVirtualAsWritten(D->isVirtualAsWritten());
ToFunction->setTrivial(D->isTrivial());
ToFunction->setPure(D->isPure());
- ToFunction->setRangeEnd(Importer.Import(D->getEndLoc()));
+ ToFunction->setRangeEnd(ToEndLoc);
// Set the parameters.
for (auto *Param : Parameters) {
@@ -2786,12 +3221,6 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
}
ToFunction->setParams(Parameters);
- if (FoundByLookup) {
- auto *Recent = const_cast<FunctionDecl *>(
- FoundByLookup->getMostRecentDecl());
- ToFunction->setPreviousDecl(Recent);
- }
-
// We need to complete creation of FunctionProtoTypeLoc manually with setting
// params it refers to.
if (TInfo) {
@@ -2804,30 +3233,33 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
if (usedDifferentExceptionSpec) {
// Update FunctionProtoType::ExtProtoInfo.
- QualType T = Importer.Import(D->getType());
- if (T.isNull())
- return nullptr;
- ToFunction->setType(T);
+ if (ExpectedType TyOrErr = import(D->getType()))
+ ToFunction->setType(*TyOrErr);
+ else
+ return TyOrErr.takeError();
}
// Import the describing template function, if any.
- if (FromFT)
- if (!Importer.Import(FromFT))
- return nullptr;
+ if (FromFT) {
+ auto ToFTOrErr = import(FromFT);
+ if (!ToFTOrErr)
+ return ToFTOrErr.takeError();
+ }
if (D->doesThisDeclarationHaveABody()) {
if (Stmt *FromBody = D->getBody()) {
- if (Stmt *ToBody = Importer.Import(FromBody)) {
- ToFunction->setBody(ToBody);
- }
+ if (ExpectedStmt ToBodyOrErr = import(FromBody))
+ ToFunction->setBody(*ToBodyOrErr);
+ else
+ return ToBodyOrErr.takeError();
}
}
// FIXME: Other bits to merge?
// If it is a template, import all related things.
- if (ImportTemplateInformation(D, ToFunction))
- return nullptr;
+ if (Error Err = ImportTemplateInformation(D, ToFunction))
+ return std::move(Err);
bool IsFriend = D->isInIdentifierNamespace(Decl::IDNS_OrdinaryFriend);
@@ -2847,41 +3279,43 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
DC->makeDeclVisibleInContext(ToFunction);
}
- // Import the rest of the chain. I.e. import all subsequent declarations.
- for (++RedeclIt; RedeclIt != Redecls.end(); ++RedeclIt)
- if (!Importer.Import(*RedeclIt))
- return nullptr;
-
if (auto *FromCXXMethod = dyn_cast<CXXMethodDecl>(D))
ImportOverrides(cast<CXXMethodDecl>(ToFunction), FromCXXMethod);
+ // Import the rest of the chain. I.e. import all subsequent declarations.
+ for (++RedeclIt; RedeclIt != Redecls.end(); ++RedeclIt) {
+ ExpectedDecl ToRedeclOrErr = import(*RedeclIt);
+ if (!ToRedeclOrErr)
+ return ToRedeclOrErr.takeError();
+ }
+
return ToFunction;
}
-Decl *ASTNodeImporter::VisitCXXMethodDecl(CXXMethodDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitCXXMethodDecl(CXXMethodDecl *D) {
return VisitFunctionDecl(D);
}
-Decl *ASTNodeImporter::VisitCXXConstructorDecl(CXXConstructorDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitCXXConstructorDecl(CXXConstructorDecl *D) {
return VisitCXXMethodDecl(D);
}
-Decl *ASTNodeImporter::VisitCXXDestructorDecl(CXXDestructorDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitCXXDestructorDecl(CXXDestructorDecl *D) {
return VisitCXXMethodDecl(D);
}
-Decl *ASTNodeImporter::VisitCXXConversionDecl(CXXConversionDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitCXXConversionDecl(CXXConversionDecl *D) {
return VisitCXXMethodDecl(D);
}
-Decl *ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
// Import the major distinguishing characteristics of a variable.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
@@ -2889,7 +3323,7 @@ Decl *ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
SmallVector<NamedDecl *, 2> FoundDecls;
DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
for (auto *FoundDecl : FoundDecls) {
- if (auto *FoundField = dyn_cast<FieldDecl>(FoundDecl)) {
+ if (FieldDecl *FoundField = dyn_cast<FieldDecl>(FoundDecl)) {
// For anonymous fields, match up by index.
if (!Name &&
ASTImporter::getFieldIndex(D) !=
@@ -2907,64 +3341,67 @@ Decl *ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
// We don't have yet the initializer set.
if (FoundField->hasInClassInitializer() &&
!FoundField->getInClassInitializer()) {
- Expr *ToInitializer = Importer.Import(FromInitializer);
- if (!ToInitializer)
- // We can't return a nullptr here,
+ if (ExpectedExpr ToInitializerOrErr = import(FromInitializer))
+ FoundField->setInClassInitializer(*ToInitializerOrErr);
+ else {
+            // We can't return an error here,
// since we already mapped D as imported.
+ // FIXME: warning message?
+ consumeError(ToInitializerOrErr.takeError());
return FoundField;
- FoundField->setInClassInitializer(ToInitializer);
+ }
}
}
return FoundField;
}
+      // FIXME: Why is this case not handled by calling HandleNameConflict?
Importer.ToDiag(Loc, diag::err_odr_field_type_inconsistent)
<< Name << D->getType() << FoundField->getType();
Importer.ToDiag(FoundField->getLocation(), diag::note_odr_value_here)
<< FoundField->getType();
- return nullptr;
+
+ return make_error<ImportError>(ImportError::NameConflict);
}
}
- // Import the type.
- QualType T = Importer.Import(D->getType());
- if (T.isNull())
- return nullptr;
-
- TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
- Expr *BitWidth = Importer.Import(D->getBitWidth());
- if (!BitWidth && D->getBitWidth())
- return nullptr;
+ QualType ToType;
+ TypeSourceInfo *ToTInfo;
+ Expr *ToBitWidth;
+ SourceLocation ToInnerLocStart;
+ Expr *ToInitializer;
+ if (auto Imp = importSeq(
+ D->getType(), D->getTypeSourceInfo(), D->getBitWidth(),
+ D->getInnerLocStart(), D->getInClassInitializer()))
+ std::tie(
+ ToType, ToTInfo, ToBitWidth, ToInnerLocStart, ToInitializer) = *Imp;
+ else
+ return Imp.takeError();
FieldDecl *ToField;
if (GetImportedOrCreateDecl(ToField, D, Importer.getToContext(), DC,
- Importer.Import(D->getInnerLocStart()), Loc,
- Name.getAsIdentifierInfo(), T, TInfo, BitWidth,
- D->isMutable(), D->getInClassInitStyle()))
+ ToInnerLocStart, Loc, Name.getAsIdentifierInfo(),
+ ToType, ToTInfo, ToBitWidth, D->isMutable(),
+ D->getInClassInitStyle()))
return ToField;
ToField->setAccess(D->getAccess());
ToField->setLexicalDeclContext(LexicalDC);
- if (Expr *FromInitializer = D->getInClassInitializer()) {
- Expr *ToInitializer = Importer.Import(FromInitializer);
- if (ToInitializer)
- ToField->setInClassInitializer(ToInitializer);
- else
- return nullptr;
- }
+ if (ToInitializer)
+ ToField->setInClassInitializer(ToInitializer);
ToField->setImplicit(D->isImplicit());
LexicalDC->addDeclInternal(ToField);
return ToField;
}
-Decl *ASTNodeImporter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
// Import the major distinguishing characteristics of a variable.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
@@ -2990,39 +3427,40 @@ Decl *ASTNodeImporter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
if (!Name && I < N-1)
continue;
+      // FIXME: Why is this case not handled by calling HandleNameConflict?
Importer.ToDiag(Loc, diag::err_odr_field_type_inconsistent)
<< Name << D->getType() << FoundField->getType();
Importer.ToDiag(FoundField->getLocation(), diag::note_odr_value_here)
<< FoundField->getType();
- return nullptr;
+
+ return make_error<ImportError>(ImportError::NameConflict);
}
}
// Import the type.
- QualType T = Importer.Import(D->getType());
- if (T.isNull())
- return nullptr;
+ auto TypeOrErr = import(D->getType());
+ if (!TypeOrErr)
+ return TypeOrErr.takeError();
auto **NamedChain =
new (Importer.getToContext()) NamedDecl*[D->getChainingSize()];
unsigned i = 0;
- for (auto *PI : D->chain()) {
- Decl *D = Importer.Import(PI);
- if (!D)
- return nullptr;
- NamedChain[i++] = cast<NamedDecl>(D);
- }
+ for (auto *PI : D->chain())
+ if (Expected<NamedDecl *> ToD = import(PI))
+ NamedChain[i++] = *ToD;
+ else
+ return ToD.takeError();
llvm::MutableArrayRef<NamedDecl *> CH = {NamedChain, D->getChainingSize()};
IndirectFieldDecl *ToIndirectField;
if (GetImportedOrCreateDecl(ToIndirectField, D, Importer.getToContext(), DC,
- Loc, Name.getAsIdentifierInfo(), T, CH))
+ Loc, Name.getAsIdentifierInfo(), *TypeOrErr, CH))
// FIXME: here we leak `NamedChain`, which is allocated before
return ToIndirectField;
- for (const auto *A : D->attrs())
- ToIndirectField->addAttr(Importer.Import(A));
+ for (const auto *Attr : D->attrs())
+ ToIndirectField->addAttr(Importer.Import(Attr));
ToIndirectField->setAccess(D->getAccess());
ToIndirectField->setLexicalDeclContext(LexicalDC);
@@ -3030,13 +3468,11 @@ Decl *ASTNodeImporter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
return ToIndirectField;
}
-Decl *ASTNodeImporter::VisitFriendDecl(FriendDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitFriendDecl(FriendDecl *D) {
// Import the major distinguishing characteristics of a declaration.
- DeclContext *DC = Importer.ImportContext(D->getDeclContext());
- DeclContext *LexicalDC = D->getDeclContext() == D->getLexicalDeclContext()
- ? DC : Importer.ImportContext(D->getLexicalDeclContext());
- if (!DC || !LexicalDC)
- return nullptr;
+ DeclContext *DC, *LexicalDC;
+ if (Error Err = ImportDeclContext(D, DC, LexicalDC))
+ return std::move(Err);
// Determine whether we've already imported this decl.
// FriendDecl is not a NamedDecl so we cannot use localUncachedLookup.
@@ -3061,30 +3497,42 @@ Decl *ASTNodeImporter::VisitFriendDecl(FriendDecl *D) {
// Not found. Create it.
FriendDecl::FriendUnion ToFU;
if (NamedDecl *FriendD = D->getFriendDecl()) {
- auto *ToFriendD = cast_or_null<NamedDecl>(Importer.Import(FriendD));
- if (ToFriendD && FriendD->getFriendObjectKind() != Decl::FOK_None &&
+ NamedDecl *ToFriendD;
+ if (Error Err = importInto(ToFriendD, FriendD))
+ return std::move(Err);
+
+ if (FriendD->getFriendObjectKind() != Decl::FOK_None &&
!(FriendD->isInIdentifierNamespace(Decl::IDNS_NonMemberOperator)))
ToFriendD->setObjectOfFriendDecl(false);
ToFU = ToFriendD;
- } else // The friend is a type, not a decl.
- ToFU = Importer.Import(D->getFriendType());
- if (!ToFU)
- return nullptr;
+ } else { // The friend is a type, not a decl.
+ if (auto TSIOrErr = import(D->getFriendType()))
+ ToFU = *TSIOrErr;
+ else
+ return TSIOrErr.takeError();
+ }
SmallVector<TemplateParameterList *, 1> ToTPLists(D->NumTPLists);
auto **FromTPLists = D->getTrailingObjects<TemplateParameterList *>();
for (unsigned I = 0; I < D->NumTPLists; I++) {
- TemplateParameterList *List = ImportTemplateParameterList(FromTPLists[I]);
- if (!List)
- return nullptr;
- ToTPLists[I] = List;
+ if (auto ListOrErr = ImportTemplateParameterList(FromTPLists[I]))
+ ToTPLists[I] = *ListOrErr;
+ else
+ return ListOrErr.takeError();
}
+ auto LocationOrErr = import(D->getLocation());
+ if (!LocationOrErr)
+ return LocationOrErr.takeError();
+ auto FriendLocOrErr = import(D->getFriendLoc());
+ if (!FriendLocOrErr)
+ return FriendLocOrErr.takeError();
+
FriendDecl *FrD;
if (GetImportedOrCreateDecl(FrD, D, Importer.getToContext(), DC,
- Importer.Import(D->getLocation()), ToFU,
- Importer.Import(D->getFriendLoc()), ToTPLists))
+ *LocationOrErr, ToFU,
+ *FriendLocOrErr, ToTPLists))
return FrD;
FrD->setAccess(D->getAccess());
@@ -3093,14 +3541,14 @@ Decl *ASTNodeImporter::VisitFriendDecl(FriendDecl *D) {
return FrD;
}
-Decl *ASTNodeImporter::VisitObjCIvarDecl(ObjCIvarDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitObjCIvarDecl(ObjCIvarDecl *D) {
// Import the major distinguishing characteristics of an ivar.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
@@ -3108,7 +3556,7 @@ Decl *ASTNodeImporter::VisitObjCIvarDecl(ObjCIvarDecl *D) {
SmallVector<NamedDecl *, 2> FoundDecls;
DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
for (auto *FoundDecl : FoundDecls) {
- if (auto *FoundIvar = dyn_cast<ObjCIvarDecl>(FoundDecl)) {
+ if (ObjCIvarDecl *FoundIvar = dyn_cast<ObjCIvarDecl>(FoundDecl)) {
if (Importer.IsStructurallyEquivalent(D->getType(),
FoundIvar->getType())) {
Importer.MapImported(D, FoundIvar);
@@ -3119,26 +3567,27 @@ Decl *ASTNodeImporter::VisitObjCIvarDecl(ObjCIvarDecl *D) {
<< Name << D->getType() << FoundIvar->getType();
Importer.ToDiag(FoundIvar->getLocation(), diag::note_odr_value_here)
<< FoundIvar->getType();
- return nullptr;
+
+ return make_error<ImportError>(ImportError::NameConflict);
}
}
- // Import the type.
- QualType T = Importer.Import(D->getType());
- if (T.isNull())
- return nullptr;
-
- TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
- Expr *BitWidth = Importer.Import(D->getBitWidth());
- if (!BitWidth && D->getBitWidth())
- return nullptr;
+ QualType ToType;
+ TypeSourceInfo *ToTypeSourceInfo;
+ Expr *ToBitWidth;
+ SourceLocation ToInnerLocStart;
+ if (auto Imp = importSeq(
+          D->getType(), D->getTypeSourceInfo(), D->getBitWidth(),
+          D->getInnerLocStart()))
+ std::tie(ToType, ToTypeSourceInfo, ToBitWidth, ToInnerLocStart) = *Imp;
+ else
+ return Imp.takeError();
ObjCIvarDecl *ToIvar;
if (GetImportedOrCreateDecl(
ToIvar, D, Importer.getToContext(), cast<ObjCContainerDecl>(DC),
- Importer.Import(D->getInnerLocStart()), Loc,
- Name.getAsIdentifierInfo(), T, TInfo, D->getAccessControl(), BitWidth,
- D->getSynthesize()))
+ ToInnerLocStart, Loc, Name.getAsIdentifierInfo(),
+ ToType, ToTypeSourceInfo,
+          D->getAccessControl(), ToBitWidth, D->getSynthesize()))
return ToIvar;
ToIvar->setLexicalDeclContext(LexicalDC);
@@ -3146,15 +3595,17 @@ Decl *ASTNodeImporter::VisitObjCIvarDecl(ObjCIvarDecl *D) {
return ToIvar;
}
-Decl *ASTNodeImporter::VisitVarDecl(VarDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitVarDecl(VarDecl *D) {
SmallVector<Decl*, 2> Redecls = getCanonicalForwardRedeclChain(D);
auto RedeclIt = Redecls.begin();
// Import the first part of the decl chain. I.e. import all previous
// declarations starting from the canonical decl.
- for (; RedeclIt != Redecls.end() && *RedeclIt != D; ++RedeclIt)
- if (!Importer.Import(*RedeclIt))
- return nullptr;
+ for (; RedeclIt != Redecls.end() && *RedeclIt != D; ++RedeclIt) {
+ ExpectedDecl RedeclOrErr = import(*RedeclIt);
+ if (!RedeclOrErr)
+ return RedeclOrErr.takeError();
+ }
assert(*RedeclIt == D);
// Import the major distinguishing characteristics of a variable.
@@ -3162,8 +3613,8 @@ Decl *ASTNodeImporter::VisitVarDecl(VarDecl *D) {
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
@@ -3213,11 +3664,11 @@ Decl *ASTNodeImporter::VisitVarDecl(VarDecl *D) {
if (isa<IncompleteArrayType>(FoundArray) &&
isa<ConstantArrayType>(TArray)) {
// Import the type.
- QualType T = Importer.Import(D->getType());
- if (T.isNull())
- return nullptr;
+ if (auto TyOrErr = import(D->getType()))
+ FoundVar->setType(*TyOrErr);
+ else
+ return TyOrErr.takeError();
- FoundVar->setType(T);
FoundByLookup = FoundVar;
break;
} else if (isa<IncompleteArrayType>(TArray) &&
@@ -3242,25 +3693,31 @@ Decl *ASTNodeImporter::VisitVarDecl(VarDecl *D) {
ConflictingDecls.data(),
ConflictingDecls.size());
if (!Name)
- return nullptr;
+ return make_error<ImportError>(ImportError::NameConflict);
}
}
- // Import the type.
- QualType T = Importer.Import(D->getType());
- if (T.isNull())
- return nullptr;
+ QualType ToType;
+ TypeSourceInfo *ToTypeSourceInfo;
+ SourceLocation ToInnerLocStart;
+ NestedNameSpecifierLoc ToQualifierLoc;
+ if (auto Imp = importSeq(
+ D->getType(), D->getTypeSourceInfo(), D->getInnerLocStart(),
+ D->getQualifierLoc()))
+ std::tie(ToType, ToTypeSourceInfo, ToInnerLocStart, ToQualifierLoc) = *Imp;
+ else
+ return Imp.takeError();
// Create the imported variable.
- TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
VarDecl *ToVar;
if (GetImportedOrCreateDecl(ToVar, D, Importer.getToContext(), DC,
- Importer.Import(D->getInnerLocStart()), Loc,
- Name.getAsIdentifierInfo(), T, TInfo,
+ ToInnerLocStart, Loc,
+ Name.getAsIdentifierInfo(),
+ ToType, ToTypeSourceInfo,
D->getStorageClass()))
return ToVar;
- ToVar->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
+ ToVar->setQualifierInfo(ToQualifierLoc);
ToVar->setAccess(D->getAccess());
ToVar->setLexicalDeclContext(LexicalDC);
@@ -3269,8 +3726,8 @@ Decl *ASTNodeImporter::VisitVarDecl(VarDecl *D) {
ToVar->setPreviousDecl(Recent);
}
- if (ImportInitializer(D, ToVar))
- return nullptr;
+ if (Error Err = ImportInitializer(D, ToVar))
+ return std::move(Err);
if (D->isConstexpr())
ToVar->setConstexpr(true);
@@ -3281,65 +3738,60 @@ Decl *ASTNodeImporter::VisitVarDecl(VarDecl *D) {
LexicalDC->addDeclInternal(ToVar);
// Import the rest of the chain. I.e. import all subsequent declarations.
- for (++RedeclIt; RedeclIt != Redecls.end(); ++RedeclIt)
- if (!Importer.Import(*RedeclIt))
- return nullptr;
+ for (++RedeclIt; RedeclIt != Redecls.end(); ++RedeclIt) {
+ ExpectedDecl RedeclOrErr = import(*RedeclIt);
+ if (!RedeclOrErr)
+ return RedeclOrErr.takeError();
+ }
return ToVar;
}
-Decl *ASTNodeImporter::VisitImplicitParamDecl(ImplicitParamDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitImplicitParamDecl(ImplicitParamDecl *D) {
// Parameters are created in the translation unit's context, then moved
// into the function declaration's context afterward.
DeclContext *DC = Importer.getToContext().getTranslationUnitDecl();
- // Import the name of this declaration.
- DeclarationName Name = Importer.Import(D->getDeclName());
- if (D->getDeclName() && !Name)
- return nullptr;
-
- // Import the location of this declaration.
- SourceLocation Loc = Importer.Import(D->getLocation());
-
- // Import the parameter's type.
- QualType T = Importer.Import(D->getType());
- if (T.isNull())
- return nullptr;
+ DeclarationName ToDeclName;
+ SourceLocation ToLocation;
+ QualType ToType;
+ if (auto Imp = importSeq(D->getDeclName(), D->getLocation(), D->getType()))
+ std::tie(ToDeclName, ToLocation, ToType) = *Imp;
+ else
+ return Imp.takeError();
// Create the imported parameter.
ImplicitParamDecl *ToParm = nullptr;
- if (GetImportedOrCreateDecl(ToParm, D, Importer.getToContext(), DC, Loc,
- Name.getAsIdentifierInfo(), T,
- D->getParameterKind()))
+ if (GetImportedOrCreateDecl(ToParm, D, Importer.getToContext(), DC,
+ ToLocation, ToDeclName.getAsIdentifierInfo(),
+ ToType, D->getParameterKind()))
return ToParm;
return ToParm;
}
-Decl *ASTNodeImporter::VisitParmVarDecl(ParmVarDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitParmVarDecl(ParmVarDecl *D) {
// Parameters are created in the translation unit's context, then moved
// into the function declaration's context afterward.
DeclContext *DC = Importer.getToContext().getTranslationUnitDecl();
- // Import the name of this declaration.
- DeclarationName Name = Importer.Import(D->getDeclName());
- if (D->getDeclName() && !Name)
- return nullptr;
-
- // Import the location of this declaration.
- SourceLocation Loc = Importer.Import(D->getLocation());
-
- // Import the parameter's type.
- QualType T = Importer.Import(D->getType());
- if (T.isNull())
- return nullptr;
+ DeclarationName ToDeclName;
+ SourceLocation ToLocation, ToInnerLocStart;
+ QualType ToType;
+ TypeSourceInfo *ToTypeSourceInfo;
+ if (auto Imp = importSeq(
+          D->getDeclName(), D->getLocation(), D->getType(),
+          D->getInnerLocStart(), D->getTypeSourceInfo()))
+ std::tie(
+ ToDeclName, ToLocation, ToType, ToInnerLocStart,
+ ToTypeSourceInfo) = *Imp;
+ else
+ return Imp.takeError();
- // Create the imported parameter.
- TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
ParmVarDecl *ToParm;
if (GetImportedOrCreateDecl(ToParm, D, Importer.getToContext(), DC,
- Importer.Import(D->getInnerLocStart()), Loc,
- Name.getAsIdentifierInfo(), T, TInfo,
- D->getStorageClass(),
+ ToInnerLocStart, ToLocation,
+ ToDeclName.getAsIdentifierInfo(), ToType,
+ ToTypeSourceInfo, D->getStorageClass(),
/*DefaultArg*/ nullptr))
return ToParm;
@@ -3347,21 +3799,19 @@ Decl *ASTNodeImporter::VisitParmVarDecl(ParmVarDecl *D) {
ToParm->setHasInheritedDefaultArg(D->hasInheritedDefaultArg());
ToParm->setKNRPromoted(D->isKNRPromoted());
- Expr *ToDefArg = nullptr;
- Expr *FromDefArg = nullptr;
if (D->hasUninstantiatedDefaultArg()) {
- FromDefArg = D->getUninstantiatedDefaultArg();
- ToDefArg = Importer.Import(FromDefArg);
- ToParm->setUninstantiatedDefaultArg(ToDefArg);
+ if (auto ToDefArgOrErr = import(D->getUninstantiatedDefaultArg()))
+ ToParm->setUninstantiatedDefaultArg(*ToDefArgOrErr);
+ else
+ return ToDefArgOrErr.takeError();
} else if (D->hasUnparsedDefaultArg()) {
ToParm->setUnparsedDefaultArg();
} else if (D->hasDefaultArg()) {
- FromDefArg = D->getDefaultArg();
- ToDefArg = Importer.Import(FromDefArg);
- ToParm->setDefaultArg(ToDefArg);
+ if (auto ToDefArgOrErr = import(D->getDefaultArg()))
+ ToParm->setDefaultArg(*ToDefArgOrErr);
+ else
+ return ToDefArgOrErr.takeError();
}
- if (FromDefArg && !ToDefArg)
- return nullptr;
if (D->isObjCMethodParameter()) {
ToParm->setObjCMethodScopeInfo(D->getFunctionScopeIndex());
@@ -3374,14 +3824,14 @@ Decl *ASTNodeImporter::VisitParmVarDecl(ParmVarDecl *D) {
return ToParm;
}
-Decl *ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
// Import the major distinguishing characteristics of a method.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
@@ -3401,7 +3851,8 @@ Decl *ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
Importer.ToDiag(FoundMethod->getLocation(),
diag::note_odr_objc_method_here)
<< D->isInstanceMethod() << Name;
- return nullptr;
+
+ return make_error<ImportError>(ImportError::NameConflict);
}
// Check the number of parameters.
@@ -3412,7 +3863,8 @@ Decl *ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
Importer.ToDiag(FoundMethod->getLocation(),
diag::note_odr_objc_method_here)
<< D->isInstanceMethod() << Name;
- return nullptr;
+
+ return make_error<ImportError>(ImportError::NameConflict);
}
// Check parameter types.
@@ -3427,7 +3879,8 @@ Decl *ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
<< (*P)->getType() << (*FoundP)->getType();
Importer.ToDiag((*FoundP)->getLocation(), diag::note_odr_value_here)
<< (*FoundP)->getType();
- return nullptr;
+
+ return make_error<ImportError>(ImportError::NameConflict);
}
}
@@ -3439,7 +3892,8 @@ Decl *ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
Importer.ToDiag(FoundMethod->getLocation(),
diag::note_odr_objc_method_here)
<< D->isInstanceMethod() << Name;
- return nullptr;
+
+ return make_error<ImportError>(ImportError::NameConflict);
}
// FIXME: Any other bits we need to merge?
@@ -3447,18 +3901,20 @@ Decl *ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
}
}
- // Import the result type.
- QualType ResultTy = Importer.Import(D->getReturnType());
- if (ResultTy.isNull())
- return nullptr;
-
- TypeSourceInfo *ReturnTInfo = Importer.Import(D->getReturnTypeSourceInfo());
+ SourceLocation ToEndLoc;
+ QualType ToReturnType;
+ TypeSourceInfo *ToReturnTypeSourceInfo;
+ if (auto Imp = importSeq(
+ D->getEndLoc(), D->getReturnType(), D->getReturnTypeSourceInfo()))
+ std::tie(ToEndLoc, ToReturnType, ToReturnTypeSourceInfo) = *Imp;
+ else
+ return Imp.takeError();
ObjCMethodDecl *ToMethod;
if (GetImportedOrCreateDecl(
ToMethod, D, Importer.getToContext(), Loc,
- Importer.Import(D->getEndLoc()), Name.getObjCSelector(), ResultTy,
- ReturnTInfo, DC, D->isInstanceMethod(), D->isVariadic(),
+ ToEndLoc, Name.getObjCSelector(), ToReturnType,
+ ToReturnTypeSourceInfo, DC, D->isInstanceMethod(), D->isVariadic(),
D->isPropertyAccessor(), D->isImplicit(), D->isDefined(),
D->getImplementationControl(), D->hasRelatedResultType()))
return ToMethod;
@@ -3469,11 +3925,10 @@ Decl *ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
// Import the parameters
SmallVector<ParmVarDecl *, 5> ToParams;
for (auto *FromP : D->parameters()) {
- auto *ToP = cast_or_null<ParmVarDecl>(Importer.Import(FromP));
- if (!ToP)
- return nullptr;
-
- ToParams.push_back(ToP);
+ if (Expected<ParmVarDecl *> ToPOrErr = import(FromP))
+ ToParams.push_back(*ToPOrErr);
+ else
+ return ToPOrErr.takeError();
}
// Set the parameters.
@@ -3482,82 +3937,99 @@ Decl *ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
ToMethod->addDeclInternal(ToParam);
}
- SmallVector<SourceLocation, 12> SelLocs;
- D->getSelectorLocs(SelLocs);
- for (auto &Loc : SelLocs)
- Loc = Importer.Import(Loc);
+ SmallVector<SourceLocation, 12> FromSelLocs;
+ D->getSelectorLocs(FromSelLocs);
+ SmallVector<SourceLocation, 12> ToSelLocs(FromSelLocs.size());
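+  // ImportContainerChecked() imports each location from FromSelLocs into
+  // ToSelLocs, stopping at the first error.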
+ if (Error Err = ImportContainerChecked(FromSelLocs, ToSelLocs))
+ return std::move(Err);
- ToMethod->setMethodParams(Importer.getToContext(), ToParams, SelLocs);
+ ToMethod->setMethodParams(Importer.getToContext(), ToParams, ToSelLocs);
ToMethod->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(ToMethod);
return ToMethod;
}
-Decl *ASTNodeImporter::VisitObjCTypeParamDecl(ObjCTypeParamDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitObjCTypeParamDecl(ObjCTypeParamDecl *D) {
// Import the major distinguishing characteristics of a category.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
- TypeSourceInfo *BoundInfo = Importer.Import(D->getTypeSourceInfo());
- if (!BoundInfo)
- return nullptr;
+ SourceLocation ToVarianceLoc, ToLocation, ToColonLoc;
+ TypeSourceInfo *ToTypeSourceInfo;
+ if (auto Imp = importSeq(
+ D->getVarianceLoc(), D->getLocation(), D->getColonLoc(),
+ D->getTypeSourceInfo()))
+ std::tie(ToVarianceLoc, ToLocation, ToColonLoc, ToTypeSourceInfo) = *Imp;
+ else
+ return Imp.takeError();
ObjCTypeParamDecl *Result;
if (GetImportedOrCreateDecl(
Result, D, Importer.getToContext(), DC, D->getVariance(),
- Importer.Import(D->getVarianceLoc()), D->getIndex(),
- Importer.Import(D->getLocation()), Name.getAsIdentifierInfo(),
- Importer.Import(D->getColonLoc()), BoundInfo))
+ ToVarianceLoc, D->getIndex(),
+ ToLocation, Name.getAsIdentifierInfo(),
+ ToColonLoc, ToTypeSourceInfo))
return Result;
Result->setLexicalDeclContext(LexicalDC);
return Result;
}
-Decl *ASTNodeImporter::VisitObjCCategoryDecl(ObjCCategoryDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitObjCCategoryDecl(ObjCCategoryDecl *D) {
// Import the major distinguishing characteristics of a category.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
- auto *ToInterface =
- cast_or_null<ObjCInterfaceDecl>(Importer.Import(D->getClassInterface()));
- if (!ToInterface)
- return nullptr;
+ ObjCInterfaceDecl *ToInterface;
+ if (Error Err = importInto(ToInterface, D->getClassInterface()))
+ return std::move(Err);
// Determine if we've already encountered this category.
ObjCCategoryDecl *MergeWithCategory
= ToInterface->FindCategoryDeclaration(Name.getAsIdentifierInfo());
ObjCCategoryDecl *ToCategory = MergeWithCategory;
if (!ToCategory) {
+ SourceLocation ToAtStartLoc, ToCategoryNameLoc;
+ SourceLocation ToIvarLBraceLoc, ToIvarRBraceLoc;
+ if (auto Imp = importSeq(
+ D->getAtStartLoc(), D->getCategoryNameLoc(),
+ D->getIvarLBraceLoc(), D->getIvarRBraceLoc()))
+ std::tie(
+ ToAtStartLoc, ToCategoryNameLoc,
+ ToIvarLBraceLoc, ToIvarRBraceLoc) = *Imp;
+ else
+ return Imp.takeError();
if (GetImportedOrCreateDecl(ToCategory, D, Importer.getToContext(), DC,
- Importer.Import(D->getAtStartLoc()), Loc,
- Importer.Import(D->getCategoryNameLoc()),
+ ToAtStartLoc, Loc,
+ ToCategoryNameLoc,
Name.getAsIdentifierInfo(), ToInterface,
/*TypeParamList=*/nullptr,
- Importer.Import(D->getIvarLBraceLoc()),
- Importer.Import(D->getIvarRBraceLoc())))
+ ToIvarLBraceLoc,
+ ToIvarRBraceLoc))
return ToCategory;
ToCategory->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(ToCategory);
- // Import the type parameter list after calling Imported, to avoid
+    // Import the type parameter list after calling MapImported, to avoid
// loops when bringing in their DeclContext.
- ToCategory->setTypeParamList(ImportObjCTypeParamList(
- D->getTypeParamList()));
+ if (auto PListOrErr = ImportObjCTypeParamList(D->getTypeParamList()))
+ ToCategory->setTypeParamList(*PListOrErr);
+ else
+ return PListOrErr.takeError();
// Import protocols
SmallVector<ObjCProtocolDecl *, 4> Protocols;
@@ -3568,45 +4040,48 @@ Decl *ASTNodeImporter::VisitObjCCategoryDecl(ObjCCategoryDecl *D) {
FromProtoEnd = D->protocol_end();
FromProto != FromProtoEnd;
++FromProto, ++FromProtoLoc) {
- auto *ToProto =
- cast_or_null<ObjCProtocolDecl>(Importer.Import(*FromProto));
- if (!ToProto)
- return nullptr;
- Protocols.push_back(ToProto);
- ProtocolLocs.push_back(Importer.Import(*FromProtoLoc));
+ if (Expected<ObjCProtocolDecl *> ToProtoOrErr = import(*FromProto))
+ Protocols.push_back(*ToProtoOrErr);
+ else
+ return ToProtoOrErr.takeError();
+
+ if (ExpectedSLoc ToProtoLocOrErr = import(*FromProtoLoc))
+ ProtocolLocs.push_back(*ToProtoLocOrErr);
+ else
+ return ToProtoLocOrErr.takeError();
}
// FIXME: If we're merging, make sure that the protocol list is the same.
ToCategory->setProtocolList(Protocols.data(), Protocols.size(),
ProtocolLocs.data(), Importer.getToContext());
+
} else {
Importer.MapImported(D, ToCategory);
}
// Import all of the members of this category.
- ImportDeclContext(D);
+ if (Error Err = ImportDeclContext(D))
+ return std::move(Err);
// If we have an implementation, import it as well.
if (D->getImplementation()) {
- auto *Impl =
- cast_or_null<ObjCCategoryImplDecl>(
- Importer.Import(D->getImplementation()));
- if (!Impl)
- return nullptr;
-
- ToCategory->setImplementation(Impl);
+ if (Expected<ObjCCategoryImplDecl *> ToImplOrErr =
+ import(D->getImplementation()))
+ ToCategory->setImplementation(*ToImplOrErr);
+ else
+ return ToImplOrErr.takeError();
}
return ToCategory;
}
-bool ASTNodeImporter::ImportDefinition(ObjCProtocolDecl *From,
- ObjCProtocolDecl *To,
- ImportDefinitionKind Kind) {
+Error ASTNodeImporter::ImportDefinition(
+ ObjCProtocolDecl *From, ObjCProtocolDecl *To, ImportDefinitionKind Kind) {
if (To->getDefinition()) {
if (shouldForceImportDeclContext(Kind))
- ImportDeclContext(From);
- return false;
+ if (Error Err = ImportDeclContext(From))
+ return Err;
+ return Error::success();
}
// Start the protocol definition
@@ -3615,17 +4090,22 @@ bool ASTNodeImporter::ImportDefinition(ObjCProtocolDecl *From,
// Import protocols
SmallVector<ObjCProtocolDecl *, 4> Protocols;
SmallVector<SourceLocation, 4> ProtocolLocs;
- ObjCProtocolDecl::protocol_loc_iterator
- FromProtoLoc = From->protocol_loc_begin();
+ ObjCProtocolDecl::protocol_loc_iterator FromProtoLoc =
+ From->protocol_loc_begin();
for (ObjCProtocolDecl::protocol_iterator FromProto = From->protocol_begin(),
FromProtoEnd = From->protocol_end();
FromProto != FromProtoEnd;
++FromProto, ++FromProtoLoc) {
- auto *ToProto = cast_or_null<ObjCProtocolDecl>(Importer.Import(*FromProto));
- if (!ToProto)
- return true;
- Protocols.push_back(ToProto);
- ProtocolLocs.push_back(Importer.Import(*FromProtoLoc));
+ if (Expected<ObjCProtocolDecl *> ToProtoOrErr = import(*FromProto))
+ Protocols.push_back(*ToProtoOrErr);
+ else
+ return ToProtoOrErr.takeError();
+
+ if (ExpectedSLoc ToProtoLocOrErr = import(*FromProtoLoc))
+ ProtocolLocs.push_back(*ToProtoLocOrErr);
+ else
+ return ToProtoLocOrErr.takeError();
+
}
// FIXME: If we're merging, make sure that the protocol list is the same.
@@ -3634,22 +4114,22 @@ bool ASTNodeImporter::ImportDefinition(ObjCProtocolDecl *From,
if (shouldForceImportDeclContext(Kind)) {
// Import all of the members of this protocol.
- ImportDeclContext(From, /*ForceImport=*/true);
+ if (Error Err = ImportDeclContext(From, /*ForceImport=*/true))
+ return Err;
}
- return false;
+ return Error::success();
}
-Decl *ASTNodeImporter::VisitObjCProtocolDecl(ObjCProtocolDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitObjCProtocolDecl(ObjCProtocolDecl *D) {
// If this protocol has a definition in the translation unit we're coming
// from, but this particular declaration is not that definition, import the
// definition and map to that.
ObjCProtocolDecl *Definition = D->getDefinition();
if (Definition && Definition != D) {
- Decl *ImportedDef = Importer.Import(Definition);
- if (!ImportedDef)
- return nullptr;
-
- return Importer.MapImported(D, ImportedDef);
+ if (ExpectedDecl ImportedDefOrErr = import(Definition))
+ return Importer.MapImported(D, *ImportedDefOrErr);
+ else
+ return ImportedDefOrErr.takeError();
}
// Import the major distinguishing characteristics of a protocol.
@@ -3657,8 +4137,8 @@ Decl *ASTNodeImporter::VisitObjCProtocolDecl(ObjCProtocolDecl *D) {
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
@@ -3675,9 +4155,13 @@ Decl *ASTNodeImporter::VisitObjCProtocolDecl(ObjCProtocolDecl *D) {
ObjCProtocolDecl *ToProto = MergeWithProtocol;
if (!ToProto) {
+ auto ToAtBeginLocOrErr = import(D->getAtStartLoc());
+ if (!ToAtBeginLocOrErr)
+ return ToAtBeginLocOrErr.takeError();
+
if (GetImportedOrCreateDecl(ToProto, D, Importer.getToContext(), DC,
Name.getAsIdentifierInfo(), Loc,
- Importer.Import(D->getAtStartLoc()),
+ *ToAtBeginLocOrErr,
/*PrevDecl=*/nullptr))
return ToProto;
ToProto->setLexicalDeclContext(LexicalDC);
@@ -3686,29 +4170,39 @@ Decl *ASTNodeImporter::VisitObjCProtocolDecl(ObjCProtocolDecl *D) {
Importer.MapImported(D, ToProto);
- if (D->isThisDeclarationADefinition() && ImportDefinition(D, ToProto))
- return nullptr;
+ if (D->isThisDeclarationADefinition())
+ if (Error Err = ImportDefinition(D, ToProto))
+ return std::move(Err);
return ToProto;
}
-Decl *ASTNodeImporter::VisitLinkageSpecDecl(LinkageSpecDecl *D) {
- DeclContext *DC = Importer.ImportContext(D->getDeclContext());
- DeclContext *LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
+ExpectedDecl ASTNodeImporter::VisitLinkageSpecDecl(LinkageSpecDecl *D) {
+ DeclContext *DC, *LexicalDC;
+ if (Error Err = ImportDeclContext(D, DC, LexicalDC))
+ return std::move(Err);
- SourceLocation ExternLoc = Importer.Import(D->getExternLoc());
- SourceLocation LangLoc = Importer.Import(D->getLocation());
+ ExpectedSLoc ExternLocOrErr = import(D->getExternLoc());
+ if (!ExternLocOrErr)
+ return ExternLocOrErr.takeError();
+
+ ExpectedSLoc LangLocOrErr = import(D->getLocation());
+ if (!LangLocOrErr)
+ return LangLocOrErr.takeError();
bool HasBraces = D->hasBraces();
LinkageSpecDecl *ToLinkageSpec;
if (GetImportedOrCreateDecl(ToLinkageSpec, D, Importer.getToContext(), DC,
- ExternLoc, LangLoc, D->getLanguage(), HasBraces))
+ *ExternLocOrErr, *LangLocOrErr,
+ D->getLanguage(), HasBraces))
return ToLinkageSpec;
if (HasBraces) {
- SourceLocation RBraceLoc = Importer.Import(D->getRBraceLoc());
- ToLinkageSpec->setRBraceLoc(RBraceLoc);
+ ExpectedSLoc RBraceLocOrErr = import(D->getRBraceLoc());
+ if (!RBraceLocOrErr)
+ return RBraceLocOrErr.takeError();
+ ToLinkageSpec->setRBraceLoc(*RBraceLocOrErr);
}
ToLinkageSpec->setLexicalDeclContext(LexicalDC);
@@ -3717,24 +4211,31 @@ Decl *ASTNodeImporter::VisitLinkageSpecDecl(LinkageSpecDecl *D) {
return ToLinkageSpec;
}
-Decl *ASTNodeImporter::VisitUsingDecl(UsingDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitUsingDecl(UsingDecl *D) {
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD = nullptr;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
- DeclarationNameInfo NameInfo(Name,
- Importer.Import(D->getNameInfo().getLoc()));
- ImportDeclarationNameLoc(D->getNameInfo(), NameInfo);
+ SourceLocation ToLoc, ToUsingLoc;
+ NestedNameSpecifierLoc ToQualifierLoc;
+ if (auto Imp = importSeq(
+ D->getNameInfo().getLoc(), D->getUsingLoc(), D->getQualifierLoc()))
+ std::tie(ToLoc, ToUsingLoc, ToQualifierLoc) = *Imp;
+ else
+ return Imp.takeError();
+
+ DeclarationNameInfo NameInfo(Name, ToLoc);
+ if (Error Err = ImportDeclarationNameLoc(D->getNameInfo(), NameInfo))
+ return std::move(Err);
UsingDecl *ToUsing;
if (GetImportedOrCreateDecl(ToUsing, D, Importer.getToContext(), DC,
- Importer.Import(D->getUsingLoc()),
- Importer.Import(D->getQualifierLoc()), NameInfo,
+ ToUsingLoc, ToQualifierLoc, NameInfo,
D->hasTypename()))
return ToUsing;
@@ -3743,48 +4244,45 @@ Decl *ASTNodeImporter::VisitUsingDecl(UsingDecl *D) {
if (NamedDecl *FromPattern =
Importer.getFromContext().getInstantiatedFromUsingDecl(D)) {
- if (auto *ToPattern =
- dyn_cast_or_null<NamedDecl>(Importer.Import(FromPattern)))
- Importer.getToContext().setInstantiatedFromUsingDecl(ToUsing, ToPattern);
+ if (Expected<NamedDecl *> ToPatternOrErr = import(FromPattern))
+ Importer.getToContext().setInstantiatedFromUsingDecl(
+ ToUsing, *ToPatternOrErr);
else
- return nullptr;
+ return ToPatternOrErr.takeError();
}
- for (auto *FromShadow : D->shadows()) {
- if (auto *ToShadow =
- dyn_cast_or_null<UsingShadowDecl>(Importer.Import(FromShadow)))
- ToUsing->addShadowDecl(ToShadow);
+ for (UsingShadowDecl *FromShadow : D->shadows()) {
+ if (Expected<UsingShadowDecl *> ToShadowOrErr = import(FromShadow))
+ ToUsing->addShadowDecl(*ToShadowOrErr);
else
- // FIXME: We return a nullptr here but the definition is already created
+      // FIXME: We return an error here but the definition is already created
// and available with lookups. How to fix this?..
- return nullptr;
+ return ToShadowOrErr.takeError();
}
return ToUsing;
}
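
Several of the rewritten visitors, starting with VisitUsingDecl above, batch their location and qualifier imports through importSeq and unpack the result with std::tie. The importSeq helper itself is not shown in these hunks; the sketch below shows the general shape with a hypothetical three-value version over ints:

    #include "llvm/Support/Error.h"
    #include <tuple>
    using namespace llvm;

    // Hypothetical per-value import; fails for negative input.
    Expected<int> importOne(int V) {
      if (V < 0)
        return make_error<StringError>("bad value", inconvertibleErrorCode());
      return V + 1;
    }

    // importSeq-style helper: import every value or fail on the first error.
    Expected<std::tuple<int, int, int>> importSeq3(int A, int B, int C) {
      auto AOrErr = importOne(A);
      if (!AOrErr)
        return AOrErr.takeError();
      auto BOrErr = importOne(B);
      if (!BOrErr)
        return BOrErr.takeError();
      auto COrErr = importOne(C);
      if (!COrErr)
        return COrErr.takeError();
      return std::make_tuple(*AOrErr, *BOrErr, *COrErr);
    }

    // The caller unpacks with std::tie, as the visitors above do.
    Error useImportSeq() {
      int X, Y, Z;
      if (auto Imp = importSeq3(1, 2, 3))
        std::tie(X, Y, Z) = *Imp;
      else
        return Imp.takeError();
      return Error::success();
    }
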
-Decl *ASTNodeImporter::VisitUsingShadowDecl(UsingShadowDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitUsingShadowDecl(UsingShadowDecl *D) {
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD = nullptr;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
- auto *ToUsing = dyn_cast_or_null<UsingDecl>(
- Importer.Import(D->getUsingDecl()));
- if (!ToUsing)
- return nullptr;
+ Expected<UsingDecl *> ToUsingOrErr = import(D->getUsingDecl());
+ if (!ToUsingOrErr)
+ return ToUsingOrErr.takeError();
- auto *ToTarget = dyn_cast_or_null<NamedDecl>(
- Importer.Import(D->getTargetDecl()));
- if (!ToTarget)
- return nullptr;
+ Expected<NamedDecl *> ToTargetOrErr = import(D->getTargetDecl());
+ if (!ToTargetOrErr)
+ return ToTargetOrErr.takeError();
UsingShadowDecl *ToShadow;
if (GetImportedOrCreateDecl(ToShadow, D, Importer.getToContext(), DC, Loc,
- ToUsing, ToTarget))
+ *ToUsingOrErr, *ToTargetOrErr))
return ToShadow;
ToShadow->setLexicalDeclContext(LexicalDC);
@@ -3792,14 +4290,13 @@ Decl *ASTNodeImporter::VisitUsingShadowDecl(UsingShadowDecl *D) {
if (UsingShadowDecl *FromPattern =
Importer.getFromContext().getInstantiatedFromUsingShadowDecl(D)) {
- if (auto *ToPattern =
- dyn_cast_or_null<UsingShadowDecl>(Importer.Import(FromPattern)))
- Importer.getToContext().setInstantiatedFromUsingShadowDecl(ToShadow,
- ToPattern);
+ if (Expected<UsingShadowDecl *> ToPatternOrErr = import(FromPattern))
+ Importer.getToContext().setInstantiatedFromUsingShadowDecl(
+ ToShadow, *ToPatternOrErr);
else
- // FIXME: We return a nullptr here but the definition is already created
+      // FIXME: We return an error here but the definition is already created
// and available with lookups. How to fix this?..
- return nullptr;
+ return ToPatternOrErr.takeError();
}
LexicalDC->addDeclInternal(ToShadow);
@@ -3807,32 +4304,40 @@ Decl *ASTNodeImporter::VisitUsingShadowDecl(UsingShadowDecl *D) {
return ToShadow;
}
-Decl *ASTNodeImporter::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) {
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD = nullptr;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
- DeclContext *ToComAncestor = Importer.ImportContext(D->getCommonAncestor());
- if (!ToComAncestor)
- return nullptr;
-
- auto *ToNominated = cast_or_null<NamespaceDecl>(
- Importer.Import(D->getNominatedNamespace()));
- if (!ToNominated)
- return nullptr;
+ auto ToComAncestorOrErr = Importer.ImportContext(D->getCommonAncestor());
+ if (!ToComAncestorOrErr)
+ return ToComAncestorOrErr.takeError();
+
+ NamespaceDecl *ToNominatedNamespace;
+ SourceLocation ToUsingLoc, ToNamespaceKeyLocation, ToIdentLocation;
+ NestedNameSpecifierLoc ToQualifierLoc;
+ if (auto Imp = importSeq(
+ D->getNominatedNamespace(), D->getUsingLoc(),
+ D->getNamespaceKeyLocation(), D->getQualifierLoc(),
+ D->getIdentLocation()))
+ std::tie(
+ ToNominatedNamespace, ToUsingLoc, ToNamespaceKeyLocation,
+ ToQualifierLoc, ToIdentLocation) = *Imp;
+ else
+ return Imp.takeError();
UsingDirectiveDecl *ToUsingDir;
if (GetImportedOrCreateDecl(ToUsingDir, D, Importer.getToContext(), DC,
- Importer.Import(D->getUsingLoc()),
- Importer.Import(D->getNamespaceKeyLocation()),
- Importer.Import(D->getQualifierLoc()),
- Importer.Import(D->getIdentLocation()),
- ToNominated, ToComAncestor))
+ ToUsingLoc,
+ ToNamespaceKeyLocation,
+ ToQualifierLoc,
+ ToIdentLocation,
+ ToNominatedNamespace, *ToComAncestorOrErr))
return ToUsingDir;
ToUsingDir->setLexicalDeclContext(LexicalDC);
@@ -3841,25 +4346,34 @@ Decl *ASTNodeImporter::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) {
return ToUsingDir;
}
-Decl *ASTNodeImporter::VisitUnresolvedUsingValueDecl(
+ExpectedDecl ASTNodeImporter::VisitUnresolvedUsingValueDecl(
UnresolvedUsingValueDecl *D) {
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD = nullptr;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
- DeclarationNameInfo NameInfo(Name, Importer.Import(D->getNameInfo().getLoc()));
- ImportDeclarationNameLoc(D->getNameInfo(), NameInfo);
+ SourceLocation ToLoc, ToUsingLoc, ToEllipsisLoc;
+ NestedNameSpecifierLoc ToQualifierLoc;
+ if (auto Imp = importSeq(
+ D->getNameInfo().getLoc(), D->getUsingLoc(), D->getQualifierLoc(),
+ D->getEllipsisLoc()))
+ std::tie(ToLoc, ToUsingLoc, ToQualifierLoc, ToEllipsisLoc) = *Imp;
+ else
+ return Imp.takeError();
+
+ DeclarationNameInfo NameInfo(Name, ToLoc);
+ if (Error Err = ImportDeclarationNameLoc(D->getNameInfo(), NameInfo))
+ return std::move(Err);
UnresolvedUsingValueDecl *ToUsingValue;
if (GetImportedOrCreateDecl(ToUsingValue, D, Importer.getToContext(), DC,
- Importer.Import(D->getUsingLoc()),
- Importer.Import(D->getQualifierLoc()), NameInfo,
- Importer.Import(D->getEllipsisLoc())))
+ ToUsingLoc, ToQualifierLoc, NameInfo,
+ ToEllipsisLoc))
return ToUsingValue;
ToUsingValue->setAccess(D->getAccess());
@@ -3869,23 +4383,30 @@ Decl *ASTNodeImporter::VisitUnresolvedUsingValueDecl(
return ToUsingValue;
}
-Decl *ASTNodeImporter::VisitUnresolvedUsingTypenameDecl(
+ExpectedDecl ASTNodeImporter::VisitUnresolvedUsingTypenameDecl(
UnresolvedUsingTypenameDecl *D) {
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD = nullptr;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
+ SourceLocation ToUsingLoc, ToTypenameLoc, ToEllipsisLoc;
+ NestedNameSpecifierLoc ToQualifierLoc;
+ if (auto Imp = importSeq(
+ D->getUsingLoc(), D->getTypenameLoc(), D->getQualifierLoc(),
+ D->getEllipsisLoc()))
+ std::tie(ToUsingLoc, ToTypenameLoc, ToQualifierLoc, ToEllipsisLoc) = *Imp;
+ else
+ return Imp.takeError();
+
UnresolvedUsingTypenameDecl *ToUsing;
if (GetImportedOrCreateDecl(ToUsing, D, Importer.getToContext(), DC,
- Importer.Import(D->getUsingLoc()),
- Importer.Import(D->getTypenameLoc()),
- Importer.Import(D->getQualifierLoc()), Loc, Name,
- Importer.Import(D->getEllipsisLoc())))
+ ToUsingLoc, ToTypenameLoc,
+ ToQualifierLoc, Loc, Name, ToEllipsisLoc))
return ToUsing;
ToUsing->setAccess(D->getAccess());
@@ -3895,16 +4416,17 @@ Decl *ASTNodeImporter::VisitUnresolvedUsingTypenameDecl(
return ToUsing;
}
-bool ASTNodeImporter::ImportDefinition(ObjCInterfaceDecl *From,
- ObjCInterfaceDecl *To,
- ImportDefinitionKind Kind) {
+
+Error ASTNodeImporter::ImportDefinition(
+ ObjCInterfaceDecl *From, ObjCInterfaceDecl *To, ImportDefinitionKind Kind) {
if (To->getDefinition()) {
// Check consistency of superclass.
ObjCInterfaceDecl *FromSuper = From->getSuperClass();
if (FromSuper) {
- FromSuper = cast_or_null<ObjCInterfaceDecl>(Importer.Import(FromSuper));
- if (!FromSuper)
- return true;
+ if (auto FromSuperOrErr = import(FromSuper))
+ FromSuper = *FromSuperOrErr;
+ else
+ return FromSuperOrErr.takeError();
}
ObjCInterfaceDecl *ToSuper = To->getSuperClass();
@@ -3929,8 +4451,9 @@ bool ASTNodeImporter::ImportDefinition(ObjCInterfaceDecl *From,
}
if (shouldForceImportDeclContext(Kind))
- ImportDeclContext(From);
- return false;
+ if (Error Err = ImportDeclContext(From))
+ return Err;
+ return Error::success();
}
// Start the definition.
@@ -3938,28 +4461,32 @@ bool ASTNodeImporter::ImportDefinition(ObjCInterfaceDecl *From,
// If this class has a superclass, import it.
if (From->getSuperClass()) {
- TypeSourceInfo *SuperTInfo = Importer.Import(From->getSuperClassTInfo());
- if (!SuperTInfo)
- return true;
-
- To->setSuperClass(SuperTInfo);
+ if (auto SuperTInfoOrErr = import(From->getSuperClassTInfo()))
+ To->setSuperClass(*SuperTInfoOrErr);
+ else
+ return SuperTInfoOrErr.takeError();
}
// Import protocols
SmallVector<ObjCProtocolDecl *, 4> Protocols;
SmallVector<SourceLocation, 4> ProtocolLocs;
- ObjCInterfaceDecl::protocol_loc_iterator
- FromProtoLoc = From->protocol_loc_begin();
+ ObjCInterfaceDecl::protocol_loc_iterator FromProtoLoc =
+ From->protocol_loc_begin();
for (ObjCInterfaceDecl::protocol_iterator FromProto = From->protocol_begin(),
FromProtoEnd = From->protocol_end();
FromProto != FromProtoEnd;
++FromProto, ++FromProtoLoc) {
- auto *ToProto = cast_or_null<ObjCProtocolDecl>(Importer.Import(*FromProto));
- if (!ToProto)
- return true;
- Protocols.push_back(ToProto);
- ProtocolLocs.push_back(Importer.Import(*FromProtoLoc));
+ if (Expected<ObjCProtocolDecl *> ToProtoOrErr = import(*FromProto))
+ Protocols.push_back(*ToProtoOrErr);
+ else
+ return ToProtoOrErr.takeError();
+
+ if (ExpectedSLoc ToProtoLocOrErr = import(*FromProtoLoc))
+ ProtocolLocs.push_back(*ToProtoLocOrErr);
+ else
+ return ToProtoLocOrErr.takeError();
+
}
// FIXME: If we're merging, make sure that the protocol list is the same.
@@ -3968,58 +4495,66 @@ bool ASTNodeImporter::ImportDefinition(ObjCInterfaceDecl *From,
// Import categories. When the categories themselves are imported, they'll
// hook themselves into this interface.
- for (auto *Cat : From->known_categories())
- Importer.Import(Cat);
+ for (auto *Cat : From->known_categories()) {
+ auto ToCatOrErr = import(Cat);
+ if (!ToCatOrErr)
+ return ToCatOrErr.takeError();
+ }
// If we have an @implementation, import it as well.
if (From->getImplementation()) {
- auto *Impl = cast_or_null<ObjCImplementationDecl>(
- Importer.Import(From->getImplementation()));
- if (!Impl)
- return true;
-
- To->setImplementation(Impl);
+ if (Expected<ObjCImplementationDecl *> ToImplOrErr =
+ import(From->getImplementation()))
+ To->setImplementation(*ToImplOrErr);
+ else
+ return ToImplOrErr.takeError();
}
if (shouldForceImportDeclContext(Kind)) {
// Import all of the members of this class.
- ImportDeclContext(From, /*ForceImport=*/true);
+ if (Error Err = ImportDeclContext(From, /*ForceImport=*/true))
+ return Err;
}
- return false;
+ return Error::success();
}
-ObjCTypeParamList *
+Expected<ObjCTypeParamList *>
ASTNodeImporter::ImportObjCTypeParamList(ObjCTypeParamList *list) {
if (!list)
return nullptr;
SmallVector<ObjCTypeParamDecl *, 4> toTypeParams;
- for (auto fromTypeParam : *list) {
- auto *toTypeParam = cast_or_null<ObjCTypeParamDecl>(
- Importer.Import(fromTypeParam));
- if (!toTypeParam)
- return nullptr;
-
- toTypeParams.push_back(toTypeParam);
+ for (auto *fromTypeParam : *list) {
+ if (auto toTypeParamOrErr = import(fromTypeParam))
+ toTypeParams.push_back(*toTypeParamOrErr);
+ else
+ return toTypeParamOrErr.takeError();
}
+ auto LAngleLocOrErr = import(list->getLAngleLoc());
+ if (!LAngleLocOrErr)
+ return LAngleLocOrErr.takeError();
+
+ auto RAngleLocOrErr = import(list->getRAngleLoc());
+ if (!RAngleLocOrErr)
+ return RAngleLocOrErr.takeError();
+
return ObjCTypeParamList::create(Importer.getToContext(),
- Importer.Import(list->getLAngleLoc()),
+ *LAngleLocOrErr,
toTypeParams,
- Importer.Import(list->getRAngleLoc()));
+ *RAngleLocOrErr);
}
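
ImportObjCTypeParamList now returns Expected<ObjCTypeParamList *>, which keeps the useful distinction between "there is no list" (a successful import of nullptr, as in the early return above) and "the import failed" (an Error). A sketch of that distinction with a hypothetical list type:

    #include "llvm/Support/Error.h"
    using namespace llvm;

    struct ParamList {};   // hypothetical stand-in

    Expected<ParamList *> importList(ParamList *From, bool Ok) {
      if (!From)
        return nullptr;    // success: there was nothing to import
      if (!Ok)
        return make_error<StringError>("list import failed",
                                       inconvertibleErrorCode());
      static ParamList To;
      return &To;          // success: the imported list
    }

    Error caller(ParamList *From) {
      auto ListOrErr = importList(From, /*Ok=*/true);
      if (!ListOrErr)
        return ListOrErr.takeError();
      ParamList *List = *ListOrErr;  // may legitimately be nullptr
      (void)List;
      return Error::success();
    }
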
-Decl *ASTNodeImporter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) {
// If this class has a definition in the translation unit we're coming from,
// but this particular declaration is not that definition, import the
// definition and map to that.
ObjCInterfaceDecl *Definition = D->getDefinition();
if (Definition && Definition != D) {
- Decl *ImportedDef = Importer.Import(Definition);
- if (!ImportedDef)
- return nullptr;
-
- return Importer.MapImported(D, ImportedDef);
+ if (ExpectedDecl ImportedDefOrErr = import(Definition))
+ return Importer.MapImported(D, *ImportedDefOrErr);
+ else
+ return ImportedDefOrErr.takeError();
}
// Import the major distinguishing characteristics of an @interface.
@@ -4027,8 +4562,8 @@ Decl *ASTNodeImporter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) {
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
@@ -4047,9 +4582,13 @@ Decl *ASTNodeImporter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) {
// Create an interface declaration, if one does not already exist.
ObjCInterfaceDecl *ToIface = MergeWithIface;
if (!ToIface) {
+ ExpectedSLoc AtBeginLocOrErr = import(D->getAtStartLoc());
+ if (!AtBeginLocOrErr)
+ return AtBeginLocOrErr.takeError();
+
if (GetImportedOrCreateDecl(
ToIface, D, Importer.getToContext(), DC,
- Importer.Import(D->getAtStartLoc()), Name.getAsIdentifierInfo(),
+ *AtBeginLocOrErr, Name.getAsIdentifierInfo(),
/*TypeParamList=*/nullptr,
/*PrevDecl=*/nullptr, Loc, D->isImplicitInterfaceDecl()))
return ToIface;
@@ -4057,91 +4596,99 @@ Decl *ASTNodeImporter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) {
LexicalDC->addDeclInternal(ToIface);
}
Importer.MapImported(D, ToIface);
- // Import the type parameter list after calling Imported, to avoid
+ // Import the type parameter list after MapImported, to avoid
// loops when bringing in their DeclContext.
- ToIface->setTypeParamList(ImportObjCTypeParamList(
- D->getTypeParamListAsWritten()));
+ if (auto ToPListOrErr =
+ ImportObjCTypeParamList(D->getTypeParamListAsWritten()))
+ ToIface->setTypeParamList(*ToPListOrErr);
+ else
+ return ToPListOrErr.takeError();
- if (D->isThisDeclarationADefinition() && ImportDefinition(D, ToIface))
- return nullptr;
+ if (D->isThisDeclarationADefinition())
+ if (Error Err = ImportDefinition(D, ToIface))
+ return std::move(Err);
return ToIface;
}
-Decl *ASTNodeImporter::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D) {
- auto *Category = cast_or_null<ObjCCategoryDecl>(
- Importer.Import(D->getCategoryDecl()));
- if (!Category)
- return nullptr;
+ExpectedDecl
+ASTNodeImporter::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D) {
+ ObjCCategoryDecl *Category;
+ if (Error Err = importInto(Category, D->getCategoryDecl()))
+ return std::move(Err);
ObjCCategoryImplDecl *ToImpl = Category->getImplementation();
if (!ToImpl) {
- DeclContext *DC = Importer.ImportContext(D->getDeclContext());
- if (!DC)
- return nullptr;
+ DeclContext *DC, *LexicalDC;
+ if (Error Err = ImportDeclContext(D, DC, LexicalDC))
+ return std::move(Err);
+
+ SourceLocation ToLocation, ToAtStartLoc, ToCategoryNameLoc;
+ if (auto Imp = importSeq(
+ D->getLocation(), D->getAtStartLoc(), D->getCategoryNameLoc()))
+ std::tie(ToLocation, ToAtStartLoc, ToCategoryNameLoc) = *Imp;
+ else
+ return Imp.takeError();
- SourceLocation CategoryNameLoc = Importer.Import(D->getCategoryNameLoc());
if (GetImportedOrCreateDecl(
ToImpl, D, Importer.getToContext(), DC,
Importer.Import(D->getIdentifier()), Category->getClassInterface(),
- Importer.Import(D->getLocation()),
- Importer.Import(D->getAtStartLoc()), CategoryNameLoc))
+ ToLocation, ToAtStartLoc, ToCategoryNameLoc))
return ToImpl;
- DeclContext *LexicalDC = DC;
- if (D->getDeclContext() != D->getLexicalDeclContext()) {
- LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
- if (!LexicalDC)
- return nullptr;
-
- ToImpl->setLexicalDeclContext(LexicalDC);
- }
-
+ ToImpl->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(ToImpl);
Category->setImplementation(ToImpl);
}
Importer.MapImported(D, ToImpl);
- ImportDeclContext(D);
+ if (Error Err = ImportDeclContext(D))
+ return std::move(Err);
+
return ToImpl;
}
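
VisitObjCCategoryImplDecl is one of several visitors that now use importInto, which assigns the imported value through an out-parameter and returns only the Error. The helper is not shown in these hunks; a hypothetical reduction of it:

    #include "llvm/Support/Error.h"
    using namespace llvm;

    // Hypothetical per-value import.
    Expected<int> importOne(int V) {
      if (V < 0)
        return make_error<StringError>("bad value", inconvertibleErrorCode());
      return V + 1;
    }

    // importInto-style wrapper: write the result through To, return only Error.
    Error importInto(int &To, int From) {
      auto ToOrErr = importOne(From);
      if (!ToOrErr)
        return ToOrErr.takeError();
      To = *ToOrErr;
      return Error::success();
    }

    Error caller() {
      int Category;
      if (Error Err = importInto(Category, 7))
        return Err;        // mirrors "return std::move(Err)" in the visitors
      return Error::success();
    }
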
-Decl *ASTNodeImporter::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
+ExpectedDecl
+ASTNodeImporter::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
// Find the corresponding interface.
- auto *Iface = cast_or_null<ObjCInterfaceDecl>(
- Importer.Import(D->getClassInterface()));
- if (!Iface)
- return nullptr;
+ ObjCInterfaceDecl *Iface;
+ if (Error Err = importInto(Iface, D->getClassInterface()))
+ return std::move(Err);
// Import the superclass, if any.
- ObjCInterfaceDecl *Super = nullptr;
- if (D->getSuperClass()) {
- Super = cast_or_null<ObjCInterfaceDecl>(
- Importer.Import(D->getSuperClass()));
- if (!Super)
- return nullptr;
- }
+ ObjCInterfaceDecl *Super;
+ if (Error Err = importInto(Super, D->getSuperClass()))
+ return std::move(Err);
ObjCImplementationDecl *Impl = Iface->getImplementation();
if (!Impl) {
// We haven't imported an implementation yet. Create a new @implementation
// now.
+ DeclContext *DC, *LexicalDC;
+ if (Error Err = ImportDeclContext(D, DC, LexicalDC))
+ return std::move(Err);
+
+ SourceLocation ToLocation, ToAtStartLoc, ToSuperClassLoc;
+ SourceLocation ToIvarLBraceLoc, ToIvarRBraceLoc;
+ if (auto Imp = importSeq(
+ D->getLocation(), D->getAtStartLoc(), D->getSuperClassLoc(),
+ D->getIvarLBraceLoc(), D->getIvarRBraceLoc()))
+ std::tie(
+ ToLocation, ToAtStartLoc, ToSuperClassLoc,
+ ToIvarLBraceLoc, ToIvarRBraceLoc) = *Imp;
+ else
+ return Imp.takeError();
+
if (GetImportedOrCreateDecl(Impl, D, Importer.getToContext(),
- Importer.ImportContext(D->getDeclContext()),
- Iface, Super, Importer.Import(D->getLocation()),
- Importer.Import(D->getAtStartLoc()),
- Importer.Import(D->getSuperClassLoc()),
- Importer.Import(D->getIvarLBraceLoc()),
- Importer.Import(D->getIvarRBraceLoc())))
+ DC, Iface, Super,
+ ToLocation,
+ ToAtStartLoc,
+ ToSuperClassLoc,
+ ToIvarLBraceLoc,
+ ToIvarRBraceLoc))
return Impl;
- if (D->getDeclContext() != D->getLexicalDeclContext()) {
- DeclContext *LexicalDC
- = Importer.ImportContext(D->getLexicalDeclContext());
- if (!LexicalDC)
- return nullptr;
- Impl->setLexicalDeclContext(LexicalDC);
- }
+ Impl->setLexicalDeclContext(LexicalDC);
// Associate the implementation with the class it implements.
Iface->setImplementation(Impl);
@@ -4174,24 +4721,26 @@ Decl *ASTNodeImporter::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
else
Importer.FromDiag(D->getLocation(),
diag::note_odr_objc_missing_superclass);
- return nullptr;
+
+ return make_error<ImportError>(ImportError::NameConflict);
}
}
// Import all of the members of this @implementation.
- ImportDeclContext(D);
+ if (Error Err = ImportDeclContext(D))
+ return std::move(Err);
return Impl;
}
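
Where the old code returned nullptr on a name conflict, the new code returns make_error<ImportError>(ImportError::NameConflict). The ImportError type itself is not part of these hunks; the sketch below shows the general shape of such an llvm::ErrorInfo subclass with a hypothetical kind enum, not the actual ImportError definition:

    #include "llvm/Support/Error.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    class MyImportError : public ErrorInfo<MyImportError> {
    public:
      enum ErrorKind { NameConflict, Unsupported };

      static char ID;                      // required by ErrorInfo

      MyImportError(ErrorKind K) : Kind(K) {}

      void log(raw_ostream &OS) const override {
        OS << (Kind == NameConflict ? "name conflict" : "unsupported construct");
      }

      std::error_code convertToErrorCode() const override {
        return inconvertibleErrorCode();
      }

      ErrorKind Kind;
    };

    char MyImportError::ID;

    // Usage, mirroring the conflict paths in the visitors above.
    Error reportConflict() {
      return make_error<MyImportError>(MyImportError::NameConflict);
    }
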
-Decl *ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
// Import the major distinguishing characteristics of an @property.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
@@ -4207,7 +4756,8 @@ Decl *ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
<< Name << D->getType() << FoundProp->getType();
Importer.ToDiag(FoundProp->getLocation(), diag::note_odr_value_here)
<< FoundProp->getType();
- return nullptr;
+
+ return make_error<ImportError>(ImportError::NameConflict);
}
// FIXME: Check property attributes, getters, setters, etc.?
@@ -4218,79 +4768,88 @@ Decl *ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
}
}
- // Import the type.
- TypeSourceInfo *TSI = Importer.Import(D->getTypeSourceInfo());
- if (!TSI)
- return nullptr;
+ QualType ToType;
+ TypeSourceInfo *ToTypeSourceInfo;
+ SourceLocation ToAtLoc, ToLParenLoc;
+ if (auto Imp = importSeq(
+ D->getType(), D->getTypeSourceInfo(), D->getAtLoc(), D->getLParenLoc()))
+ std::tie(ToType, ToTypeSourceInfo, ToAtLoc, ToLParenLoc) = *Imp;
+ else
+ return Imp.takeError();
// Create the new property.
ObjCPropertyDecl *ToProperty;
if (GetImportedOrCreateDecl(
ToProperty, D, Importer.getToContext(), DC, Loc,
- Name.getAsIdentifierInfo(), Importer.Import(D->getAtLoc()),
- Importer.Import(D->getLParenLoc()), Importer.Import(D->getType()),
- TSI, D->getPropertyImplementation()))
+ Name.getAsIdentifierInfo(), ToAtLoc,
+ ToLParenLoc, ToType,
+ ToTypeSourceInfo, D->getPropertyImplementation()))
return ToProperty;
+ Selector ToGetterName, ToSetterName;
+ SourceLocation ToGetterNameLoc, ToSetterNameLoc;
+ ObjCMethodDecl *ToGetterMethodDecl, *ToSetterMethodDecl;
+ ObjCIvarDecl *ToPropertyIvarDecl;
+ if (auto Imp = importSeq(
+ D->getGetterName(), D->getSetterName(),
+ D->getGetterNameLoc(), D->getSetterNameLoc(),
+ D->getGetterMethodDecl(), D->getSetterMethodDecl(),
+ D->getPropertyIvarDecl()))
+ std::tie(
+ ToGetterName, ToSetterName,
+ ToGetterNameLoc, ToSetterNameLoc,
+ ToGetterMethodDecl, ToSetterMethodDecl,
+ ToPropertyIvarDecl) = *Imp;
+ else
+ return Imp.takeError();
+
ToProperty->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(ToProperty);
ToProperty->setPropertyAttributes(D->getPropertyAttributes());
ToProperty->setPropertyAttributesAsWritten(
D->getPropertyAttributesAsWritten());
- ToProperty->setGetterName(Importer.Import(D->getGetterName()),
- Importer.Import(D->getGetterNameLoc()));
- ToProperty->setSetterName(Importer.Import(D->getSetterName()),
- Importer.Import(D->getSetterNameLoc()));
- ToProperty->setGetterMethodDecl(
- cast_or_null<ObjCMethodDecl>(Importer.Import(D->getGetterMethodDecl())));
- ToProperty->setSetterMethodDecl(
- cast_or_null<ObjCMethodDecl>(Importer.Import(D->getSetterMethodDecl())));
- ToProperty->setPropertyIvarDecl(
- cast_or_null<ObjCIvarDecl>(Importer.Import(D->getPropertyIvarDecl())));
+ ToProperty->setGetterName(ToGetterName, ToGetterNameLoc);
+ ToProperty->setSetterName(ToSetterName, ToSetterNameLoc);
+ ToProperty->setGetterMethodDecl(ToGetterMethodDecl);
+ ToProperty->setSetterMethodDecl(ToSetterMethodDecl);
+ ToProperty->setPropertyIvarDecl(ToPropertyIvarDecl);
return ToProperty;
}
-Decl *ASTNodeImporter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
- auto *Property = cast_or_null<ObjCPropertyDecl>(
- Importer.Import(D->getPropertyDecl()));
- if (!Property)
- return nullptr;
-
- DeclContext *DC = Importer.ImportContext(D->getDeclContext());
- if (!DC)
- return nullptr;
+ExpectedDecl
+ASTNodeImporter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
+ ObjCPropertyDecl *Property;
+ if (Error Err = importInto(Property, D->getPropertyDecl()))
+ return std::move(Err);
- // Import the lexical declaration context.
- DeclContext *LexicalDC = DC;
- if (D->getDeclContext() != D->getLexicalDeclContext()) {
- LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
- if (!LexicalDC)
- return nullptr;
- }
+ DeclContext *DC, *LexicalDC;
+ if (Error Err = ImportDeclContext(D, DC, LexicalDC))
+ return std::move(Err);
- auto *InImpl = dyn_cast<ObjCImplDecl>(LexicalDC);
- if (!InImpl)
- return nullptr;
+ auto *InImpl = cast<ObjCImplDecl>(LexicalDC);
// Import the ivar (for an @synthesize).
ObjCIvarDecl *Ivar = nullptr;
- if (D->getPropertyIvarDecl()) {
- Ivar = cast_or_null<ObjCIvarDecl>(
- Importer.Import(D->getPropertyIvarDecl()));
- if (!Ivar)
- return nullptr;
- }
+ if (Error Err = importInto(Ivar, D->getPropertyIvarDecl()))
+ return std::move(Err);
ObjCPropertyImplDecl *ToImpl
= InImpl->FindPropertyImplDecl(Property->getIdentifier(),
Property->getQueryKind());
if (!ToImpl) {
+ SourceLocation ToBeginLoc, ToLocation, ToPropertyIvarDeclLoc;
+ if (auto Imp = importSeq(
+ D->getBeginLoc(), D->getLocation(), D->getPropertyIvarDeclLoc()))
+ std::tie(ToBeginLoc, ToLocation, ToPropertyIvarDeclLoc) = *Imp;
+ else
+ return Imp.takeError();
+
if (GetImportedOrCreateDecl(ToImpl, D, Importer.getToContext(), DC,
- Importer.Import(D->getBeginLoc()),
- Importer.Import(D->getLocation()), Property,
+ ToBeginLoc,
+ ToLocation, Property,
D->getPropertyImplementation(), Ivar,
- Importer.Import(D->getPropertyIvarDeclLoc())))
+ ToPropertyIvarDeclLoc))
return ToImpl;
ToImpl->setLexicalDeclContext(LexicalDC);
@@ -4308,7 +4867,8 @@ Decl *ASTNodeImporter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
diag::note_odr_objc_property_impl_kind)
<< D->getPropertyDecl()->getDeclName()
<< (D->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic);
- return nullptr;
+
+ return make_error<ImportError>(ImportError::NameConflict);
}
// For @synthesize, check that we have the same
@@ -4322,7 +4882,8 @@ Decl *ASTNodeImporter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
Importer.FromDiag(D->getPropertyIvarDeclLoc(),
diag::note_odr_objc_synthesize_ivar_here)
<< D->getPropertyIvarDecl()->getDeclName();
- return nullptr;
+
+ return make_error<ImportError>(ImportError::NameConflict);
}
// Merge the existing implementation with the new implementation.
@@ -4332,41 +4893,46 @@ Decl *ASTNodeImporter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
return ToImpl;
}
-Decl *ASTNodeImporter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
+ExpectedDecl
+ASTNodeImporter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
// For template arguments, we adopt the translation unit as our declaration
// context. This context will be fixed when the actual template declaration
// is created.
// FIXME: Import default argument.
+
+ ExpectedSLoc BeginLocOrErr = import(D->getBeginLoc());
+ if (!BeginLocOrErr)
+ return BeginLocOrErr.takeError();
+
+ ExpectedSLoc LocationOrErr = import(D->getLocation());
+ if (!LocationOrErr)
+ return LocationOrErr.takeError();
+
TemplateTypeParmDecl *ToD = nullptr;
(void)GetImportedOrCreateDecl(
ToD, D, Importer.getToContext(),
Importer.getToContext().getTranslationUnitDecl(),
- Importer.Import(D->getBeginLoc()), Importer.Import(D->getLocation()),
+ *BeginLocOrErr, *LocationOrErr,
D->getDepth(), D->getIndex(), Importer.Import(D->getIdentifier()),
D->wasDeclaredWithTypename(), D->isParameterPack());
return ToD;
}
-Decl *
+ExpectedDecl
ASTNodeImporter::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
- // Import the name of this declaration.
- DeclarationName Name = Importer.Import(D->getDeclName());
- if (D->getDeclName() && !Name)
- return nullptr;
-
- // Import the location of this declaration.
- SourceLocation Loc = Importer.Import(D->getLocation());
-
- // Import the type of this declaration.
- QualType T = Importer.Import(D->getType());
- if (T.isNull())
- return nullptr;
-
- // Import type-source information.
- TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
- if (D->getTypeSourceInfo() && !TInfo)
- return nullptr;
+ DeclarationName ToDeclName;
+ SourceLocation ToLocation, ToInnerLocStart;
+ QualType ToType;
+ TypeSourceInfo *ToTypeSourceInfo;
+ if (auto Imp = importSeq(
+ D->getDeclName(), D->getLocation(), D->getType(), D->getTypeSourceInfo(),
+ D->getInnerLocStart()))
+ std::tie(
+ ToDeclName, ToLocation, ToType, ToTypeSourceInfo,
+ ToInnerLocStart) = *Imp;
+ else
+ return Imp.takeError();
// FIXME: Import default argument.
@@ -4374,36 +4940,39 @@ ASTNodeImporter::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
(void)GetImportedOrCreateDecl(
ToD, D, Importer.getToContext(),
Importer.getToContext().getTranslationUnitDecl(),
- Importer.Import(D->getInnerLocStart()), Loc, D->getDepth(),
- D->getPosition(), Name.getAsIdentifierInfo(), T, D->isParameterPack(),
- TInfo);
+ ToInnerLocStart, ToLocation, D->getDepth(),
+ D->getPosition(), ToDeclName.getAsIdentifierInfo(), ToType,
+ D->isParameterPack(), ToTypeSourceInfo);
return ToD;
}
-Decl *
+ExpectedDecl
ASTNodeImporter::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) {
// Import the name of this declaration.
- DeclarationName Name = Importer.Import(D->getDeclName());
- if (D->getDeclName() && !Name)
- return nullptr;
+ auto NameOrErr = import(D->getDeclName());
+ if (!NameOrErr)
+ return NameOrErr.takeError();
// Import the location of this declaration.
- SourceLocation Loc = Importer.Import(D->getLocation());
+ ExpectedSLoc LocationOrErr = import(D->getLocation());
+ if (!LocationOrErr)
+ return LocationOrErr.takeError();
// Import template parameters.
- TemplateParameterList *TemplateParams
- = ImportTemplateParameterList(D->getTemplateParameters());
- if (!TemplateParams)
- return nullptr;
+ auto TemplateParamsOrErr = ImportTemplateParameterList(
+ D->getTemplateParameters());
+ if (!TemplateParamsOrErr)
+ return TemplateParamsOrErr.takeError();
// FIXME: Import default argument.
TemplateTemplateParmDecl *ToD = nullptr;
(void)GetImportedOrCreateDecl(
ToD, D, Importer.getToContext(),
- Importer.getToContext().getTranslationUnitDecl(), Loc, D->getDepth(),
- D->getPosition(), D->isParameterPack(), Name.getAsIdentifierInfo(),
- TemplateParams);
+ Importer.getToContext().getTranslationUnitDecl(), *LocationOrErr,
+ D->getDepth(), D->getPosition(), D->isParameterPack(),
+ (*NameOrErr).getAsIdentifierInfo(),
+ *TemplateParamsOrErr);
return ToD;
}
@@ -4418,7 +4987,7 @@ static ClassTemplateDecl *getDefinition(ClassTemplateDecl *D) {
return TemplateWithDef;
}
-Decl *ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
bool IsFriend = D->getFriendObjectKind() != Decl::FOK_None;
// If this record has a definition in the translation unit we're coming from,
@@ -4427,12 +4996,11 @@ Decl *ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
auto *Definition =
cast_or_null<CXXRecordDecl>(D->getTemplatedDecl()->getDefinition());
if (Definition && Definition != D->getTemplatedDecl() && !IsFriend) {
- Decl *ImportedDef
- = Importer.Import(Definition->getDescribedClassTemplate());
- if (!ImportedDef)
- return nullptr;
-
- return Importer.MapImported(D, ImportedDef);
+ if (ExpectedDecl ImportedDefOrErr = import(
+ Definition->getDescribedClassTemplate()))
+ return Importer.MapImported(D, *ImportedDefOrErr);
+ else
+ return ImportedDefOrErr.takeError();
}
// Import the major distinguishing characteristics of this class template.
@@ -4440,8 +5008,8 @@ Decl *ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
@@ -4490,26 +5058,25 @@ Decl *ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
}
if (!Name)
- return nullptr;
+ return make_error<ImportError>(ImportError::NameConflict);
}
CXXRecordDecl *FromTemplated = D->getTemplatedDecl();
// Create the declaration that is being templated.
- auto *ToTemplated = cast_or_null<CXXRecordDecl>(
- Importer.Import(FromTemplated));
- if (!ToTemplated)
- return nullptr;
+ CXXRecordDecl *ToTemplated;
+ if (Error Err = importInto(ToTemplated, FromTemplated))
+ return std::move(Err);
// Create the class template declaration itself.
- TemplateParameterList *TemplateParams =
- ImportTemplateParameterList(D->getTemplateParameters());
- if (!TemplateParams)
- return nullptr;
+ auto TemplateParamsOrErr = ImportTemplateParameterList(
+ D->getTemplateParameters());
+ if (!TemplateParamsOrErr)
+ return TemplateParamsOrErr.takeError();
ClassTemplateDecl *D2;
if (GetImportedOrCreateDecl(D2, D, Importer.getToContext(), DC, Loc, Name,
- TemplateParams, ToTemplated))
+ *TemplateParamsOrErr, ToTemplated))
return D2;
ToTemplated->setDescribedClassTemplate(D2);
@@ -4534,48 +5101,33 @@ Decl *ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
return D2;
}
-Decl *ASTNodeImporter::VisitClassTemplateSpecializationDecl(
+ExpectedDecl ASTNodeImporter::VisitClassTemplateSpecializationDecl(
ClassTemplateSpecializationDecl *D) {
// If this record has a definition in the translation unit we're coming from,
// but this particular declaration is not that definition, import the
// definition and map to that.
TagDecl *Definition = D->getDefinition();
if (Definition && Definition != D) {
- Decl *ImportedDef = Importer.Import(Definition);
- if (!ImportedDef)
- return nullptr;
-
- return Importer.MapImported(D, ImportedDef);
+ if (ExpectedDecl ImportedDefOrErr = import(Definition))
+ return Importer.MapImported(D, *ImportedDefOrErr);
+ else
+ return ImportedDefOrErr.takeError();
}
- auto *ClassTemplate =
- cast_or_null<ClassTemplateDecl>(Importer.Import(
- D->getSpecializedTemplate()));
- if (!ClassTemplate)
- return nullptr;
+ ClassTemplateDecl *ClassTemplate;
+ if (Error Err = importInto(ClassTemplate, D->getSpecializedTemplate()))
+ return std::move(Err);
// Import the context of this declaration.
- DeclContext *DC = ClassTemplate->getDeclContext();
- if (!DC)
- return nullptr;
-
- DeclContext *LexicalDC = DC;
- if (D->getDeclContext() != D->getLexicalDeclContext()) {
- LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
- if (!LexicalDC)
- return nullptr;
- }
-
- // Import the location of this declaration.
- SourceLocation StartLoc = Importer.Import(D->getBeginLoc());
- SourceLocation IdLoc = Importer.Import(D->getLocation());
+ DeclContext *DC, *LexicalDC;
+ if (Error Err = ImportDeclContext(D, DC, LexicalDC))
+ return std::move(Err);
// Import template arguments.
SmallVector<TemplateArgument, 2> TemplateArgs;
- if (ImportTemplateArguments(D->getTemplateArgs().data(),
- D->getTemplateArgs().size(),
- TemplateArgs))
- return nullptr;
+ if (Error Err = ImportTemplateArguments(
+ D->getTemplateArgs().data(), D->getTemplateArgs().size(), TemplateArgs))
+ return std::move(Err);
// Try to find an existing specialization with these template arguments.
void *InsertPos = nullptr;
@@ -4602,13 +5154,21 @@ Decl *ASTNodeImporter::VisitClassTemplateSpecializationDecl(
    // Import those default field initializers which have been
// instantiated in the "From" context, but not in the "To" context.
- for (auto *FromField : D->fields())
- Importer.Import(FromField);
+ for (auto *FromField : D->fields()) {
+ auto ToOrErr = import(FromField);
+ if (!ToOrErr)
+ // FIXME: return the error?
+ consumeError(ToOrErr.takeError());
+ }
// Import those methods which have been instantiated in the
// "From" context, but not in the "To" context.
- for (CXXMethodDecl *FromM : D->methods())
- Importer.Import(FromM);
+ for (CXXMethodDecl *FromM : D->methods()) {
+ auto ToOrErr = import(FromM);
+ if (!ToOrErr)
+ // FIXME: return the error?
+ consumeError(ToOrErr.takeError());
+ }
// TODO Import instantiated default arguments.
// TODO Import instantiated exception specifications.
@@ -4621,27 +5181,36 @@ Decl *ASTNodeImporter::VisitClassTemplateSpecializationDecl(
} else { // We either couldn't find any previous specialization in the "To"
           // context, or we found one without a definition. Let's create a
           // new specialization and register it with the class template.
+
+ // Import the location of this declaration.
+ ExpectedSLoc BeginLocOrErr = import(D->getBeginLoc());
+ if (!BeginLocOrErr)
+ return BeginLocOrErr.takeError();
+ ExpectedSLoc IdLocOrErr = import(D->getLocation());
+ if (!IdLocOrErr)
+ return IdLocOrErr.takeError();
+
if (PartialSpec) {
// Import TemplateArgumentListInfo.
TemplateArgumentListInfo ToTAInfo;
const auto &ASTTemplateArgs = *PartialSpec->getTemplateArgsAsWritten();
- if (ImportTemplateArgumentListInfo(ASTTemplateArgs, ToTAInfo))
- return nullptr;
+ if (Error Err = ImportTemplateArgumentListInfo(ASTTemplateArgs, ToTAInfo))
+ return std::move(Err);
- QualType CanonInjType = Importer.Import(
- PartialSpec->getInjectedSpecializationType());
- if (CanonInjType.isNull())
- return nullptr;
+ QualType CanonInjType;
+ if (Error Err = importInto(
+ CanonInjType, PartialSpec->getInjectedSpecializationType()))
+ return std::move(Err);
CanonInjType = CanonInjType.getCanonicalType();
- TemplateParameterList *ToTPList = ImportTemplateParameterList(
- PartialSpec->getTemplateParameters());
- if (!ToTPList && PartialSpec->getTemplateParameters())
- return nullptr;
+ auto ToTPListOrErr = ImportTemplateParameterList(
+ PartialSpec->getTemplateParameters());
+ if (!ToTPListOrErr)
+ return ToTPListOrErr.takeError();
if (GetImportedOrCreateDecl<ClassTemplatePartialSpecializationDecl>(
- D2, D, Importer.getToContext(), D->getTagKind(), DC, StartLoc,
- IdLoc, ToTPList, ClassTemplate,
+ D2, D, Importer.getToContext(), D->getTagKind(), DC,
+ *BeginLocOrErr, *IdLocOrErr, *ToTPListOrErr, ClassTemplate,
llvm::makeArrayRef(TemplateArgs.data(), TemplateArgs.size()),
ToTAInfo, CanonInjType,
cast_or_null<ClassTemplatePartialSpecializationDecl>(PrevDecl)))
@@ -4656,8 +5225,9 @@ Decl *ASTNodeImporter::VisitClassTemplateSpecializationDecl(
} else { // Not a partial specialization.
if (GetImportedOrCreateDecl(
- D2, D, Importer.getToContext(), D->getTagKind(), DC, StartLoc,
- IdLoc, ClassTemplate, TemplateArgs, PrevDecl))
+ D2, D, Importer.getToContext(), D->getTagKind(), DC,
+ *BeginLocOrErr, *IdLocOrErr, ClassTemplate, TemplateArgs,
+ PrevDecl))
return D2;
// Update InsertPos, because preceding import calls may have invalidated
@@ -4670,22 +5240,34 @@ Decl *ASTNodeImporter::VisitClassTemplateSpecializationDecl(
D2->setSpecializationKind(D->getSpecializationKind());
// Import the qualifier, if any.
- D2->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
+ if (auto LocOrErr = import(D->getQualifierLoc()))
+ D2->setQualifierInfo(*LocOrErr);
+ else
+ return LocOrErr.takeError();
if (auto *TSI = D->getTypeAsWritten()) {
- TypeSourceInfo *TInfo = Importer.Import(TSI);
- if (!TInfo)
- return nullptr;
- D2->setTypeAsWritten(TInfo);
- D2->setTemplateKeywordLoc(Importer.Import(D->getTemplateKeywordLoc()));
- D2->setExternLoc(Importer.Import(D->getExternLoc()));
+ if (auto TInfoOrErr = import(TSI))
+ D2->setTypeAsWritten(*TInfoOrErr);
+ else
+ return TInfoOrErr.takeError();
+
+ if (auto LocOrErr = import(D->getTemplateKeywordLoc()))
+ D2->setTemplateKeywordLoc(*LocOrErr);
+ else
+ return LocOrErr.takeError();
+
+ if (auto LocOrErr = import(D->getExternLoc()))
+ D2->setExternLoc(*LocOrErr);
+ else
+ return LocOrErr.takeError();
}
- SourceLocation POI = Importer.Import(D->getPointOfInstantiation());
- if (POI.isValid())
- D2->setPointOfInstantiation(POI);
- else if (D->getPointOfInstantiation().isValid())
- return nullptr;
+ if (D->getPointOfInstantiation().isValid()) {
+ if (auto POIOrErr = import(D->getPointOfInstantiation()))
+ D2->setPointOfInstantiation(*POIOrErr);
+ else
+ return POIOrErr.takeError();
+ }
D2->setTemplateSpecializationKind(D->getTemplateSpecializationKind());
@@ -4697,13 +5279,14 @@ Decl *ASTNodeImporter::VisitClassTemplateSpecializationDecl(
LexicalDC->addDeclInternal(D2);
}
}
- if (D->isCompleteDefinition() && ImportDefinition(D, D2))
- return nullptr;
+ if (D->isCompleteDefinition())
+ if (Error Err = ImportDefinition(D, D2))
+ return std::move(Err);
return D2;
}
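
In the specialization-merging path above, failures while importing individual fields and methods are deliberately swallowed with consumeError (the FIXMEs note this is provisional). A short sketch of that best-effort pattern with a hypothetical element import:

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/Support/Error.h"
    using namespace llvm;

    Expected<int> importOne(int V) {
      if (V < 0)
        return make_error<StringError>("bad value", inconvertibleErrorCode());
      return V + 1;
    }

    // Best-effort import: each failure is acknowledged and discarded, so the
    // surrounding import keeps going instead of aborting.
    void importMembers(ArrayRef<int> Members) {
      for (int M : Members) {
        auto ToOrErr = importOne(M);
        if (!ToOrErr)
          consumeError(ToOrErr.takeError());
      }
    }
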
-Decl *ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) {
// If this variable has a definition in the translation unit we're coming
// from,
// but this particular declaration is not that definition, import the
@@ -4711,11 +5294,11 @@ Decl *ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) {
auto *Definition =
cast_or_null<VarDecl>(D->getTemplatedDecl()->getDefinition());
if (Definition && Definition != D->getTemplatedDecl()) {
- Decl *ImportedDef = Importer.Import(Definition->getDescribedVarTemplate());
- if (!ImportedDef)
- return nullptr;
-
- return Importer.MapImported(D, ImportedDef);
+ if (ExpectedDecl ImportedDefOrErr = import(
+ Definition->getDescribedVarTemplate()))
+ return Importer.MapImported(D, *ImportedDefOrErr);
+ else
+ return ImportedDefOrErr.takeError();
}
// Import the major distinguishing characteristics of this variable template.
@@ -4723,8 +5306,8 @@ Decl *ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) {
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
@@ -4739,7 +5322,7 @@ Decl *ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) {
continue;
Decl *Found = FoundDecl;
- if (auto *FoundTemplate = dyn_cast<VarTemplateDecl>(Found)) {
+ if (VarTemplateDecl *FoundTemplate = dyn_cast<VarTemplateDecl>(Found)) {
if (IsStructuralMatch(D, FoundTemplate)) {
// The variable templates structurally match; call it the same template.
Importer.MapImported(D->getTemplatedDecl(),
@@ -4758,29 +5341,32 @@ Decl *ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) {
}
if (!Name)
- return nullptr;
+    // FIXME: Is it possible to get an error other than a name conflict?
+ // (Put this `if` into the previous `if`?)
+ return make_error<ImportError>(ImportError::NameConflict);
VarDecl *DTemplated = D->getTemplatedDecl();
// Import the type.
- QualType T = Importer.Import(DTemplated->getType());
- if (T.isNull())
- return nullptr;
+ // FIXME: Value not used?
+ ExpectedType TypeOrErr = import(DTemplated->getType());
+ if (!TypeOrErr)
+ return TypeOrErr.takeError();
// Create the declaration that is being templated.
- auto *ToTemplated = dyn_cast_or_null<VarDecl>(Importer.Import(DTemplated));
- if (!ToTemplated)
- return nullptr;
+ VarDecl *ToTemplated;
+ if (Error Err = importInto(ToTemplated, DTemplated))
+ return std::move(Err);
// Create the variable template declaration itself.
- TemplateParameterList *TemplateParams =
- ImportTemplateParameterList(D->getTemplateParameters());
- if (!TemplateParams)
- return nullptr;
+ auto TemplateParamsOrErr = ImportTemplateParameterList(
+ D->getTemplateParameters());
+ if (!TemplateParamsOrErr)
+ return TemplateParamsOrErr.takeError();
VarTemplateDecl *ToVarTD;
if (GetImportedOrCreateDecl(ToVarTD, D, Importer.getToContext(), DC, Loc,
- Name, TemplateParams, ToTemplated))
+ Name, *TemplateParamsOrErr, ToTemplated))
return ToVarTD;
ToTemplated->setDescribedVarTemplate(ToVarTD);
@@ -4797,46 +5383,42 @@ Decl *ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) {
return ToVarTD;
}
-Decl *ASTNodeImporter::VisitVarTemplateSpecializationDecl(
+ExpectedDecl ASTNodeImporter::VisitVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *D) {
// If this record has a definition in the translation unit we're coming from,
// but this particular declaration is not that definition, import the
// definition and map to that.
VarDecl *Definition = D->getDefinition();
if (Definition && Definition != D) {
- Decl *ImportedDef = Importer.Import(Definition);
- if (!ImportedDef)
- return nullptr;
-
- return Importer.MapImported(D, ImportedDef);
+ if (ExpectedDecl ImportedDefOrErr = import(Definition))
+ return Importer.MapImported(D, *ImportedDefOrErr);
+ else
+ return ImportedDefOrErr.takeError();
}
- auto *VarTemplate = cast_or_null<VarTemplateDecl>(
- Importer.Import(D->getSpecializedTemplate()));
- if (!VarTemplate)
- return nullptr;
+ VarTemplateDecl *VarTemplate;
+ if (Error Err = importInto(VarTemplate, D->getSpecializedTemplate()))
+ return std::move(Err);
// Import the context of this declaration.
- DeclContext *DC = VarTemplate->getDeclContext();
- if (!DC)
- return nullptr;
-
- DeclContext *LexicalDC = DC;
- if (D->getDeclContext() != D->getLexicalDeclContext()) {
- LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
- if (!LexicalDC)
- return nullptr;
- }
+ DeclContext *DC, *LexicalDC;
+ if (Error Err = ImportDeclContext(D, DC, LexicalDC))
+ return std::move(Err);
// Import the location of this declaration.
- SourceLocation StartLoc = Importer.Import(D->getBeginLoc());
- SourceLocation IdLoc = Importer.Import(D->getLocation());
+ ExpectedSLoc BeginLocOrErr = import(D->getBeginLoc());
+ if (!BeginLocOrErr)
+ return BeginLocOrErr.takeError();
+
+ auto IdLocOrErr = import(D->getLocation());
+ if (!IdLocOrErr)
+ return IdLocOrErr.takeError();
// Import template arguments.
SmallVector<TemplateArgument, 2> TemplateArgs;
- if (ImportTemplateArguments(D->getTemplateArgs().data(),
- D->getTemplateArgs().size(), TemplateArgs))
- return nullptr;
+ if (Error Err = ImportTemplateArguments(
+ D->getTemplateArgs().data(), D->getTemplateArgs().size(), TemplateArgs))
+ return std::move(Err);
// Try to find an existing specialization with these template arguments.
void *InsertPos = nullptr;
@@ -4859,17 +5441,18 @@ Decl *ASTNodeImporter::VisitVarTemplateSpecializationDecl(
}
} else {
// Import the type.
- QualType T = Importer.Import(D->getType());
- if (T.isNull())
- return nullptr;
+ QualType T;
+ if (Error Err = importInto(T, D->getType()))
+ return std::move(Err);
- TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
- if (D->getTypeSourceInfo() && !TInfo)
- return nullptr;
+ auto TInfoOrErr = import(D->getTypeSourceInfo());
+ if (!TInfoOrErr)
+ return TInfoOrErr.takeError();
TemplateArgumentListInfo ToTAInfo;
- if (ImportTemplateArgumentListInfo(D->getTemplateArgsInfo(), ToTAInfo))
- return nullptr;
+ if (Error Err = ImportTemplateArgumentListInfo(
+ D->getTemplateArgsInfo(), ToTAInfo))
+ return std::move(Err);
using PartVarSpecDecl = VarTemplatePartialSpecializationDecl;
// Create a new specialization.
@@ -4878,41 +5461,47 @@ Decl *ASTNodeImporter::VisitVarTemplateSpecializationDecl(
TemplateArgumentListInfo ArgInfos;
const auto *FromTAArgsAsWritten = FromPartial->getTemplateArgsAsWritten();
// NOTE: FromTAArgsAsWritten and template parameter list are non-null.
- if (ImportTemplateArgumentListInfo(*FromTAArgsAsWritten, ArgInfos))
- return nullptr;
+ if (Error Err = ImportTemplateArgumentListInfo(
+ *FromTAArgsAsWritten, ArgInfos))
+ return std::move(Err);
- TemplateParameterList *ToTPList = ImportTemplateParameterList(
- FromPartial->getTemplateParameters());
- if (!ToTPList)
- return nullptr;
+ auto ToTPListOrErr = ImportTemplateParameterList(
+ FromPartial->getTemplateParameters());
+ if (!ToTPListOrErr)
+ return ToTPListOrErr.takeError();
PartVarSpecDecl *ToPartial;
if (GetImportedOrCreateDecl(ToPartial, D, Importer.getToContext(), DC,
- StartLoc, IdLoc, ToTPList, VarTemplate, T,
- TInfo, D->getStorageClass(), TemplateArgs,
- ArgInfos))
+ *BeginLocOrErr, *IdLocOrErr, *ToTPListOrErr,
+ VarTemplate, T, *TInfoOrErr,
+ D->getStorageClass(), TemplateArgs, ArgInfos))
return ToPartial;
- auto *FromInst = FromPartial->getInstantiatedFromMember();
- auto *ToInst = cast_or_null<PartVarSpecDecl>(Importer.Import(FromInst));
- if (FromInst && !ToInst)
- return nullptr;
+ if (Expected<PartVarSpecDecl *> ToInstOrErr = import(
+ FromPartial->getInstantiatedFromMember()))
+ ToPartial->setInstantiatedFromMember(*ToInstOrErr);
+ else
+ return ToInstOrErr.takeError();
- ToPartial->setInstantiatedFromMember(ToInst);
if (FromPartial->isMemberSpecialization())
ToPartial->setMemberSpecialization();
D2 = ToPartial;
+
} else { // Full specialization
- if (GetImportedOrCreateDecl(D2, D, Importer.getToContext(), DC, StartLoc,
- IdLoc, VarTemplate, T, TInfo,
+ if (GetImportedOrCreateDecl(D2, D, Importer.getToContext(), DC,
+ *BeginLocOrErr, *IdLocOrErr, VarTemplate,
+ T, *TInfoOrErr,
D->getStorageClass(), TemplateArgs))
return D2;
}
- SourceLocation POI = D->getPointOfInstantiation();
- if (POI.isValid())
- D2->setPointOfInstantiation(Importer.Import(POI));
+ if (D->getPointOfInstantiation().isValid()) {
+ if (ExpectedSLoc POIOrErr = import(D->getPointOfInstantiation()))
+ D2->setPointOfInstantiation(*POIOrErr);
+ else
+ return POIOrErr.takeError();
+ }
D2->setSpecializationKind(D->getSpecializationKind());
D2->setTemplateArgsInfo(ToTAInfo);
@@ -4921,7 +5510,10 @@ Decl *ASTNodeImporter::VisitVarTemplateSpecializationDecl(
VarTemplate->AddSpecialization(D2, InsertPos);
// Import the qualifier, if any.
- D2->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
+ if (auto LocOrErr = import(D->getQualifierLoc()))
+ D2->setQualifierInfo(*LocOrErr);
+ else
+ return LocOrErr.takeError();
if (D->isConstexpr())
D2->setConstexpr(true);
@@ -4933,20 +5525,21 @@ Decl *ASTNodeImporter::VisitVarTemplateSpecializationDecl(
D2->setAccess(D->getAccess());
}
- if (ImportInitializer(D, D2))
- return nullptr;
+ if (Error Err = ImportInitializer(D, D2))
+ return std::move(Err);
return D2;
}
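
The point-of-instantiation handling above also changes shape: the rewritten code imports the location only when the source location is valid, and propagates an Error if that import fails, instead of importing unconditionally. A trivial sketch of that conditional import with hypothetical values (zero meaning "invalid"):

    #include "llvm/Support/Error.h"
    using namespace llvm;

    Expected<int> importOne(int V) {
      if (V < 0)
        return make_error<StringError>("bad value", inconvertibleErrorCode());
      return V + 1;
    }

    // Only import when the source actually has a value, mirroring
    // "if (D->getPointOfInstantiation().isValid())" above.
    Error maybeImportPOI(int FromPOI, int &ToPOI) {
      if (FromPOI != 0) {
        if (auto POIOrErr = importOne(FromPOI))
          ToPOI = *POIOrErr;
        else
          return POIOrErr.takeError();
      }
      return Error::success();
    }
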
-Decl *ASTNodeImporter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
+ExpectedDecl
+ASTNodeImporter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
@@ -4961,7 +5554,8 @@ Decl *ASTNodeImporter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
if (!FoundDecl->isInIdentifierNamespace(IDNS))
continue;
- if (auto *FoundFunction = dyn_cast<FunctionTemplateDecl>(FoundDecl)) {
+ if (auto *FoundFunction =
+ dyn_cast<FunctionTemplateDecl>(FoundDecl)) {
if (FoundFunction->hasExternalFormalLinkage() &&
D->hasExternalFormalLinkage()) {
if (IsStructuralMatch(D, FoundFunction)) {
@@ -4971,22 +5565,22 @@ Decl *ASTNodeImporter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
}
}
}
+ // TODO: handle conflicting names
}
}
- TemplateParameterList *Params =
- ImportTemplateParameterList(D->getTemplateParameters());
- if (!Params)
- return nullptr;
+ auto ParamsOrErr = ImportTemplateParameterList(
+ D->getTemplateParameters());
+ if (!ParamsOrErr)
+ return ParamsOrErr.takeError();
- auto *TemplatedFD =
- cast_or_null<FunctionDecl>(Importer.Import(D->getTemplatedDecl()));
- if (!TemplatedFD)
- return nullptr;
+ FunctionDecl *TemplatedFD;
+ if (Error Err = importInto(TemplatedFD, D->getTemplatedDecl()))
+ return std::move(Err);
FunctionTemplateDecl *ToFunc;
if (GetImportedOrCreateDecl(ToFunc, D, Importer.getToContext(), DC, Loc, Name,
- Params, TemplatedFD))
+ *ParamsOrErr, TemplatedFD))
return ToFunc;
TemplatedFD->setDescribedFunctionTemplate(ToFunc);
@@ -5001,2012 +5595,2084 @@ Decl *ASTNodeImporter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
// Import Statements
//----------------------------------------------------------------------------
-DeclGroupRef ASTNodeImporter::ImportDeclGroup(DeclGroupRef DG) {
- if (DG.isNull())
- return DeclGroupRef::Create(Importer.getToContext(), nullptr, 0);
- size_t NumDecls = DG.end() - DG.begin();
- SmallVector<Decl *, 1> ToDecls(NumDecls);
- auto &_Importer = this->Importer;
- std::transform(DG.begin(), DG.end(), ToDecls.begin(),
- [&_Importer](Decl *D) -> Decl * {
- return _Importer.Import(D);
- });
- return DeclGroupRef::Create(Importer.getToContext(),
- ToDecls.begin(),
- NumDecls);
-}
-
-Stmt *ASTNodeImporter::VisitStmt(Stmt *S) {
+ExpectedStmt ASTNodeImporter::VisitStmt(Stmt *S) {
Importer.FromDiag(S->getBeginLoc(), diag::err_unsupported_ast_node)
<< S->getStmtClassName();
- return nullptr;
+ return make_error<ImportError>(ImportError::UnsupportedConstruct);
}
-Stmt *ASTNodeImporter::VisitGCCAsmStmt(GCCAsmStmt *S) {
+
+ExpectedStmt ASTNodeImporter::VisitGCCAsmStmt(GCCAsmStmt *S) {
SmallVector<IdentifierInfo *, 4> Names;
for (unsigned I = 0, E = S->getNumOutputs(); I != E; I++) {
IdentifierInfo *ToII = Importer.Import(S->getOutputIdentifier(I));
// ToII is nullptr when no symbolic name is given for output operand
// see ParseStmtAsm::ParseAsmOperandsOpt
- if (!ToII && S->getOutputIdentifier(I))
- return nullptr;
Names.push_back(ToII);
}
+
for (unsigned I = 0, E = S->getNumInputs(); I != E; I++) {
IdentifierInfo *ToII = Importer.Import(S->getInputIdentifier(I));
// ToII is nullptr when no symbolic name is given for input operand
// see ParseStmtAsm::ParseAsmOperandsOpt
- if (!ToII && S->getInputIdentifier(I))
- return nullptr;
Names.push_back(ToII);
}
SmallVector<StringLiteral *, 4> Clobbers;
for (unsigned I = 0, E = S->getNumClobbers(); I != E; I++) {
- auto *Clobber = cast_or_null<StringLiteral>(
- Importer.Import(S->getClobberStringLiteral(I)));
- if (!Clobber)
- return nullptr;
- Clobbers.push_back(Clobber);
+ if (auto ClobberOrErr = import(S->getClobberStringLiteral(I)))
+ Clobbers.push_back(*ClobberOrErr);
+ else
+ return ClobberOrErr.takeError();
+
}
SmallVector<StringLiteral *, 4> Constraints;
for (unsigned I = 0, E = S->getNumOutputs(); I != E; I++) {
- auto *Output = cast_or_null<StringLiteral>(
- Importer.Import(S->getOutputConstraintLiteral(I)));
- if (!Output)
- return nullptr;
- Constraints.push_back(Output);
+ if (auto OutputOrErr = import(S->getOutputConstraintLiteral(I)))
+ Constraints.push_back(*OutputOrErr);
+ else
+ return OutputOrErr.takeError();
}
for (unsigned I = 0, E = S->getNumInputs(); I != E; I++) {
- auto *Input = cast_or_null<StringLiteral>(
- Importer.Import(S->getInputConstraintLiteral(I)));
- if (!Input)
- return nullptr;
- Constraints.push_back(Input);
+ if (auto InputOrErr = import(S->getInputConstraintLiteral(I)))
+ Constraints.push_back(*InputOrErr);
+ else
+ return InputOrErr.takeError();
}
SmallVector<Expr *, 4> Exprs(S->getNumOutputs() + S->getNumInputs());
- if (ImportContainerChecked(S->outputs(), Exprs))
- return nullptr;
-
- if (ImportArrayChecked(S->inputs(), Exprs.begin() + S->getNumOutputs()))
- return nullptr;
-
- auto *AsmStr = cast_or_null<StringLiteral>(
- Importer.Import(S->getAsmString()));
- if (!AsmStr)
- return nullptr;
+ if (Error Err = ImportContainerChecked(S->outputs(), Exprs))
+ return std::move(Err);
+
+ if (Error Err = ImportArrayChecked(
+ S->inputs(), Exprs.begin() + S->getNumOutputs()))
+ return std::move(Err);
+
+ ExpectedSLoc AsmLocOrErr = import(S->getAsmLoc());
+ if (!AsmLocOrErr)
+ return AsmLocOrErr.takeError();
+ auto AsmStrOrErr = import(S->getAsmString());
+ if (!AsmStrOrErr)
+ return AsmStrOrErr.takeError();
+ ExpectedSLoc RParenLocOrErr = import(S->getRParenLoc());
+ if (!RParenLocOrErr)
+ return RParenLocOrErr.takeError();
return new (Importer.getToContext()) GCCAsmStmt(
- Importer.getToContext(),
- Importer.Import(S->getAsmLoc()),
- S->isSimple(),
- S->isVolatile(),
- S->getNumOutputs(),
- S->getNumInputs(),
- Names.data(),
- Constraints.data(),
- Exprs.data(),
- AsmStr,
- S->getNumClobbers(),
- Clobbers.data(),
- Importer.Import(S->getRParenLoc()));
-}
-
-Stmt *ASTNodeImporter::VisitDeclStmt(DeclStmt *S) {
- DeclGroupRef ToDG = ImportDeclGroup(S->getDeclGroup());
- for (auto *ToD : ToDG) {
- if (!ToD)
- return nullptr;
- }
- SourceLocation ToStartLoc = Importer.Import(S->getBeginLoc());
- SourceLocation ToEndLoc = Importer.Import(S->getEndLoc());
- return new (Importer.getToContext()) DeclStmt(ToDG, ToStartLoc, ToEndLoc);
-}
+ Importer.getToContext(),
+ *AsmLocOrErr,
+ S->isSimple(),
+ S->isVolatile(),
+ S->getNumOutputs(),
+ S->getNumInputs(),
+ Names.data(),
+ Constraints.data(),
+ Exprs.data(),
+ *AsmStrOrErr,
+ S->getNumClobbers(),
+ Clobbers.data(),
+ *RParenLocOrErr);
+}
+
+ExpectedStmt ASTNodeImporter::VisitDeclStmt(DeclStmt *S) {
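+  // importSeq() imports all of its arguments and fails as a whole if any
+  // single import fails; the resulting tuple is unpacked with std::tie below.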
+ auto Imp = importSeq(S->getDeclGroup(), S->getBeginLoc(), S->getEndLoc());
+ if (!Imp)
+ return Imp.takeError();
+
+ DeclGroupRef ToDG;
+ SourceLocation ToBeginLoc, ToEndLoc;
+ std::tie(ToDG, ToBeginLoc, ToEndLoc) = *Imp;
+
+ return new (Importer.getToContext()) DeclStmt(ToDG, ToBeginLoc, ToEndLoc);
+}
+
+ExpectedStmt ASTNodeImporter::VisitNullStmt(NullStmt *S) {
+ ExpectedSLoc ToSemiLocOrErr = import(S->getSemiLoc());
+ if (!ToSemiLocOrErr)
+ return ToSemiLocOrErr.takeError();
+ return new (Importer.getToContext()) NullStmt(
+ *ToSemiLocOrErr, S->hasLeadingEmptyMacro());
+}
+
+ExpectedStmt ASTNodeImporter::VisitCompoundStmt(CompoundStmt *S) {
+ SmallVector<Stmt *, 8> ToStmts(S->size());
-Stmt *ASTNodeImporter::VisitNullStmt(NullStmt *S) {
- SourceLocation ToSemiLoc = Importer.Import(S->getSemiLoc());
- return new (Importer.getToContext()) NullStmt(ToSemiLoc,
- S->hasLeadingEmptyMacro());
-}
+ if (Error Err = ImportContainerChecked(S->body(), ToStmts))
+ return std::move(Err);
-Stmt *ASTNodeImporter::VisitCompoundStmt(CompoundStmt *S) {
- SmallVector<Stmt *, 8> ToStmts(S->size());
+ ExpectedSLoc ToLBracLocOrErr = import(S->getLBracLoc());
+ if (!ToLBracLocOrErr)
+ return ToLBracLocOrErr.takeError();
- if (ImportContainerChecked(S->body(), ToStmts))
- return nullptr;
+ ExpectedSLoc ToRBracLocOrErr = import(S->getRBracLoc());
+ if (!ToRBracLocOrErr)
+ return ToRBracLocOrErr.takeError();
- SourceLocation ToLBraceLoc = Importer.Import(S->getLBracLoc());
- SourceLocation ToRBraceLoc = Importer.Import(S->getRBracLoc());
- return CompoundStmt::Create(Importer.getToContext(), ToStmts, ToLBraceLoc,
- ToRBraceLoc);
+ return CompoundStmt::Create(
+ Importer.getToContext(), ToStmts,
+ *ToLBracLocOrErr, *ToRBracLocOrErr);
}
-Stmt *ASTNodeImporter::VisitCaseStmt(CaseStmt *S) {
- Expr *ToLHS = Importer.Import(S->getLHS());
- if (!ToLHS)
- return nullptr;
- Expr *ToRHS = Importer.Import(S->getRHS());
- if (!ToRHS && S->getRHS())
- return nullptr;
- Stmt *ToSubStmt = Importer.Import(S->getSubStmt());
- if (!ToSubStmt && S->getSubStmt())
- return nullptr;
- SourceLocation ToCaseLoc = Importer.Import(S->getCaseLoc());
- SourceLocation ToEllipsisLoc = Importer.Import(S->getEllipsisLoc());
- SourceLocation ToColonLoc = Importer.Import(S->getColonLoc());
- auto *ToStmt = new (Importer.getToContext())
- CaseStmt(ToLHS, ToRHS, ToCaseLoc, ToEllipsisLoc, ToColonLoc);
+ExpectedStmt ASTNodeImporter::VisitCaseStmt(CaseStmt *S) {
+ auto Imp = importSeq(
+ S->getLHS(), S->getRHS(), S->getSubStmt(), S->getCaseLoc(),
+ S->getEllipsisLoc(), S->getColonLoc());
+ if (!Imp)
+ return Imp.takeError();
+
+ Expr *ToLHS, *ToRHS;
+ Stmt *ToSubStmt;
+ SourceLocation ToCaseLoc, ToEllipsisLoc, ToColonLoc;
+ std::tie(ToLHS, ToRHS, ToSubStmt, ToCaseLoc, ToEllipsisLoc, ToColonLoc) =
+ *Imp;
+
+ auto *ToStmt = CaseStmt::Create(Importer.getToContext(), ToLHS, ToRHS,
+ ToCaseLoc, ToEllipsisLoc, ToColonLoc);
ToStmt->setSubStmt(ToSubStmt);
+
return ToStmt;
}
-Stmt *ASTNodeImporter::VisitDefaultStmt(DefaultStmt *S) {
- SourceLocation ToDefaultLoc = Importer.Import(S->getDefaultLoc());
- SourceLocation ToColonLoc = Importer.Import(S->getColonLoc());
- Stmt *ToSubStmt = Importer.Import(S->getSubStmt());
- if (!ToSubStmt && S->getSubStmt())
- return nullptr;
- return new (Importer.getToContext()) DefaultStmt(ToDefaultLoc, ToColonLoc,
- ToSubStmt);
-}
+ExpectedStmt ASTNodeImporter::VisitDefaultStmt(DefaultStmt *S) {
+ auto Imp = importSeq(S->getDefaultLoc(), S->getColonLoc(), S->getSubStmt());
+ if (!Imp)
+ return Imp.takeError();
-Stmt *ASTNodeImporter::VisitLabelStmt(LabelStmt *S) {
- SourceLocation ToIdentLoc = Importer.Import(S->getIdentLoc());
- auto *ToLabelDecl = cast_or_null<LabelDecl>(Importer.Import(S->getDecl()));
- if (!ToLabelDecl && S->getDecl())
- return nullptr;
- Stmt *ToSubStmt = Importer.Import(S->getSubStmt());
- if (!ToSubStmt && S->getSubStmt())
- return nullptr;
- return new (Importer.getToContext()) LabelStmt(ToIdentLoc, ToLabelDecl,
- ToSubStmt);
-}
+ SourceLocation ToDefaultLoc, ToColonLoc;
+ Stmt *ToSubStmt;
+ std::tie(ToDefaultLoc, ToColonLoc, ToSubStmt) = *Imp;
-Stmt *ASTNodeImporter::VisitAttributedStmt(AttributedStmt *S) {
- SourceLocation ToAttrLoc = Importer.Import(S->getAttrLoc());
- ArrayRef<const Attr*> FromAttrs(S->getAttrs());
- SmallVector<const Attr *, 1> ToAttrs(FromAttrs.size());
- if (ImportContainerChecked(FromAttrs, ToAttrs))
- return nullptr;
- Stmt *ToSubStmt = Importer.Import(S->getSubStmt());
- if (!ToSubStmt && S->getSubStmt())
- return nullptr;
- return AttributedStmt::Create(Importer.getToContext(), ToAttrLoc,
- ToAttrs, ToSubStmt);
+ return new (Importer.getToContext()) DefaultStmt(
+ ToDefaultLoc, ToColonLoc, ToSubStmt);
}
-Stmt *ASTNodeImporter::VisitIfStmt(IfStmt *S) {
- SourceLocation ToIfLoc = Importer.Import(S->getIfLoc());
- Stmt *ToInit = Importer.Import(S->getInit());
- if (!ToInit && S->getInit())
- return nullptr;
- VarDecl *ToConditionVariable = nullptr;
- if (VarDecl *FromConditionVariable = S->getConditionVariable()) {
- ToConditionVariable =
- dyn_cast_or_null<VarDecl>(Importer.Import(FromConditionVariable));
- if (!ToConditionVariable)
- return nullptr;
- }
- Expr *ToCondition = Importer.Import(S->getCond());
- if (!ToCondition && S->getCond())
- return nullptr;
- Stmt *ToThenStmt = Importer.Import(S->getThen());
- if (!ToThenStmt && S->getThen())
- return nullptr;
- SourceLocation ToElseLoc = Importer.Import(S->getElseLoc());
- Stmt *ToElseStmt = Importer.Import(S->getElse());
- if (!ToElseStmt && S->getElse())
- return nullptr;
- return new (Importer.getToContext()) IfStmt(Importer.getToContext(),
- ToIfLoc, S->isConstexpr(),
- ToInit,
- ToConditionVariable,
- ToCondition, ToThenStmt,
- ToElseLoc, ToElseStmt);
+ExpectedStmt ASTNodeImporter::VisitLabelStmt(LabelStmt *S) {
+ auto Imp = importSeq(S->getIdentLoc(), S->getDecl(), S->getSubStmt());
+ if (!Imp)
+ return Imp.takeError();
+
+ SourceLocation ToIdentLoc;
+ LabelDecl *ToLabelDecl;
+ Stmt *ToSubStmt;
+ std::tie(ToIdentLoc, ToLabelDecl, ToSubStmt) = *Imp;
+
+ return new (Importer.getToContext()) LabelStmt(
+ ToIdentLoc, ToLabelDecl, ToSubStmt);
}
-Stmt *ASTNodeImporter::VisitSwitchStmt(SwitchStmt *S) {
- Stmt *ToInit = Importer.Import(S->getInit());
- if (!ToInit && S->getInit())
- return nullptr;
- VarDecl *ToConditionVariable = nullptr;
- if (VarDecl *FromConditionVariable = S->getConditionVariable()) {
- ToConditionVariable =
- dyn_cast_or_null<VarDecl>(Importer.Import(FromConditionVariable));
- if (!ToConditionVariable)
- return nullptr;
- }
- Expr *ToCondition = Importer.Import(S->getCond());
- if (!ToCondition && S->getCond())
- return nullptr;
- auto *ToStmt = new (Importer.getToContext()) SwitchStmt(
- Importer.getToContext(), ToInit,
- ToConditionVariable, ToCondition);
- Stmt *ToBody = Importer.Import(S->getBody());
- if (!ToBody && S->getBody())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitAttributedStmt(AttributedStmt *S) {
+ ExpectedSLoc ToAttrLocOrErr = import(S->getAttrLoc());
+ if (!ToAttrLocOrErr)
+ return ToAttrLocOrErr.takeError();
+ ArrayRef<const Attr*> FromAttrs(S->getAttrs());
+ SmallVector<const Attr *, 1> ToAttrs(FromAttrs.size());
+ if (Error Err = ImportContainerChecked(FromAttrs, ToAttrs))
+ return std::move(Err);
+ ExpectedStmt ToSubStmtOrErr = import(S->getSubStmt());
+ if (!ToSubStmtOrErr)
+ return ToSubStmtOrErr.takeError();
+
+ return AttributedStmt::Create(
+ Importer.getToContext(), *ToAttrLocOrErr, ToAttrs, *ToSubStmtOrErr);
+}
+
+ExpectedStmt ASTNodeImporter::VisitIfStmt(IfStmt *S) {
+ auto Imp = importSeq(
+ S->getIfLoc(), S->getInit(), S->getConditionVariable(), S->getCond(),
+ S->getThen(), S->getElseLoc(), S->getElse());
+ if (!Imp)
+ return Imp.takeError();
+
+ SourceLocation ToIfLoc, ToElseLoc;
+ Stmt *ToInit, *ToThen, *ToElse;
+ VarDecl *ToConditionVariable;
+ Expr *ToCond;
+ std::tie(
+ ToIfLoc, ToInit, ToConditionVariable, ToCond, ToThen, ToElseLoc, ToElse) =
+ *Imp;
+
+ return IfStmt::Create(Importer.getToContext(), ToIfLoc, S->isConstexpr(),
+ ToInit, ToConditionVariable, ToCond, ToThen, ToElseLoc,
+ ToElse);
+}
+
+ExpectedStmt ASTNodeImporter::VisitSwitchStmt(SwitchStmt *S) {
+ auto Imp = importSeq(
+ S->getInit(), S->getConditionVariable(), S->getCond(),
+ S->getBody(), S->getSwitchLoc());
+ if (!Imp)
+ return Imp.takeError();
+
+ Stmt *ToInit, *ToBody;
+ VarDecl *ToConditionVariable;
+ Expr *ToCond;
+ SourceLocation ToSwitchLoc;
+ std::tie(ToInit, ToConditionVariable, ToCond, ToBody, ToSwitchLoc) = *Imp;
+
+ auto *ToStmt = SwitchStmt::Create(Importer.getToContext(), ToInit,
+ ToConditionVariable, ToCond);
ToStmt->setBody(ToBody);
- ToStmt->setSwitchLoc(Importer.Import(S->getSwitchLoc()));
+ ToStmt->setSwitchLoc(ToSwitchLoc);
+
// Now we have to re-chain the cases.
SwitchCase *LastChainedSwitchCase = nullptr;
for (SwitchCase *SC = S->getSwitchCaseList(); SC != nullptr;
SC = SC->getNextSwitchCase()) {
- auto *ToSC = dyn_cast_or_null<SwitchCase>(Importer.Import(SC));
- if (!ToSC)
- return nullptr;
+ Expected<SwitchCase *> ToSCOrErr = import(SC);
+ if (!ToSCOrErr)
+ return ToSCOrErr.takeError();
if (LastChainedSwitchCase)
- LastChainedSwitchCase->setNextSwitchCase(ToSC);
+ LastChainedSwitchCase->setNextSwitchCase(*ToSCOrErr);
else
- ToStmt->setSwitchCaseList(ToSC);
- LastChainedSwitchCase = ToSC;
+ ToStmt->setSwitchCaseList(*ToSCOrErr);
+ LastChainedSwitchCase = *ToSCOrErr;
}
+
return ToStmt;
}
-Stmt *ASTNodeImporter::VisitWhileStmt(WhileStmt *S) {
- VarDecl *ToConditionVariable = nullptr;
- if (VarDecl *FromConditionVariable = S->getConditionVariable()) {
- ToConditionVariable =
- dyn_cast_or_null<VarDecl>(Importer.Import(FromConditionVariable));
- if (!ToConditionVariable)
- return nullptr;
- }
- Expr *ToCondition = Importer.Import(S->getCond());
- if (!ToCondition && S->getCond())
- return nullptr;
- Stmt *ToBody = Importer.Import(S->getBody());
- if (!ToBody && S->getBody())
- return nullptr;
- SourceLocation ToWhileLoc = Importer.Import(S->getWhileLoc());
- return new (Importer.getToContext()) WhileStmt(Importer.getToContext(),
- ToConditionVariable,
- ToCondition, ToBody,
- ToWhileLoc);
+ExpectedStmt ASTNodeImporter::VisitWhileStmt(WhileStmt *S) {
+ auto Imp = importSeq(
+ S->getConditionVariable(), S->getCond(), S->getBody(), S->getWhileLoc());
+ if (!Imp)
+ return Imp.takeError();
+
+ VarDecl *ToConditionVariable;
+ Expr *ToCond;
+ Stmt *ToBody;
+ SourceLocation ToWhileLoc;
+ std::tie(ToConditionVariable, ToCond, ToBody, ToWhileLoc) = *Imp;
+
+ return WhileStmt::Create(Importer.getToContext(), ToConditionVariable, ToCond,
+ ToBody, ToWhileLoc);
}
-Stmt *ASTNodeImporter::VisitDoStmt(DoStmt *S) {
- Stmt *ToBody = Importer.Import(S->getBody());
- if (!ToBody && S->getBody())
- return nullptr;
- Expr *ToCondition = Importer.Import(S->getCond());
- if (!ToCondition && S->getCond())
- return nullptr;
- SourceLocation ToDoLoc = Importer.Import(S->getDoLoc());
- SourceLocation ToWhileLoc = Importer.Import(S->getWhileLoc());
- SourceLocation ToRParenLoc = Importer.Import(S->getRParenLoc());
- return new (Importer.getToContext()) DoStmt(ToBody, ToCondition,
- ToDoLoc, ToWhileLoc,
- ToRParenLoc);
+ExpectedStmt ASTNodeImporter::VisitDoStmt(DoStmt *S) {
+ auto Imp = importSeq(
+ S->getBody(), S->getCond(), S->getDoLoc(), S->getWhileLoc(),
+ S->getRParenLoc());
+ if (!Imp)
+ return Imp.takeError();
+
+ Stmt *ToBody;
+ Expr *ToCond;
+ SourceLocation ToDoLoc, ToWhileLoc, ToRParenLoc;
+ std::tie(ToBody, ToCond, ToDoLoc, ToWhileLoc, ToRParenLoc) = *Imp;
+
+ return new (Importer.getToContext()) DoStmt(
+ ToBody, ToCond, ToDoLoc, ToWhileLoc, ToRParenLoc);
}
-Stmt *ASTNodeImporter::VisitForStmt(ForStmt *S) {
- Stmt *ToInit = Importer.Import(S->getInit());
- if (!ToInit && S->getInit())
- return nullptr;
- Expr *ToCondition = Importer.Import(S->getCond());
- if (!ToCondition && S->getCond())
- return nullptr;
- VarDecl *ToConditionVariable = nullptr;
- if (VarDecl *FromConditionVariable = S->getConditionVariable()) {
- ToConditionVariable =
- dyn_cast_or_null<VarDecl>(Importer.Import(FromConditionVariable));
- if (!ToConditionVariable)
- return nullptr;
- }
- Expr *ToInc = Importer.Import(S->getInc());
- if (!ToInc && S->getInc())
- return nullptr;
- Stmt *ToBody = Importer.Import(S->getBody());
- if (!ToBody && S->getBody())
- return nullptr;
- SourceLocation ToForLoc = Importer.Import(S->getForLoc());
- SourceLocation ToLParenLoc = Importer.Import(S->getLParenLoc());
- SourceLocation ToRParenLoc = Importer.Import(S->getRParenLoc());
- return new (Importer.getToContext()) ForStmt(Importer.getToContext(),
- ToInit, ToCondition,
- ToConditionVariable,
- ToInc, ToBody,
- ToForLoc, ToLParenLoc,
- ToRParenLoc);
-}
-
-Stmt *ASTNodeImporter::VisitGotoStmt(GotoStmt *S) {
- LabelDecl *ToLabel = nullptr;
- if (LabelDecl *FromLabel = S->getLabel()) {
- ToLabel = dyn_cast_or_null<LabelDecl>(Importer.Import(FromLabel));
- if (!ToLabel)
- return nullptr;
- }
- SourceLocation ToGotoLoc = Importer.Import(S->getGotoLoc());
- SourceLocation ToLabelLoc = Importer.Import(S->getLabelLoc());
- return new (Importer.getToContext()) GotoStmt(ToLabel,
- ToGotoLoc, ToLabelLoc);
+ExpectedStmt ASTNodeImporter::VisitForStmt(ForStmt *S) {
+ auto Imp = importSeq(
+ S->getInit(), S->getCond(), S->getConditionVariable(), S->getInc(),
+ S->getBody(), S->getForLoc(), S->getLParenLoc(), S->getRParenLoc());
+ if (!Imp)
+ return Imp.takeError();
+
+ Stmt *ToInit;
+ Expr *ToCond, *ToInc;
+ VarDecl *ToConditionVariable;
+ Stmt *ToBody;
+ SourceLocation ToForLoc, ToLParenLoc, ToRParenLoc;
+ std::tie(
+ ToInit, ToCond, ToConditionVariable, ToInc, ToBody, ToForLoc,
+ ToLParenLoc, ToRParenLoc) = *Imp;
+
+ return new (Importer.getToContext()) ForStmt(
+ Importer.getToContext(),
+ ToInit, ToCond, ToConditionVariable, ToInc, ToBody, ToForLoc, ToLParenLoc,
+ ToRParenLoc);
}
-Stmt *ASTNodeImporter::VisitIndirectGotoStmt(IndirectGotoStmt *S) {
- SourceLocation ToGotoLoc = Importer.Import(S->getGotoLoc());
- SourceLocation ToStarLoc = Importer.Import(S->getStarLoc());
- Expr *ToTarget = Importer.Import(S->getTarget());
- if (!ToTarget && S->getTarget())
- return nullptr;
- return new (Importer.getToContext()) IndirectGotoStmt(ToGotoLoc, ToStarLoc,
- ToTarget);
+ExpectedStmt ASTNodeImporter::VisitGotoStmt(GotoStmt *S) {
+ auto Imp = importSeq(S->getLabel(), S->getGotoLoc(), S->getLabelLoc());
+ if (!Imp)
+ return Imp.takeError();
+
+ LabelDecl *ToLabel;
+ SourceLocation ToGotoLoc, ToLabelLoc;
+ std::tie(ToLabel, ToGotoLoc, ToLabelLoc) = *Imp;
+
+ return new (Importer.getToContext()) GotoStmt(
+ ToLabel, ToGotoLoc, ToLabelLoc);
}
-Stmt *ASTNodeImporter::VisitContinueStmt(ContinueStmt *S) {
- SourceLocation ToContinueLoc = Importer.Import(S->getContinueLoc());
- return new (Importer.getToContext()) ContinueStmt(ToContinueLoc);
+ExpectedStmt ASTNodeImporter::VisitIndirectGotoStmt(IndirectGotoStmt *S) {
+ auto Imp = importSeq(S->getGotoLoc(), S->getStarLoc(), S->getTarget());
+ if (!Imp)
+ return Imp.takeError();
+
+ SourceLocation ToGotoLoc, ToStarLoc;
+ Expr *ToTarget;
+ std::tie(ToGotoLoc, ToStarLoc, ToTarget) = *Imp;
+
+ return new (Importer.getToContext()) IndirectGotoStmt(
+ ToGotoLoc, ToStarLoc, ToTarget);
}
-Stmt *ASTNodeImporter::VisitBreakStmt(BreakStmt *S) {
- SourceLocation ToBreakLoc = Importer.Import(S->getBreakLoc());
- return new (Importer.getToContext()) BreakStmt(ToBreakLoc);
+ExpectedStmt ASTNodeImporter::VisitContinueStmt(ContinueStmt *S) {
+ ExpectedSLoc ToContinueLocOrErr = import(S->getContinueLoc());
+ if (!ToContinueLocOrErr)
+ return ToContinueLocOrErr.takeError();
+ return new (Importer.getToContext()) ContinueStmt(*ToContinueLocOrErr);
}
-Stmt *ASTNodeImporter::VisitReturnStmt(ReturnStmt *S) {
- SourceLocation ToRetLoc = Importer.Import(S->getReturnLoc());
- Expr *ToRetExpr = Importer.Import(S->getRetValue());
- if (!ToRetExpr && S->getRetValue())
- return nullptr;
- auto *NRVOCandidate = const_cast<VarDecl *>(S->getNRVOCandidate());
- auto *ToNRVOCandidate = cast_or_null<VarDecl>(Importer.Import(NRVOCandidate));
- if (!ToNRVOCandidate && NRVOCandidate)
- return nullptr;
- return new (Importer.getToContext()) ReturnStmt(ToRetLoc, ToRetExpr,
- ToNRVOCandidate);
+ExpectedStmt ASTNodeImporter::VisitBreakStmt(BreakStmt *S) {
+ auto ToBreakLocOrErr = import(S->getBreakLoc());
+ if (!ToBreakLocOrErr)
+ return ToBreakLocOrErr.takeError();
+ return new (Importer.getToContext()) BreakStmt(*ToBreakLocOrErr);
}
-Stmt *ASTNodeImporter::VisitCXXCatchStmt(CXXCatchStmt *S) {
- SourceLocation ToCatchLoc = Importer.Import(S->getCatchLoc());
- VarDecl *ToExceptionDecl = nullptr;
- if (VarDecl *FromExceptionDecl = S->getExceptionDecl()) {
- ToExceptionDecl =
- dyn_cast_or_null<VarDecl>(Importer.Import(FromExceptionDecl));
- if (!ToExceptionDecl)
- return nullptr;
- }
- Stmt *ToHandlerBlock = Importer.Import(S->getHandlerBlock());
- if (!ToHandlerBlock && S->getHandlerBlock())
- return nullptr;
- return new (Importer.getToContext()) CXXCatchStmt(ToCatchLoc,
- ToExceptionDecl,
- ToHandlerBlock);
+ExpectedStmt ASTNodeImporter::VisitReturnStmt(ReturnStmt *S) {
+ auto Imp = importSeq(
+ S->getReturnLoc(), S->getRetValue(), S->getNRVOCandidate());
+ if (!Imp)
+ return Imp.takeError();
+
+ SourceLocation ToReturnLoc;
+ Expr *ToRetValue;
+ const VarDecl *ToNRVOCandidate;
+ std::tie(ToReturnLoc, ToRetValue, ToNRVOCandidate) = *Imp;
+
+ return ReturnStmt::Create(Importer.getToContext(), ToReturnLoc, ToRetValue,
+ ToNRVOCandidate);
}
-Stmt *ASTNodeImporter::VisitCXXTryStmt(CXXTryStmt *S) {
- SourceLocation ToTryLoc = Importer.Import(S->getTryLoc());
- Stmt *ToTryBlock = Importer.Import(S->getTryBlock());
- if (!ToTryBlock && S->getTryBlock())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitCXXCatchStmt(CXXCatchStmt *S) {
+ auto Imp = importSeq(
+ S->getCatchLoc(), S->getExceptionDecl(), S->getHandlerBlock());
+ if (!Imp)
+ return Imp.takeError();
+
+ SourceLocation ToCatchLoc;
+ VarDecl *ToExceptionDecl;
+ Stmt *ToHandlerBlock;
+ std::tie(ToCatchLoc, ToExceptionDecl, ToHandlerBlock) = *Imp;
+
+  return new (Importer.getToContext()) CXXCatchStmt(
+ ToCatchLoc, ToExceptionDecl, ToHandlerBlock);
+}
+
+ExpectedStmt ASTNodeImporter::VisitCXXTryStmt(CXXTryStmt *S) {
+ ExpectedSLoc ToTryLocOrErr = import(S->getTryLoc());
+ if (!ToTryLocOrErr)
+ return ToTryLocOrErr.takeError();
+
+ ExpectedStmt ToTryBlockOrErr = import(S->getTryBlock());
+ if (!ToTryBlockOrErr)
+ return ToTryBlockOrErr.takeError();
+
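+  // Import every catch handler; a failure for any of them aborts the import
+  // of the whole try statement.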
SmallVector<Stmt *, 1> ToHandlers(S->getNumHandlers());
for (unsigned HI = 0, HE = S->getNumHandlers(); HI != HE; ++HI) {
CXXCatchStmt *FromHandler = S->getHandler(HI);
- if (Stmt *ToHandler = Importer.Import(FromHandler))
- ToHandlers[HI] = ToHandler;
+ if (auto ToHandlerOrErr = import(FromHandler))
+ ToHandlers[HI] = *ToHandlerOrErr;
else
- return nullptr;
- }
- return CXXTryStmt::Create(Importer.getToContext(), ToTryLoc, ToTryBlock,
- ToHandlers);
+ return ToHandlerOrErr.takeError();
+ }
+
+ return CXXTryStmt::Create(
+      Importer.getToContext(), *ToTryLocOrErr, *ToTryBlockOrErr, ToHandlers);
+}
+
+ExpectedStmt ASTNodeImporter::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
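+  // The sub-statements and sub-expressions are imported in one batch and the
+  // source locations in a second one; both tuples are unpacked below.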
+ auto Imp1 = importSeq(
+ S->getInit(), S->getRangeStmt(), S->getBeginStmt(), S->getEndStmt(),
+ S->getCond(), S->getInc(), S->getLoopVarStmt(), S->getBody());
+ if (!Imp1)
+ return Imp1.takeError();
+ auto Imp2 = importSeq(
+ S->getForLoc(), S->getCoawaitLoc(), S->getColonLoc(), S->getRParenLoc());
+ if (!Imp2)
+ return Imp2.takeError();
+
+ DeclStmt *ToRangeStmt, *ToBeginStmt, *ToEndStmt, *ToLoopVarStmt;
+ Expr *ToCond, *ToInc;
+ Stmt *ToInit, *ToBody;
+ std::tie(
+ ToInit, ToRangeStmt, ToBeginStmt, ToEndStmt, ToCond, ToInc, ToLoopVarStmt,
+ ToBody) = *Imp1;
+ SourceLocation ToForLoc, ToCoawaitLoc, ToColonLoc, ToRParenLoc;
+ std::tie(ToForLoc, ToCoawaitLoc, ToColonLoc, ToRParenLoc) = *Imp2;
+
+ return new (Importer.getToContext()) CXXForRangeStmt(
+ ToInit, ToRangeStmt, ToBeginStmt, ToEndStmt, ToCond, ToInc, ToLoopVarStmt,
+ ToBody, ToForLoc, ToCoawaitLoc, ToColonLoc, ToRParenLoc);
+}
+
+ExpectedStmt
+ASTNodeImporter::VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) {
+ auto Imp = importSeq(
+ S->getElement(), S->getCollection(), S->getBody(),
+ S->getForLoc(), S->getRParenLoc());
+ if (!Imp)
+ return Imp.takeError();
+
+ Stmt *ToElement, *ToBody;
+ Expr *ToCollection;
+ SourceLocation ToForLoc, ToRParenLoc;
+ std::tie(ToElement, ToCollection, ToBody, ToForLoc, ToRParenLoc) = *Imp;
+
+ return new (Importer.getToContext()) ObjCForCollectionStmt(ToElement,
+ ToCollection,
+ ToBody,
+ ToForLoc,
+ ToRParenLoc);
}
-Stmt *ASTNodeImporter::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
- auto *ToInit = dyn_cast_or_null<Stmt>(Importer.Import(S->getInit()));
- if (!ToInit && S->getInit())
- return nullptr;
- auto *ToRange =
- dyn_cast_or_null<DeclStmt>(Importer.Import(S->getRangeStmt()));
- if (!ToRange && S->getRangeStmt())
- return nullptr;
- auto *ToBegin =
- dyn_cast_or_null<DeclStmt>(Importer.Import(S->getBeginStmt()));
- if (!ToBegin && S->getBeginStmt())
- return nullptr;
- auto *ToEnd =
- dyn_cast_or_null<DeclStmt>(Importer.Import(S->getEndStmt()));
- if (!ToEnd && S->getEndStmt())
- return nullptr;
- Expr *ToCond = Importer.Import(S->getCond());
- if (!ToCond && S->getCond())
- return nullptr;
- Expr *ToInc = Importer.Import(S->getInc());
- if (!ToInc && S->getInc())
- return nullptr;
- auto *ToLoopVar =
- dyn_cast_or_null<DeclStmt>(Importer.Import(S->getLoopVarStmt()));
- if (!ToLoopVar && S->getLoopVarStmt())
- return nullptr;
- Stmt *ToBody = Importer.Import(S->getBody());
- if (!ToBody && S->getBody())
- return nullptr;
- SourceLocation ToForLoc = Importer.Import(S->getForLoc());
- SourceLocation ToCoawaitLoc = Importer.Import(S->getCoawaitLoc());
- SourceLocation ToColonLoc = Importer.Import(S->getColonLoc());
- SourceLocation ToRParenLoc = Importer.Import(S->getRParenLoc());
- return new (Importer.getToContext())
- CXXForRangeStmt(ToInit, ToRange, ToBegin, ToEnd, ToCond, ToInc, ToLoopVar,
- ToBody, ToForLoc, ToCoawaitLoc, ToColonLoc, ToRParenLoc);
-}
+ExpectedStmt ASTNodeImporter::VisitObjCAtCatchStmt(ObjCAtCatchStmt *S) {
+ auto Imp = importSeq(
+ S->getAtCatchLoc(), S->getRParenLoc(), S->getCatchParamDecl(),
+ S->getCatchBody());
+ if (!Imp)
+ return Imp.takeError();
-Stmt *ASTNodeImporter::VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) {
- Stmt *ToElem = Importer.Import(S->getElement());
- if (!ToElem && S->getElement())
- return nullptr;
- Expr *ToCollect = Importer.Import(S->getCollection());
- if (!ToCollect && S->getCollection())
- return nullptr;
- Stmt *ToBody = Importer.Import(S->getBody());
- if (!ToBody && S->getBody())
- return nullptr;
- SourceLocation ToForLoc = Importer.Import(S->getForLoc());
- SourceLocation ToRParenLoc = Importer.Import(S->getRParenLoc());
- return new (Importer.getToContext()) ObjCForCollectionStmt(ToElem,
- ToCollect,
- ToBody, ToForLoc,
- ToRParenLoc);
-}
+ SourceLocation ToAtCatchLoc, ToRParenLoc;
+ VarDecl *ToCatchParamDecl;
+ Stmt *ToCatchBody;
+ std::tie(ToAtCatchLoc, ToRParenLoc, ToCatchParamDecl, ToCatchBody) = *Imp;
-Stmt *ASTNodeImporter::VisitObjCAtCatchStmt(ObjCAtCatchStmt *S) {
- SourceLocation ToAtCatchLoc = Importer.Import(S->getAtCatchLoc());
- SourceLocation ToRParenLoc = Importer.Import(S->getRParenLoc());
- VarDecl *ToExceptionDecl = nullptr;
- if (VarDecl *FromExceptionDecl = S->getCatchParamDecl()) {
- ToExceptionDecl =
- dyn_cast_or_null<VarDecl>(Importer.Import(FromExceptionDecl));
- if (!ToExceptionDecl)
- return nullptr;
- }
- Stmt *ToBody = Importer.Import(S->getCatchBody());
- if (!ToBody && S->getCatchBody())
- return nullptr;
- return new (Importer.getToContext()) ObjCAtCatchStmt(ToAtCatchLoc,
- ToRParenLoc,
- ToExceptionDecl,
- ToBody);
+  return new (Importer.getToContext()) ObjCAtCatchStmt(
+ ToAtCatchLoc, ToRParenLoc, ToCatchParamDecl, ToCatchBody);
}
-Stmt *ASTNodeImporter::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S) {
- SourceLocation ToAtFinallyLoc = Importer.Import(S->getAtFinallyLoc());
- Stmt *ToAtFinallyStmt = Importer.Import(S->getFinallyBody());
- if (!ToAtFinallyStmt && S->getFinallyBody())
- return nullptr;
- return new (Importer.getToContext()) ObjCAtFinallyStmt(ToAtFinallyLoc,
- ToAtFinallyStmt);
+ExpectedStmt ASTNodeImporter::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S) {
+ ExpectedSLoc ToAtFinallyLocOrErr = import(S->getAtFinallyLoc());
+ if (!ToAtFinallyLocOrErr)
+ return ToAtFinallyLocOrErr.takeError();
+ ExpectedStmt ToAtFinallyStmtOrErr = import(S->getFinallyBody());
+ if (!ToAtFinallyStmtOrErr)
+ return ToAtFinallyStmtOrErr.takeError();
+ return new (Importer.getToContext()) ObjCAtFinallyStmt(*ToAtFinallyLocOrErr,
+ *ToAtFinallyStmtOrErr);
}
-Stmt *ASTNodeImporter::VisitObjCAtTryStmt(ObjCAtTryStmt *S) {
- SourceLocation ToAtTryLoc = Importer.Import(S->getAtTryLoc());
- Stmt *ToAtTryStmt = Importer.Import(S->getTryBody());
- if (!ToAtTryStmt && S->getTryBody())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitObjCAtTryStmt(ObjCAtTryStmt *S) {
+ auto Imp = importSeq(
+ S->getAtTryLoc(), S->getTryBody(), S->getFinallyStmt());
+ if (!Imp)
+ return Imp.takeError();
+
+ SourceLocation ToAtTryLoc;
+ Stmt *ToTryBody, *ToFinallyStmt;
+ std::tie(ToAtTryLoc, ToTryBody, ToFinallyStmt) = *Imp;
+
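+  // Import each @catch clause separately, bailing out on the first error.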
SmallVector<Stmt *, 1> ToCatchStmts(S->getNumCatchStmts());
for (unsigned CI = 0, CE = S->getNumCatchStmts(); CI != CE; ++CI) {
ObjCAtCatchStmt *FromCatchStmt = S->getCatchStmt(CI);
- if (Stmt *ToCatchStmt = Importer.Import(FromCatchStmt))
- ToCatchStmts[CI] = ToCatchStmt;
+ if (ExpectedStmt ToCatchStmtOrErr = import(FromCatchStmt))
+ ToCatchStmts[CI] = *ToCatchStmtOrErr;
else
- return nullptr;
+ return ToCatchStmtOrErr.takeError();
}
- Stmt *ToAtFinallyStmt = Importer.Import(S->getFinallyStmt());
- if (!ToAtFinallyStmt && S->getFinallyStmt())
- return nullptr;
+
return ObjCAtTryStmt::Create(Importer.getToContext(),
- ToAtTryLoc, ToAtTryStmt,
+ ToAtTryLoc, ToTryBody,
ToCatchStmts.begin(), ToCatchStmts.size(),
- ToAtFinallyStmt);
+ ToFinallyStmt);
}
-Stmt *ASTNodeImporter::VisitObjCAtSynchronizedStmt
+ExpectedStmt ASTNodeImporter::VisitObjCAtSynchronizedStmt
(ObjCAtSynchronizedStmt *S) {
- SourceLocation ToAtSynchronizedLoc =
- Importer.Import(S->getAtSynchronizedLoc());
- Expr *ToSynchExpr = Importer.Import(S->getSynchExpr());
- if (!ToSynchExpr && S->getSynchExpr())
- return nullptr;
- Stmt *ToSynchBody = Importer.Import(S->getSynchBody());
- if (!ToSynchBody && S->getSynchBody())
- return nullptr;
+ auto Imp = importSeq(
+ S->getAtSynchronizedLoc(), S->getSynchExpr(), S->getSynchBody());
+ if (!Imp)
+ return Imp.takeError();
+
+ SourceLocation ToAtSynchronizedLoc;
+ Expr *ToSynchExpr;
+ Stmt *ToSynchBody;
+ std::tie(ToAtSynchronizedLoc, ToSynchExpr, ToSynchBody) = *Imp;
+
return new (Importer.getToContext()) ObjCAtSynchronizedStmt(
ToAtSynchronizedLoc, ToSynchExpr, ToSynchBody);
}
-Stmt *ASTNodeImporter::VisitObjCAtThrowStmt(ObjCAtThrowStmt *S) {
- SourceLocation ToAtThrowLoc = Importer.Import(S->getThrowLoc());
- Expr *ToThrow = Importer.Import(S->getThrowExpr());
- if (!ToThrow && S->getThrowExpr())
- return nullptr;
- return new (Importer.getToContext()) ObjCAtThrowStmt(ToAtThrowLoc, ToThrow);
+ExpectedStmt ASTNodeImporter::VisitObjCAtThrowStmt(ObjCAtThrowStmt *S) {
+ ExpectedSLoc ToThrowLocOrErr = import(S->getThrowLoc());
+ if (!ToThrowLocOrErr)
+ return ToThrowLocOrErr.takeError();
+ ExpectedExpr ToThrowExprOrErr = import(S->getThrowExpr());
+ if (!ToThrowExprOrErr)
+ return ToThrowExprOrErr.takeError();
+ return new (Importer.getToContext()) ObjCAtThrowStmt(
+ *ToThrowLocOrErr, *ToThrowExprOrErr);
}
-Stmt *ASTNodeImporter::VisitObjCAutoreleasePoolStmt
- (ObjCAutoreleasePoolStmt *S) {
- SourceLocation ToAtLoc = Importer.Import(S->getAtLoc());
- Stmt *ToSubStmt = Importer.Import(S->getSubStmt());
- if (!ToSubStmt && S->getSubStmt())
- return nullptr;
- return new (Importer.getToContext()) ObjCAutoreleasePoolStmt(ToAtLoc,
- ToSubStmt);
+ExpectedStmt ASTNodeImporter::VisitObjCAutoreleasePoolStmt(
+ ObjCAutoreleasePoolStmt *S) {
+ ExpectedSLoc ToAtLocOrErr = import(S->getAtLoc());
+ if (!ToAtLocOrErr)
+ return ToAtLocOrErr.takeError();
+ ExpectedStmt ToSubStmtOrErr = import(S->getSubStmt());
+ if (!ToSubStmtOrErr)
+ return ToSubStmtOrErr.takeError();
+ return new (Importer.getToContext()) ObjCAutoreleasePoolStmt(*ToAtLocOrErr,
+ *ToSubStmtOrErr);
}
//----------------------------------------------------------------------------
// Import Expressions
//----------------------------------------------------------------------------
-Expr *ASTNodeImporter::VisitExpr(Expr *E) {
+ExpectedStmt ASTNodeImporter::VisitExpr(Expr *E) {
Importer.FromDiag(E->getBeginLoc(), diag::err_unsupported_ast_node)
<< E->getStmtClassName();
- return nullptr;
+ return make_error<ImportError>(ImportError::UnsupportedConstruct);
}
-Expr *ASTNodeImporter::VisitVAArgExpr(VAArgExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
-
- Expr *SubExpr = Importer.Import(E->getSubExpr());
- if (!SubExpr && E->getSubExpr())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitVAArgExpr(VAArgExpr *E) {
+ auto Imp = importSeq(
+ E->getBuiltinLoc(), E->getSubExpr(), E->getWrittenTypeInfo(),
+ E->getRParenLoc(), E->getType());
+ if (!Imp)
+ return Imp.takeError();
- TypeSourceInfo *TInfo = Importer.Import(E->getWrittenTypeInfo());
- if (!TInfo)
- return nullptr;
+ SourceLocation ToBuiltinLoc, ToRParenLoc;
+ Expr *ToSubExpr;
+ TypeSourceInfo *ToWrittenTypeInfo;
+ QualType ToType;
+ std::tie(ToBuiltinLoc, ToSubExpr, ToWrittenTypeInfo, ToRParenLoc, ToType) =
+ *Imp;
return new (Importer.getToContext()) VAArgExpr(
- Importer.Import(E->getBuiltinLoc()), SubExpr, TInfo,
- Importer.Import(E->getRParenLoc()), T, E->isMicrosoftABI());
+ ToBuiltinLoc, ToSubExpr, ToWrittenTypeInfo, ToRParenLoc, ToType,
+ E->isMicrosoftABI());
}
-Expr *ASTNodeImporter::VisitGNUNullExpr(GNUNullExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
- return new (Importer.getToContext())
- GNUNullExpr(T, Importer.Import(E->getBeginLoc()));
+ExpectedStmt ASTNodeImporter::VisitGNUNullExpr(GNUNullExpr *E) {
+ ExpectedType TypeOrErr = import(E->getType());
+ if (!TypeOrErr)
+ return TypeOrErr.takeError();
+
+ ExpectedSLoc BeginLocOrErr = import(E->getBeginLoc());
+ if (!BeginLocOrErr)
+ return BeginLocOrErr.takeError();
+
+ return new (Importer.getToContext()) GNUNullExpr(*TypeOrErr, *BeginLocOrErr);
}
-Expr *ASTNodeImporter::VisitPredefinedExpr(PredefinedExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitPredefinedExpr(PredefinedExpr *E) {
+ auto Imp = importSeq(
+ E->getBeginLoc(), E->getType(), E->getFunctionName());
+ if (!Imp)
+ return Imp.takeError();
- auto *SL = cast_or_null<StringLiteral>(Importer.Import(E->getFunctionName()));
- if (!SL && E->getFunctionName())
- return nullptr;
+ SourceLocation ToBeginLoc;
+ QualType ToType;
+ StringLiteral *ToFunctionName;
+ std::tie(ToBeginLoc, ToType, ToFunctionName) = *Imp;
- return new (Importer.getToContext()) PredefinedExpr(
- Importer.Import(E->getBeginLoc()), T, E->getIdentType(), SL);
+ return PredefinedExpr::Create(Importer.getToContext(), ToBeginLoc, ToType,
+ E->getIdentKind(), ToFunctionName);
}
-Expr *ASTNodeImporter::VisitDeclRefExpr(DeclRefExpr *E) {
- auto *ToD = cast_or_null<ValueDecl>(Importer.Import(E->getDecl()));
- if (!ToD)
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitDeclRefExpr(DeclRefExpr *E) {
+ auto Imp = importSeq(
+ E->getQualifierLoc(), E->getTemplateKeywordLoc(), E->getDecl(),
+ E->getLocation(), E->getType());
+ if (!Imp)
+ return Imp.takeError();
+
+ NestedNameSpecifierLoc ToQualifierLoc;
+ SourceLocation ToTemplateKeywordLoc, ToLocation;
+ ValueDecl *ToDecl;
+ QualType ToType;
+ std::tie(ToQualifierLoc, ToTemplateKeywordLoc, ToDecl, ToLocation, ToType) =
+ *Imp;
- NamedDecl *FoundD = nullptr;
+ NamedDecl *ToFoundD = nullptr;
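+  // The found declaration is imported separately only when it differs from
+  // the referenced declaration itself.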
if (E->getDecl() != E->getFoundDecl()) {
- FoundD = cast_or_null<NamedDecl>(Importer.Import(E->getFoundDecl()));
- if (!FoundD)
- return nullptr;
+ auto FoundDOrErr = import(E->getFoundDecl());
+ if (!FoundDOrErr)
+ return FoundDOrErr.takeError();
+ ToFoundD = *FoundDOrErr;
}
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
-
TemplateArgumentListInfo ToTAInfo;
- TemplateArgumentListInfo *ResInfo = nullptr;
+ TemplateArgumentListInfo *ToResInfo = nullptr;
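+  // Explicit template arguments, if present, are imported into ToTAInfo and
+  // handed to DeclRefExpr::Create via ToResInfo.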
if (E->hasExplicitTemplateArgs()) {
- if (ImportTemplateArgumentListInfo(E->template_arguments(), ToTAInfo))
- return nullptr;
- ResInfo = &ToTAInfo;
+ if (Error Err =
+ ImportTemplateArgumentListInfo(E->template_arguments(), ToTAInfo))
+ return std::move(Err);
+ ToResInfo = &ToTAInfo;
}
- DeclRefExpr *DRE = DeclRefExpr::Create(Importer.getToContext(),
- Importer.Import(E->getQualifierLoc()),
- Importer.Import(E->getTemplateKeywordLoc()),
- ToD,
- E->refersToEnclosingVariableOrCapture(),
- Importer.Import(E->getLocation()),
- T, E->getValueKind(),
- FoundD, ResInfo);
+ auto *ToE = DeclRefExpr::Create(
+ Importer.getToContext(), ToQualifierLoc, ToTemplateKeywordLoc, ToDecl,
+ E->refersToEnclosingVariableOrCapture(), ToLocation, ToType,
+ E->getValueKind(), ToFoundD, ToResInfo);
if (E->hadMultipleCandidates())
- DRE->setHadMultipleCandidates(true);
- return DRE;
-}
-
-Expr *ASTNodeImporter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
-
- return new (Importer.getToContext()) ImplicitValueInitExpr(T);
+ ToE->setHadMultipleCandidates(true);
+ return ToE;
}
-ASTNodeImporter::Designator
-ASTNodeImporter::ImportDesignator(const Designator &D) {
- if (D.isFieldDesignator()) {
- IdentifierInfo *ToFieldName = Importer.Import(D.getFieldName());
- // Caller checks for import error
- return Designator(ToFieldName, Importer.Import(D.getDotLoc()),
- Importer.Import(D.getFieldLoc()));
- }
- if (D.isArrayDesignator())
- return Designator(D.getFirstExprIndex(),
- Importer.Import(D.getLBracketLoc()),
- Importer.Import(D.getRBracketLoc()));
+ExpectedStmt
+ASTNodeImporter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
+ ExpectedType TypeOrErr = import(E->getType());
+ if (!TypeOrErr)
+ return TypeOrErr.takeError();
- assert(D.isArrayRangeDesignator());
- return Designator(D.getFirstExprIndex(),
- Importer.Import(D.getLBracketLoc()),
- Importer.Import(D.getEllipsisLoc()),
- Importer.Import(D.getRBracketLoc()));
+ return new (Importer.getToContext()) ImplicitValueInitExpr(*TypeOrErr);
}
+ExpectedStmt ASTNodeImporter::VisitDesignatedInitExpr(DesignatedInitExpr *E) {
+ ExpectedExpr ToInitOrErr = import(E->getInit());
+ if (!ToInitOrErr)
+ return ToInitOrErr.takeError();
-Expr *ASTNodeImporter::VisitDesignatedInitExpr(DesignatedInitExpr *DIE) {
- auto *Init = cast_or_null<Expr>(Importer.Import(DIE->getInit()));
- if (!Init)
- return nullptr;
+ ExpectedSLoc ToEqualOrColonLocOrErr = import(E->getEqualOrColonLoc());
+ if (!ToEqualOrColonLocOrErr)
+ return ToEqualOrColonLocOrErr.takeError();
- SmallVector<Expr *, 4> IndexExprs(DIE->getNumSubExprs() - 1);
+ SmallVector<Expr *, 4> ToIndexExprs(E->getNumSubExprs() - 1);
  // List elements from the second one; the first is Init itself
- for (unsigned I = 1, E = DIE->getNumSubExprs(); I < E; I++) {
- if (auto *Arg = cast_or_null<Expr>(Importer.Import(DIE->getSubExpr(I))))
- IndexExprs[I - 1] = Arg;
+ for (unsigned I = 1, N = E->getNumSubExprs(); I < N; I++) {
+ if (ExpectedExpr ToArgOrErr = import(E->getSubExpr(I)))
+ ToIndexExprs[I - 1] = *ToArgOrErr;
else
- return nullptr;
+ return ToArgOrErr.takeError();
}
- SmallVector<Designator, 4> Designators(DIE->size());
- llvm::transform(DIE->designators(), Designators.begin(),
- [this](const Designator &D) -> Designator {
- return ImportDesignator(D);
- });
-
- for (const auto &D : DIE->designators())
- if (D.isFieldDesignator() && !D.getFieldName())
- return nullptr;
+ SmallVector<Designator, 4> ToDesignators(E->size());
+ if (Error Err = ImportContainerChecked(E->designators(), ToDesignators))
+ return std::move(Err);
return DesignatedInitExpr::Create(
- Importer.getToContext(), Designators,
- IndexExprs, Importer.Import(DIE->getEqualOrColonLoc()),
- DIE->usesGNUSyntax(), Init);
+ Importer.getToContext(), ToDesignators,
+ ToIndexExprs, *ToEqualOrColonLocOrErr,
+ E->usesGNUSyntax(), *ToInitOrErr);
}
-Expr *ASTNodeImporter::VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt
+ASTNodeImporter::VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E) {
+ ExpectedType ToTypeOrErr = import(E->getType());
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
- return new (Importer.getToContext())
- CXXNullPtrLiteralExpr(T, Importer.Import(E->getLocation()));
+ ExpectedSLoc ToLocationOrErr = import(E->getLocation());
+ if (!ToLocationOrErr)
+ return ToLocationOrErr.takeError();
+
+ return new (Importer.getToContext()) CXXNullPtrLiteralExpr(
+ *ToTypeOrErr, *ToLocationOrErr);
}
-Expr *ASTNodeImporter::VisitIntegerLiteral(IntegerLiteral *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitIntegerLiteral(IntegerLiteral *E) {
+ ExpectedType ToTypeOrErr = import(E->getType());
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
- return IntegerLiteral::Create(Importer.getToContext(),
- E->getValue(), T,
- Importer.Import(E->getLocation()));
+ ExpectedSLoc ToLocationOrErr = import(E->getLocation());
+ if (!ToLocationOrErr)
+ return ToLocationOrErr.takeError();
+
+ return IntegerLiteral::Create(
+ Importer.getToContext(), E->getValue(), *ToTypeOrErr, *ToLocationOrErr);
}
-Expr *ASTNodeImporter::VisitFloatingLiteral(FloatingLiteral *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
- return FloatingLiteral::Create(Importer.getToContext(),
- E->getValue(), E->isExact(), T,
- Importer.Import(E->getLocation()));
+ExpectedStmt ASTNodeImporter::VisitFloatingLiteral(FloatingLiteral *E) {
+ ExpectedType ToTypeOrErr = import(E->getType());
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
+
+ ExpectedSLoc ToLocationOrErr = import(E->getLocation());
+ if (!ToLocationOrErr)
+ return ToLocationOrErr.takeError();
+
+ return FloatingLiteral::Create(
+ Importer.getToContext(), E->getValue(), E->isExact(),
+ *ToTypeOrErr, *ToLocationOrErr);
}
-Expr *ASTNodeImporter::VisitImaginaryLiteral(ImaginaryLiteral *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitImaginaryLiteral(ImaginaryLiteral *E) {
+ auto ToTypeOrErr = import(E->getType());
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
- Expr *SubE = Importer.Import(E->getSubExpr());
- if (!SubE)
- return nullptr;
+ ExpectedExpr ToSubExprOrErr = import(E->getSubExpr());
+ if (!ToSubExprOrErr)
+ return ToSubExprOrErr.takeError();
- return new (Importer.getToContext()) ImaginaryLiteral(SubE, T);
+ return new (Importer.getToContext()) ImaginaryLiteral(
+ *ToSubExprOrErr, *ToTypeOrErr);
}
-Expr *ASTNodeImporter::VisitCharacterLiteral(CharacterLiteral *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitCharacterLiteral(CharacterLiteral *E) {
+ ExpectedType ToTypeOrErr = import(E->getType());
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
- return new (Importer.getToContext()) CharacterLiteral(E->getValue(),
- E->getKind(), T,
- Importer.Import(E->getLocation()));
+ ExpectedSLoc ToLocationOrErr = import(E->getLocation());
+ if (!ToLocationOrErr)
+ return ToLocationOrErr.takeError();
+
+ return new (Importer.getToContext()) CharacterLiteral(
+ E->getValue(), E->getKind(), *ToTypeOrErr, *ToLocationOrErr);
}
-Expr *ASTNodeImporter::VisitStringLiteral(StringLiteral *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitStringLiteral(StringLiteral *E) {
+ ExpectedType ToTypeOrErr = import(E->getType());
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
- SmallVector<SourceLocation, 4> Locations(E->getNumConcatenated());
- ImportArray(E->tokloc_begin(), E->tokloc_end(), Locations.begin());
+ SmallVector<SourceLocation, 4> ToLocations(E->getNumConcatenated());
+ if (Error Err = ImportArrayChecked(
+ E->tokloc_begin(), E->tokloc_end(), ToLocations.begin()))
+ return std::move(Err);
- return StringLiteral::Create(Importer.getToContext(), E->getBytes(),
- E->getKind(), E->isPascal(), T,
- Locations.data(), Locations.size());
+ return StringLiteral::Create(
+ Importer.getToContext(), E->getBytes(), E->getKind(), E->isPascal(),
+ *ToTypeOrErr, ToLocations.data(), ToLocations.size());
}
-Expr *ASTNodeImporter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
-
- TypeSourceInfo *TInfo = Importer.Import(E->getTypeSourceInfo());
- if (!TInfo)
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ auto Imp = importSeq(
+ E->getLParenLoc(), E->getTypeSourceInfo(), E->getType(),
+ E->getInitializer());
+ if (!Imp)
+ return Imp.takeError();
- Expr *Init = Importer.Import(E->getInitializer());
- if (!Init)
- return nullptr;
+ SourceLocation ToLParenLoc;
+ TypeSourceInfo *ToTypeSourceInfo;
+ QualType ToType;
+ Expr *ToInitializer;
+ std::tie(ToLParenLoc, ToTypeSourceInfo, ToType, ToInitializer) = *Imp;
return new (Importer.getToContext()) CompoundLiteralExpr(
- Importer.Import(E->getLParenLoc()), TInfo, T, E->getValueKind(),
- Init, E->isFileScope());
+ ToLParenLoc, ToTypeSourceInfo, ToType, E->getValueKind(),
+ ToInitializer, E->isFileScope());
}
-Expr *ASTNodeImporter::VisitAtomicExpr(AtomicExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitAtomicExpr(AtomicExpr *E) {
+ auto Imp = importSeq(
+ E->getBuiltinLoc(), E->getType(), E->getRParenLoc());
+ if (!Imp)
+ return Imp.takeError();
- SmallVector<Expr *, 6> Exprs(E->getNumSubExprs());
- if (ImportArrayChecked(
- E->getSubExprs(), E->getSubExprs() + E->getNumSubExprs(),
- Exprs.begin()))
- return nullptr;
+ SourceLocation ToBuiltinLoc, ToRParenLoc;
+ QualType ToType;
+ std::tie(ToBuiltinLoc, ToType, ToRParenLoc) = *Imp;
+
+ SmallVector<Expr *, 6> ToExprs(E->getNumSubExprs());
+ if (Error Err = ImportArrayChecked(
+ E->getSubExprs(), E->getSubExprs() + E->getNumSubExprs(),
+ ToExprs.begin()))
+ return std::move(Err);
return new (Importer.getToContext()) AtomicExpr(
- Importer.Import(E->getBuiltinLoc()), Exprs, T, E->getOp(),
- Importer.Import(E->getRParenLoc()));
+ ToBuiltinLoc, ToExprs, ToType, E->getOp(), ToRParenLoc);
}
-Expr *ASTNodeImporter::VisitAddrLabelExpr(AddrLabelExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitAddrLabelExpr(AddrLabelExpr *E) {
+ auto Imp = importSeq(
+ E->getAmpAmpLoc(), E->getLabelLoc(), E->getLabel(), E->getType());
+ if (!Imp)
+ return Imp.takeError();
- auto *ToLabel = cast_or_null<LabelDecl>(Importer.Import(E->getLabel()));
- if (!ToLabel)
- return nullptr;
+ SourceLocation ToAmpAmpLoc, ToLabelLoc;
+ LabelDecl *ToLabel;
+ QualType ToType;
+ std::tie(ToAmpAmpLoc, ToLabelLoc, ToLabel, ToType) = *Imp;
return new (Importer.getToContext()) AddrLabelExpr(
- Importer.Import(E->getAmpAmpLoc()), Importer.Import(E->getLabelLoc()),
- ToLabel, T);
+ ToAmpAmpLoc, ToLabelLoc, ToLabel, ToType);
}
-Expr *ASTNodeImporter::VisitParenExpr(ParenExpr *E) {
- Expr *SubExpr = Importer.Import(E->getSubExpr());
- if (!SubExpr)
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitConstantExpr(ConstantExpr *E) {
+ auto Imp = importSeq(E->getSubExpr());
+ if (!Imp)
+ return Imp.takeError();
+
+ Expr *ToSubExpr;
+ std::tie(ToSubExpr) = *Imp;
+
+ return ConstantExpr::Create(Importer.getToContext(), ToSubExpr);
+}
+
+ExpectedStmt ASTNodeImporter::VisitParenExpr(ParenExpr *E) {
+ auto Imp = importSeq(E->getLParen(), E->getRParen(), E->getSubExpr());
+ if (!Imp)
+ return Imp.takeError();
+
+ SourceLocation ToLParen, ToRParen;
+ Expr *ToSubExpr;
+ std::tie(ToLParen, ToRParen, ToSubExpr) = *Imp;
return new (Importer.getToContext())
- ParenExpr(Importer.Import(E->getLParen()),
- Importer.Import(E->getRParen()),
- SubExpr);
+ ParenExpr(ToLParen, ToRParen, ToSubExpr);
}
-Expr *ASTNodeImporter::VisitParenListExpr(ParenListExpr *E) {
- SmallVector<Expr *, 4> Exprs(E->getNumExprs());
- if (ImportContainerChecked(E->exprs(), Exprs))
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitParenListExpr(ParenListExpr *E) {
+ SmallVector<Expr *, 4> ToExprs(E->getNumExprs());
+ if (Error Err = ImportContainerChecked(E->exprs(), ToExprs))
+ return std::move(Err);
+
+ ExpectedSLoc ToLParenLocOrErr = import(E->getLParenLoc());
+ if (!ToLParenLocOrErr)
+ return ToLParenLocOrErr.takeError();
- return new (Importer.getToContext()) ParenListExpr(
- Importer.getToContext(), Importer.Import(E->getLParenLoc()),
- Exprs, Importer.Import(E->getLParenLoc()));
+ ExpectedSLoc ToRParenLocOrErr = import(E->getRParenLoc());
+ if (!ToRParenLocOrErr)
+ return ToRParenLocOrErr.takeError();
+
+ return ParenListExpr::Create(Importer.getToContext(), *ToLParenLocOrErr,
+ ToExprs, *ToRParenLocOrErr);
}
-Expr *ASTNodeImporter::VisitStmtExpr(StmtExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitStmtExpr(StmtExpr *E) {
+ auto Imp = importSeq(
+ E->getSubStmt(), E->getType(), E->getLParenLoc(), E->getRParenLoc());
+ if (!Imp)
+ return Imp.takeError();
- auto *ToSubStmt = cast_or_null<CompoundStmt>(
- Importer.Import(E->getSubStmt()));
- if (!ToSubStmt && E->getSubStmt())
- return nullptr;
+ CompoundStmt *ToSubStmt;
+ QualType ToType;
+ SourceLocation ToLParenLoc, ToRParenLoc;
+ std::tie(ToSubStmt, ToType, ToLParenLoc, ToRParenLoc) = *Imp;
- return new (Importer.getToContext()) StmtExpr(ToSubStmt, T,
- Importer.Import(E->getLParenLoc()), Importer.Import(E->getRParenLoc()));
+ return new (Importer.getToContext()) StmtExpr(
+ ToSubStmt, ToType, ToLParenLoc, ToRParenLoc);
}
-Expr *ASTNodeImporter::VisitUnaryOperator(UnaryOperator *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitUnaryOperator(UnaryOperator *E) {
+ auto Imp = importSeq(
+ E->getSubExpr(), E->getType(), E->getOperatorLoc());
+ if (!Imp)
+ return Imp.takeError();
- Expr *SubExpr = Importer.Import(E->getSubExpr());
- if (!SubExpr)
- return nullptr;
+ Expr *ToSubExpr;
+ QualType ToType;
+ SourceLocation ToOperatorLoc;
+ std::tie(ToSubExpr, ToType, ToOperatorLoc) = *Imp;
return new (Importer.getToContext()) UnaryOperator(
- SubExpr, E->getOpcode(), T, E->getValueKind(), E->getObjectKind(),
- Importer.Import(E->getOperatorLoc()), E->canOverflow());
+ ToSubExpr, E->getOpcode(), ToType, E->getValueKind(), E->getObjectKind(),
+ ToOperatorLoc, E->canOverflow());
}
-Expr *
+ExpectedStmt
ASTNodeImporter::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E) {
- QualType ResultType = Importer.Import(E->getType());
+ auto Imp = importSeq(E->getType(), E->getOperatorLoc(), E->getRParenLoc());
+ if (!Imp)
+ return Imp.takeError();
+
+ QualType ToType;
+ SourceLocation ToOperatorLoc, ToRParenLoc;
+ std::tie(ToType, ToOperatorLoc, ToRParenLoc) = *Imp;
if (E->isArgumentType()) {
- TypeSourceInfo *TInfo = Importer.Import(E->getArgumentTypeInfo());
- if (!TInfo)
- return nullptr;
+ Expected<TypeSourceInfo *> ToArgumentTypeInfoOrErr =
+ import(E->getArgumentTypeInfo());
+ if (!ToArgumentTypeInfoOrErr)
+ return ToArgumentTypeInfoOrErr.takeError();
- return new (Importer.getToContext()) UnaryExprOrTypeTraitExpr(E->getKind(),
- TInfo, ResultType,
- Importer.Import(E->getOperatorLoc()),
- Importer.Import(E->getRParenLoc()));
+ return new (Importer.getToContext()) UnaryExprOrTypeTraitExpr(
+ E->getKind(), *ToArgumentTypeInfoOrErr, ToType, ToOperatorLoc,
+ ToRParenLoc);
}
- Expr *SubExpr = Importer.Import(E->getArgumentExpr());
- if (!SubExpr)
- return nullptr;
+ ExpectedExpr ToArgumentExprOrErr = import(E->getArgumentExpr());
+ if (!ToArgumentExprOrErr)
+ return ToArgumentExprOrErr.takeError();
- return new (Importer.getToContext()) UnaryExprOrTypeTraitExpr(E->getKind(),
- SubExpr, ResultType,
- Importer.Import(E->getOperatorLoc()),
- Importer.Import(E->getRParenLoc()));
+ return new (Importer.getToContext()) UnaryExprOrTypeTraitExpr(
+ E->getKind(), *ToArgumentExprOrErr, ToType, ToOperatorLoc, ToRParenLoc);
}
-Expr *ASTNodeImporter::VisitBinaryOperator(BinaryOperator *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
-
- Expr *LHS = Importer.Import(E->getLHS());
- if (!LHS)
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitBinaryOperator(BinaryOperator *E) {
+ auto Imp = importSeq(
+ E->getLHS(), E->getRHS(), E->getType(), E->getOperatorLoc());
+ if (!Imp)
+ return Imp.takeError();
- Expr *RHS = Importer.Import(E->getRHS());
- if (!RHS)
- return nullptr;
+ Expr *ToLHS, *ToRHS;
+ QualType ToType;
+ SourceLocation ToOperatorLoc;
+ std::tie(ToLHS, ToRHS, ToType, ToOperatorLoc) = *Imp;
- return new (Importer.getToContext()) BinaryOperator(LHS, RHS, E->getOpcode(),
- T, E->getValueKind(),
- E->getObjectKind(),
- Importer.Import(E->getOperatorLoc()),
- E->getFPFeatures());
+ return new (Importer.getToContext()) BinaryOperator(
+ ToLHS, ToRHS, E->getOpcode(), ToType, E->getValueKind(),
+ E->getObjectKind(), ToOperatorLoc, E->getFPFeatures());
}
-Expr *ASTNodeImporter::VisitConditionalOperator(ConditionalOperator *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitConditionalOperator(ConditionalOperator *E) {
+ auto Imp = importSeq(
+ E->getCond(), E->getQuestionLoc(), E->getLHS(), E->getColonLoc(),
+ E->getRHS(), E->getType());
+ if (!Imp)
+ return Imp.takeError();
- Expr *ToLHS = Importer.Import(E->getLHS());
- if (!ToLHS)
- return nullptr;
-
- Expr *ToRHS = Importer.Import(E->getRHS());
- if (!ToRHS)
- return nullptr;
-
- Expr *ToCond = Importer.Import(E->getCond());
- if (!ToCond)
- return nullptr;
+ Expr *ToCond, *ToLHS, *ToRHS;
+ SourceLocation ToQuestionLoc, ToColonLoc;
+ QualType ToType;
+ std::tie(ToCond, ToQuestionLoc, ToLHS, ToColonLoc, ToRHS, ToType) = *Imp;
return new (Importer.getToContext()) ConditionalOperator(
- ToCond, Importer.Import(E->getQuestionLoc()),
- ToLHS, Importer.Import(E->getColonLoc()),
- ToRHS, T, E->getValueKind(), E->getObjectKind());
+ ToCond, ToQuestionLoc, ToLHS, ToColonLoc, ToRHS, ToType,
+ E->getValueKind(), E->getObjectKind());
}
-Expr *ASTNodeImporter::VisitBinaryConditionalOperator(
+ExpectedStmt ASTNodeImporter::VisitBinaryConditionalOperator(
BinaryConditionalOperator *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
-
- Expr *Common = Importer.Import(E->getCommon());
- if (!Common)
- return nullptr;
-
- Expr *Cond = Importer.Import(E->getCond());
- if (!Cond)
- return nullptr;
-
- auto *OpaqueValue = cast_or_null<OpaqueValueExpr>(
- Importer.Import(E->getOpaqueValue()));
- if (!OpaqueValue)
- return nullptr;
-
- Expr *TrueExpr = Importer.Import(E->getTrueExpr());
- if (!TrueExpr)
- return nullptr;
-
- Expr *FalseExpr = Importer.Import(E->getFalseExpr());
- if (!FalseExpr)
- return nullptr;
+ auto Imp = importSeq(
+ E->getCommon(), E->getOpaqueValue(), E->getCond(), E->getTrueExpr(),
+ E->getFalseExpr(), E->getQuestionLoc(), E->getColonLoc(), E->getType());
+ if (!Imp)
+ return Imp.takeError();
+
+ Expr *ToCommon, *ToCond, *ToTrueExpr, *ToFalseExpr;
+ OpaqueValueExpr *ToOpaqueValue;
+ SourceLocation ToQuestionLoc, ToColonLoc;
+ QualType ToType;
+ std::tie(
+ ToCommon, ToOpaqueValue, ToCond, ToTrueExpr, ToFalseExpr, ToQuestionLoc,
+ ToColonLoc, ToType) = *Imp;
return new (Importer.getToContext()) BinaryConditionalOperator(
- Common, OpaqueValue, Cond, TrueExpr, FalseExpr,
- Importer.Import(E->getQuestionLoc()), Importer.Import(E->getColonLoc()),
- T, E->getValueKind(), E->getObjectKind());
-}
-
-Expr *ASTNodeImporter::VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
-
- TypeSourceInfo *ToQueried = Importer.Import(E->getQueriedTypeSourceInfo());
- if (!ToQueried)
- return nullptr;
-
- Expr *Dim = Importer.Import(E->getDimensionExpression());
- if (!Dim && E->getDimensionExpression())
- return nullptr;
+ ToCommon, ToOpaqueValue, ToCond, ToTrueExpr, ToFalseExpr,
+ ToQuestionLoc, ToColonLoc, ToType, E->getValueKind(),
+ E->getObjectKind());
+}
+
+ExpectedStmt ASTNodeImporter::VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E) {
+ auto Imp = importSeq(
+ E->getBeginLoc(), E->getQueriedTypeSourceInfo(),
+ E->getDimensionExpression(), E->getEndLoc(), E->getType());
+ if (!Imp)
+ return Imp.takeError();
+
+ SourceLocation ToBeginLoc, ToEndLoc;
+ TypeSourceInfo *ToQueriedTypeSourceInfo;
+ Expr *ToDimensionExpression;
+ QualType ToType;
+ std::tie(
+ ToBeginLoc, ToQueriedTypeSourceInfo, ToDimensionExpression, ToEndLoc,
+ ToType) = *Imp;
return new (Importer.getToContext()) ArrayTypeTraitExpr(
- Importer.Import(E->getBeginLoc()), E->getTrait(), ToQueried,
- E->getValue(), Dim, Importer.Import(E->getEndLoc()), T);
+ ToBeginLoc, E->getTrait(), ToQueriedTypeSourceInfo, E->getValue(),
+ ToDimensionExpression, ToEndLoc, ToType);
}
-Expr *ASTNodeImporter::VisitExpressionTraitExpr(ExpressionTraitExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitExpressionTraitExpr(ExpressionTraitExpr *E) {
+ auto Imp = importSeq(
+ E->getBeginLoc(), E->getQueriedExpression(), E->getEndLoc(), E->getType());
+ if (!Imp)
+ return Imp.takeError();
- Expr *ToQueried = Importer.Import(E->getQueriedExpression());
- if (!ToQueried)
- return nullptr;
+ SourceLocation ToBeginLoc, ToEndLoc;
+ Expr *ToQueriedExpression;
+ QualType ToType;
+ std::tie(ToBeginLoc, ToQueriedExpression, ToEndLoc, ToType) = *Imp;
return new (Importer.getToContext()) ExpressionTraitExpr(
- Importer.Import(E->getBeginLoc()), E->getTrait(), ToQueried,
- E->getValue(), Importer.Import(E->getEndLoc()), T);
+ ToBeginLoc, E->getTrait(), ToQueriedExpression, E->getValue(),
+ ToEndLoc, ToType);
}
-Expr *ASTNodeImporter::VisitOpaqueValueExpr(OpaqueValueExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitOpaqueValueExpr(OpaqueValueExpr *E) {
+ auto Imp = importSeq(
+ E->getLocation(), E->getType(), E->getSourceExpr());
+ if (!Imp)
+ return Imp.takeError();
- Expr *SourceExpr = Importer.Import(E->getSourceExpr());
- if (!SourceExpr && E->getSourceExpr())
- return nullptr;
+ SourceLocation ToLocation;
+ QualType ToType;
+ Expr *ToSourceExpr;
+ std::tie(ToLocation, ToType, ToSourceExpr) = *Imp;
return new (Importer.getToContext()) OpaqueValueExpr(
- Importer.Import(E->getLocation()), T, E->getValueKind(),
- E->getObjectKind(), SourceExpr);
+ ToLocation, ToType, E->getValueKind(), E->getObjectKind(), ToSourceExpr);
}
-Expr *ASTNodeImporter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
-
- Expr *ToLHS = Importer.Import(E->getLHS());
- if (!ToLHS)
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
+ auto Imp = importSeq(
+ E->getLHS(), E->getRHS(), E->getType(), E->getRBracketLoc());
+ if (!Imp)
+ return Imp.takeError();
- Expr *ToRHS = Importer.Import(E->getRHS());
- if (!ToRHS)
- return nullptr;
+ Expr *ToLHS, *ToRHS;
+ SourceLocation ToRBracketLoc;
+ QualType ToType;
+ std::tie(ToLHS, ToRHS, ToType, ToRBracketLoc) = *Imp;
return new (Importer.getToContext()) ArraySubscriptExpr(
- ToLHS, ToRHS, T, E->getValueKind(), E->getObjectKind(),
- Importer.Import(E->getRBracketLoc()));
+ ToLHS, ToRHS, ToType, E->getValueKind(), E->getObjectKind(),
+ ToRBracketLoc);
}
-Expr *ASTNodeImporter::VisitCompoundAssignOperator(CompoundAssignOperator *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
-
- QualType CompLHSType = Importer.Import(E->getComputationLHSType());
- if (CompLHSType.isNull())
- return nullptr;
-
- QualType CompResultType = Importer.Import(E->getComputationResultType());
- if (CompResultType.isNull())
- return nullptr;
+ExpectedStmt
+ASTNodeImporter::VisitCompoundAssignOperator(CompoundAssignOperator *E) {
+ auto Imp = importSeq(
+ E->getLHS(), E->getRHS(), E->getType(), E->getComputationLHSType(),
+ E->getComputationResultType(), E->getOperatorLoc());
+ if (!Imp)
+ return Imp.takeError();
- Expr *LHS = Importer.Import(E->getLHS());
- if (!LHS)
- return nullptr;
-
- Expr *RHS = Importer.Import(E->getRHS());
- if (!RHS)
- return nullptr;
+ Expr *ToLHS, *ToRHS;
+ QualType ToType, ToComputationLHSType, ToComputationResultType;
+ SourceLocation ToOperatorLoc;
+ std::tie(ToLHS, ToRHS, ToType, ToComputationLHSType, ToComputationResultType,
+ ToOperatorLoc) = *Imp;
- return new (Importer.getToContext())
- CompoundAssignOperator(LHS, RHS, E->getOpcode(),
- T, E->getValueKind(),
- E->getObjectKind(),
- CompLHSType, CompResultType,
- Importer.Import(E->getOperatorLoc()),
- E->getFPFeatures());
+ return new (Importer.getToContext()) CompoundAssignOperator(
+ ToLHS, ToRHS, E->getOpcode(), ToType, E->getValueKind(),
+ E->getObjectKind(), ToComputationLHSType, ToComputationResultType,
+ ToOperatorLoc, E->getFPFeatures());
}
-bool ASTNodeImporter::ImportCastPath(CastExpr *CE, CXXCastPath &Path) {
+Expected<CXXCastPath>
+ASTNodeImporter::ImportCastPath(CastExpr *CE) {
+ CXXCastPath Path;
for (auto I = CE->path_begin(), E = CE->path_end(); I != E; ++I) {
- if (CXXBaseSpecifier *Spec = Importer.Import(*I))
- Path.push_back(Spec);
+ if (auto SpecOrErr = import(*I))
+ Path.push_back(*SpecOrErr);
else
- return true;
+ return SpecOrErr.takeError();
}
- return false;
+ return Path;
}
-Expr *ASTNodeImporter::VisitImplicitCastExpr(ImplicitCastExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitImplicitCastExpr(ImplicitCastExpr *E) {
+ ExpectedType ToTypeOrErr = import(E->getType());
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
- Expr *SubExpr = Importer.Import(E->getSubExpr());
- if (!SubExpr)
- return nullptr;
+ ExpectedExpr ToSubExprOrErr = import(E->getSubExpr());
+ if (!ToSubExprOrErr)
+ return ToSubExprOrErr.takeError();
- CXXCastPath BasePath;
- if (ImportCastPath(E, BasePath))
- return nullptr;
+ Expected<CXXCastPath> ToBasePathOrErr = ImportCastPath(E);
+ if (!ToBasePathOrErr)
+ return ToBasePathOrErr.takeError();
- return ImplicitCastExpr::Create(Importer.getToContext(), T, E->getCastKind(),
- SubExpr, &BasePath, E->getValueKind());
+ return ImplicitCastExpr::Create(
+ Importer.getToContext(), *ToTypeOrErr, E->getCastKind(), *ToSubExprOrErr,
+ &(*ToBasePathOrErr), E->getValueKind());
}
-Expr *ASTNodeImporter::VisitExplicitCastExpr(ExplicitCastExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
-
- Expr *SubExpr = Importer.Import(E->getSubExpr());
- if (!SubExpr)
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitExplicitCastExpr(ExplicitCastExpr *E) {
+ auto Imp1 = importSeq(
+ E->getType(), E->getSubExpr(), E->getTypeInfoAsWritten());
+ if (!Imp1)
+ return Imp1.takeError();
- TypeSourceInfo *TInfo = Importer.Import(E->getTypeInfoAsWritten());
- if (!TInfo && E->getTypeInfoAsWritten())
- return nullptr;
+ QualType ToType;
+ Expr *ToSubExpr;
+ TypeSourceInfo *ToTypeInfoAsWritten;
+ std::tie(ToType, ToSubExpr, ToTypeInfoAsWritten) = *Imp1;
- CXXCastPath BasePath;
- if (ImportCastPath(E, BasePath))
- return nullptr;
+ Expected<CXXCastPath> ToBasePathOrErr = ImportCastPath(E);
+ if (!ToBasePathOrErr)
+ return ToBasePathOrErr.takeError();
+ CXXCastPath *ToBasePath = &(*ToBasePathOrErr);
switch (E->getStmtClass()) {
case Stmt::CStyleCastExprClass: {
auto *CCE = cast<CStyleCastExpr>(E);
- return CStyleCastExpr::Create(Importer.getToContext(), T,
- E->getValueKind(), E->getCastKind(),
- SubExpr, &BasePath, TInfo,
- Importer.Import(CCE->getLParenLoc()),
- Importer.Import(CCE->getRParenLoc()));
+ ExpectedSLoc ToLParenLocOrErr = import(CCE->getLParenLoc());
+ if (!ToLParenLocOrErr)
+ return ToLParenLocOrErr.takeError();
+ ExpectedSLoc ToRParenLocOrErr = import(CCE->getRParenLoc());
+ if (!ToRParenLocOrErr)
+ return ToRParenLocOrErr.takeError();
+ return CStyleCastExpr::Create(
+ Importer.getToContext(), ToType, E->getValueKind(), E->getCastKind(),
+ ToSubExpr, ToBasePath, ToTypeInfoAsWritten, *ToLParenLocOrErr,
+ *ToRParenLocOrErr);
}
case Stmt::CXXFunctionalCastExprClass: {
auto *FCE = cast<CXXFunctionalCastExpr>(E);
- return CXXFunctionalCastExpr::Create(Importer.getToContext(), T,
- E->getValueKind(), TInfo,
- E->getCastKind(), SubExpr, &BasePath,
- Importer.Import(FCE->getLParenLoc()),
- Importer.Import(FCE->getRParenLoc()));
+ ExpectedSLoc ToLParenLocOrErr = import(FCE->getLParenLoc());
+ if (!ToLParenLocOrErr)
+ return ToLParenLocOrErr.takeError();
+ ExpectedSLoc ToRParenLocOrErr = import(FCE->getRParenLoc());
+ if (!ToRParenLocOrErr)
+ return ToRParenLocOrErr.takeError();
+ return CXXFunctionalCastExpr::Create(
+ Importer.getToContext(), ToType, E->getValueKind(), ToTypeInfoAsWritten,
+ E->getCastKind(), ToSubExpr, ToBasePath, *ToLParenLocOrErr,
+ *ToRParenLocOrErr);
}
case Stmt::ObjCBridgedCastExprClass: {
- auto *OCE = cast<ObjCBridgedCastExpr>(E);
- return new (Importer.getToContext()) ObjCBridgedCastExpr(
- Importer.Import(OCE->getLParenLoc()), OCE->getBridgeKind(),
- E->getCastKind(), Importer.Import(OCE->getBridgeKeywordLoc()),
- TInfo, SubExpr);
+ auto *OCE = cast<ObjCBridgedCastExpr>(E);
+ ExpectedSLoc ToLParenLocOrErr = import(OCE->getLParenLoc());
+ if (!ToLParenLocOrErr)
+ return ToLParenLocOrErr.takeError();
+ ExpectedSLoc ToBridgeKeywordLocOrErr = import(OCE->getBridgeKeywordLoc());
+ if (!ToBridgeKeywordLocOrErr)
+ return ToBridgeKeywordLocOrErr.takeError();
+ return new (Importer.getToContext()) ObjCBridgedCastExpr(
+ *ToLParenLocOrErr, OCE->getBridgeKind(), E->getCastKind(),
+ *ToBridgeKeywordLocOrErr, ToTypeInfoAsWritten, ToSubExpr);
}
default:
llvm_unreachable("Cast expression of unsupported type!");
- return nullptr;
+ return make_error<ImportError>(ImportError::UnsupportedConstruct);
}
}
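// [Editor's note - illustrative sketch, not part of the patch.] Where a single
// location or type was previously passed through Importer.Import(...) inline
// in the constructor call, it is now imported up front and checked, e.g.
// (mirroring the hunk above; CCE is the CStyleCastExpr local from that hunk):
//
//   ExpectedSLoc ToLParenLocOrErr = import(CCE->getLParenLoc());
//   if (!ToLParenLocOrErr)
//     return ToLParenLocOrErr.takeError();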
-Expr *ASTNodeImporter::VisitOffsetOfExpr(OffsetOfExpr *OE) {
- QualType T = Importer.Import(OE->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitOffsetOfExpr(OffsetOfExpr *E) {
+ SmallVector<OffsetOfNode, 4> ToNodes;
+ for (int I = 0, N = E->getNumComponents(); I < N; ++I) {
+ const OffsetOfNode &FromNode = E->getComponent(I);
- SmallVector<OffsetOfNode, 4> Nodes;
- for (int I = 0, E = OE->getNumComponents(); I < E; ++I) {
- const OffsetOfNode &Node = OE->getComponent(I);
+ SourceLocation ToBeginLoc, ToEndLoc;
+ if (FromNode.getKind() != OffsetOfNode::Base) {
+ auto Imp = importSeq(FromNode.getBeginLoc(), FromNode.getEndLoc());
+ if (!Imp)
+ return Imp.takeError();
+ std::tie(ToBeginLoc, ToEndLoc) = *Imp;
+ }
- switch (Node.getKind()) {
+ switch (FromNode.getKind()) {
case OffsetOfNode::Array:
- Nodes.push_back(OffsetOfNode(Importer.Import(Node.getBeginLoc()),
- Node.getArrayExprIndex(),
- Importer.Import(Node.getEndLoc())));
+ ToNodes.push_back(
+ OffsetOfNode(ToBeginLoc, FromNode.getArrayExprIndex(), ToEndLoc));
break;
-
case OffsetOfNode::Base: {
- CXXBaseSpecifier *BS = Importer.Import(Node.getBase());
- if (!BS && Node.getBase())
- return nullptr;
- Nodes.push_back(OffsetOfNode(BS));
+ auto ToBSOrErr = import(FromNode.getBase());
+ if (!ToBSOrErr)
+ return ToBSOrErr.takeError();
+ ToNodes.push_back(OffsetOfNode(*ToBSOrErr));
break;
}
case OffsetOfNode::Field: {
- auto *FD = cast_or_null<FieldDecl>(Importer.Import(Node.getField()));
- if (!FD)
- return nullptr;
- Nodes.push_back(OffsetOfNode(Importer.Import(Node.getBeginLoc()), FD,
- Importer.Import(Node.getEndLoc())));
+ auto ToFieldOrErr = import(FromNode.getField());
+ if (!ToFieldOrErr)
+ return ToFieldOrErr.takeError();
+ ToNodes.push_back(OffsetOfNode(ToBeginLoc, *ToFieldOrErr, ToEndLoc));
break;
}
case OffsetOfNode::Identifier: {
- IdentifierInfo *ToII = Importer.Import(Node.getFieldName());
- if (!ToII)
- return nullptr;
- Nodes.push_back(OffsetOfNode(Importer.Import(Node.getBeginLoc()), ToII,
- Importer.Import(Node.getEndLoc())));
+ IdentifierInfo *ToII = Importer.Import(FromNode.getFieldName());
+ ToNodes.push_back(OffsetOfNode(ToBeginLoc, ToII, ToEndLoc));
break;
}
}
}
- SmallVector<Expr *, 4> Exprs(OE->getNumExpressions());
- for (int I = 0, E = OE->getNumExpressions(); I < E; ++I) {
- Expr *ToIndexExpr = Importer.Import(OE->getIndexExpr(I));
- if (!ToIndexExpr)
- return nullptr;
- Exprs[I] = ToIndexExpr;
+ SmallVector<Expr *, 4> ToExprs(E->getNumExpressions());
+ for (int I = 0, N = E->getNumExpressions(); I < N; ++I) {
+ ExpectedExpr ToIndexExprOrErr = import(E->getIndexExpr(I));
+ if (!ToIndexExprOrErr)
+ return ToIndexExprOrErr.takeError();
+ ToExprs[I] = *ToIndexExprOrErr;
}
- TypeSourceInfo *TInfo = Importer.Import(OE->getTypeSourceInfo());
- if (!TInfo && OE->getTypeSourceInfo())
- return nullptr;
+ auto Imp = importSeq(
+ E->getType(), E->getTypeSourceInfo(), E->getOperatorLoc(),
+ E->getRParenLoc());
+ if (!Imp)
+ return Imp.takeError();
- return OffsetOfExpr::Create(Importer.getToContext(), T,
- Importer.Import(OE->getOperatorLoc()),
- TInfo, Nodes, Exprs,
- Importer.Import(OE->getRParenLoc()));
+ QualType ToType;
+ TypeSourceInfo *ToTypeSourceInfo;
+ SourceLocation ToOperatorLoc, ToRParenLoc;
+ std::tie(ToType, ToTypeSourceInfo, ToOperatorLoc, ToRParenLoc) = *Imp;
+
+ return OffsetOfExpr::Create(
+ Importer.getToContext(), ToType, ToOperatorLoc, ToTypeSourceInfo, ToNodes,
+ ToExprs, ToRParenLoc);
}
-Expr *ASTNodeImporter::VisitCXXNoexceptExpr(CXXNoexceptExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitCXXNoexceptExpr(CXXNoexceptExpr *E) {
+ auto Imp = importSeq(
+ E->getType(), E->getOperand(), E->getBeginLoc(), E->getEndLoc());
+ if (!Imp)
+ return Imp.takeError();
- Expr *Operand = Importer.Import(E->getOperand());
- if (!Operand)
- return nullptr;
+ QualType ToType;
+ Expr *ToOperand;
+ SourceLocation ToBeginLoc, ToEndLoc;
+ std::tie(ToType, ToOperand, ToBeginLoc, ToEndLoc) = *Imp;
- CanThrowResult CanThrow;
+ CanThrowResult ToCanThrow;
if (E->isValueDependent())
- CanThrow = CT_Dependent;
+ ToCanThrow = CT_Dependent;
else
- CanThrow = E->getValue() ? CT_Can : CT_Cannot;
+ ToCanThrow = E->getValue() ? CT_Can : CT_Cannot;
- return new (Importer.getToContext())
- CXXNoexceptExpr(T, Operand, CanThrow, Importer.Import(E->getBeginLoc()),
- Importer.Import(E->getEndLoc()));
+ return new (Importer.getToContext()) CXXNoexceptExpr(
+ ToType, ToOperand, ToCanThrow, ToBeginLoc, ToEndLoc);
}
-Expr *ASTNodeImporter::VisitCXXThrowExpr(CXXThrowExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitCXXThrowExpr(CXXThrowExpr *E) {
+ auto Imp = importSeq(E->getSubExpr(), E->getType(), E->getThrowLoc());
+ if (!Imp)
+ return Imp.takeError();
- Expr *SubExpr = Importer.Import(E->getSubExpr());
- if (!SubExpr && E->getSubExpr())
- return nullptr;
+ Expr *ToSubExpr;
+ QualType ToType;
+ SourceLocation ToThrowLoc;
+ std::tie(ToSubExpr, ToType, ToThrowLoc) = *Imp;
return new (Importer.getToContext()) CXXThrowExpr(
- SubExpr, T, Importer.Import(E->getThrowLoc()),
- E->isThrownVariableInScope());
+ ToSubExpr, ToType, ToThrowLoc, E->isThrownVariableInScope());
}
-Expr *ASTNodeImporter::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
- auto *Param = cast_or_null<ParmVarDecl>(Importer.Import(E->getParam()));
- if (!Param)
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
+ ExpectedSLoc ToUsedLocOrErr = import(E->getUsedLocation());
+ if (!ToUsedLocOrErr)
+ return ToUsedLocOrErr.takeError();
+
+ auto ToParamOrErr = import(E->getParam());
+ if (!ToParamOrErr)
+ return ToParamOrErr.takeError();
return CXXDefaultArgExpr::Create(
- Importer.getToContext(), Importer.Import(E->getUsedLocation()), Param);
+ Importer.getToContext(), *ToUsedLocOrErr, *ToParamOrErr);
}
-Expr *ASTNodeImporter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt
+ASTNodeImporter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
+ auto Imp = importSeq(
+ E->getType(), E->getTypeSourceInfo(), E->getRParenLoc());
+ if (!Imp)
+ return Imp.takeError();
- TypeSourceInfo *TypeInfo = Importer.Import(E->getTypeSourceInfo());
- if (!TypeInfo)
- return nullptr;
+ QualType ToType;
+ TypeSourceInfo *ToTypeSourceInfo;
+ SourceLocation ToRParenLoc;
+ std::tie(ToType, ToTypeSourceInfo, ToRParenLoc) = *Imp;
return new (Importer.getToContext()) CXXScalarValueInitExpr(
- T, TypeInfo, Importer.Import(E->getRParenLoc()));
+ ToType, ToTypeSourceInfo, ToRParenLoc);
}
-Expr *ASTNodeImporter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
- Expr *SubExpr = Importer.Import(E->getSubExpr());
- if (!SubExpr)
- return nullptr;
+ExpectedStmt
+ASTNodeImporter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
+ ExpectedExpr ToSubExprOrErr = import(E->getSubExpr());
+ if (!ToSubExprOrErr)
+ return ToSubExprOrErr.takeError();
- auto *Dtor = cast_or_null<CXXDestructorDecl>(
- Importer.Import(const_cast<CXXDestructorDecl *>(
- E->getTemporary()->getDestructor())));
- if (!Dtor)
- return nullptr;
+ auto ToDtorOrErr = import(E->getTemporary()->getDestructor());
+ if (!ToDtorOrErr)
+ return ToDtorOrErr.takeError();
ASTContext &ToCtx = Importer.getToContext();
- CXXTemporary *Temp = CXXTemporary::Create(ToCtx, Dtor);
- return CXXBindTemporaryExpr::Create(ToCtx, Temp, SubExpr);
+ CXXTemporary *Temp = CXXTemporary::Create(ToCtx, *ToDtorOrErr);
+ return CXXBindTemporaryExpr::Create(ToCtx, Temp, *ToSubExprOrErr);
}
-Expr *ASTNodeImporter::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *CE) {
- QualType T = Importer.Import(CE->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt
+ASTNodeImporter::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *E) {
+ auto Imp = importSeq(
+ E->getConstructor(), E->getType(), E->getTypeSourceInfo(),
+ E->getParenOrBraceRange());
+ if (!Imp)
+ return Imp.takeError();
- TypeSourceInfo *TInfo = Importer.Import(CE->getTypeSourceInfo());
- if (!TInfo)
- return nullptr;
-
- SmallVector<Expr *, 8> Args(CE->getNumArgs());
- if (ImportContainerChecked(CE->arguments(), Args))
- return nullptr;
+ CXXConstructorDecl *ToConstructor;
+ QualType ToType;
+ TypeSourceInfo *ToTypeSourceInfo;
+ SourceRange ToParenOrBraceRange;
+ std::tie(ToConstructor, ToType, ToTypeSourceInfo, ToParenOrBraceRange) = *Imp;
- auto *Ctor = cast_or_null<CXXConstructorDecl>(
- Importer.Import(CE->getConstructor()));
- if (!Ctor)
- return nullptr;
+ SmallVector<Expr *, 8> ToArgs(E->getNumArgs());
+ if (Error Err = ImportContainerChecked(E->arguments(), ToArgs))
+ return std::move(Err);
return new (Importer.getToContext()) CXXTemporaryObjectExpr(
- Importer.getToContext(), Ctor, T, TInfo, Args,
- Importer.Import(CE->getParenOrBraceRange()), CE->hadMultipleCandidates(),
- CE->isListInitialization(), CE->isStdInitListInitialization(),
- CE->requiresZeroInitialization());
+ Importer.getToContext(), ToConstructor, ToType, ToTypeSourceInfo, ToArgs,
+ ToParenOrBraceRange, E->hadMultipleCandidates(),
+ E->isListInitialization(), E->isStdInitListInitialization(),
+ E->requiresZeroInitialization());
}
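// [Editor's note - illustrative sketch, not part of the patch.] Bulk imports
// of argument lists change the same way: ImportContainerChecked() now reports
// an llvm::Error instead of a bool, and a failing element aborts the visitor:
//
//   SmallVector<Expr *, 8> ToArgs(E->getNumArgs());
//   if (Error Err = ImportContainerChecked(E->arguments(), ToArgs))
//     return std::move(Err);   // wraps the Error into the ExpectedStmt result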
-Expr *
+ExpectedStmt
ASTNodeImporter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
-
- Expr *TempE = Importer.Import(E->GetTemporaryExpr());
- if (!TempE)
- return nullptr;
+ auto Imp = importSeq(
+ E->getType(), E->GetTemporaryExpr(), E->getExtendingDecl());
+ if (!Imp)
+ return Imp.takeError();
- auto *ExtendedBy = cast_or_null<ValueDecl>(
- Importer.Import(const_cast<ValueDecl *>(E->getExtendingDecl())));
- if (!ExtendedBy && E->getExtendingDecl())
- return nullptr;
+ QualType ToType;
+ Expr *ToTemporaryExpr;
+ const ValueDecl *ToExtendingDecl;
+ std::tie(ToType, ToTemporaryExpr, ToExtendingDecl) = *Imp;
auto *ToMTE = new (Importer.getToContext()) MaterializeTemporaryExpr(
- T, TempE, E->isBoundToLvalueReference());
+ ToType, ToTemporaryExpr, E->isBoundToLvalueReference());
// FIXME: Should ManglingNumber get numbers associated with 'to' context?
- ToMTE->setExtendingDecl(ExtendedBy, E->getManglingNumber());
+ ToMTE->setExtendingDecl(ToExtendingDecl, E->getManglingNumber());
return ToMTE;
}
-Expr *ASTNodeImporter::VisitPackExpansionExpr(PackExpansionExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitPackExpansionExpr(PackExpansionExpr *E) {
+ auto Imp = importSeq(
+ E->getType(), E->getPattern(), E->getEllipsisLoc());
+ if (!Imp)
+ return Imp.takeError();
- Expr *Pattern = Importer.Import(E->getPattern());
- if (!Pattern)
- return nullptr;
+ QualType ToType;
+ Expr *ToPattern;
+ SourceLocation ToEllipsisLoc;
+ std::tie(ToType, ToPattern, ToEllipsisLoc) = *Imp;
return new (Importer.getToContext()) PackExpansionExpr(
- T, Pattern, Importer.Import(E->getEllipsisLoc()),
- E->getNumExpansions());
+ ToType, ToPattern, ToEllipsisLoc, E->getNumExpansions());
}
-Expr *ASTNodeImporter::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
- auto *Pack = cast_or_null<NamedDecl>(Importer.Import(E->getPack()));
- if (!Pack)
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
+ auto Imp = importSeq(
+ E->getOperatorLoc(), E->getPack(), E->getPackLoc(), E->getRParenLoc());
+ if (!Imp)
+ return Imp.takeError();
- Optional<unsigned> Length;
+ SourceLocation ToOperatorLoc, ToPackLoc, ToRParenLoc;
+ NamedDecl *ToPack;
+ std::tie(ToOperatorLoc, ToPack, ToPackLoc, ToRParenLoc) = *Imp;
+ Optional<unsigned> Length;
if (!E->isValueDependent())
Length = E->getPackLength();
- SmallVector<TemplateArgument, 8> PartialArguments;
+ SmallVector<TemplateArgument, 8> ToPartialArguments;
if (E->isPartiallySubstituted()) {
- if (ImportTemplateArguments(E->getPartialArguments().data(),
- E->getPartialArguments().size(),
- PartialArguments))
- return nullptr;
+ if (Error Err = ImportTemplateArguments(
+ E->getPartialArguments().data(),
+ E->getPartialArguments().size(),
+ ToPartialArguments))
+ return std::move(Err);
}
return SizeOfPackExpr::Create(
- Importer.getToContext(), Importer.Import(E->getOperatorLoc()), Pack,
- Importer.Import(E->getPackLoc()), Importer.Import(E->getRParenLoc()),
- Length, PartialArguments);
-}
-
-Expr *ASTNodeImporter::VisitCXXNewExpr(CXXNewExpr *CE) {
- QualType T = Importer.Import(CE->getType());
- if (T.isNull())
- return nullptr;
-
- SmallVector<Expr *, 4> PlacementArgs(CE->getNumPlacementArgs());
- if (ImportContainerChecked(CE->placement_arguments(), PlacementArgs))
- return nullptr;
-
- auto *OperatorNewDecl = cast_or_null<FunctionDecl>(
- Importer.Import(CE->getOperatorNew()));
- if (!OperatorNewDecl && CE->getOperatorNew())
- return nullptr;
-
- auto *OperatorDeleteDecl = cast_or_null<FunctionDecl>(
- Importer.Import(CE->getOperatorDelete()));
- if (!OperatorDeleteDecl && CE->getOperatorDelete())
- return nullptr;
-
- Expr *ToInit = Importer.Import(CE->getInitializer());
- if (!ToInit && CE->getInitializer())
- return nullptr;
-
- TypeSourceInfo *TInfo = Importer.Import(CE->getAllocatedTypeSourceInfo());
- if (!TInfo)
- return nullptr;
-
- Expr *ToArrSize = Importer.Import(CE->getArraySize());
- if (!ToArrSize && CE->getArraySize())
- return nullptr;
+ Importer.getToContext(), ToOperatorLoc, ToPack, ToPackLoc, ToRParenLoc,
+ Length, ToPartialArguments);
+}
+
+
+ExpectedStmt ASTNodeImporter::VisitCXXNewExpr(CXXNewExpr *E) {
+ auto Imp = importSeq(
+ E->getOperatorNew(), E->getOperatorDelete(), E->getTypeIdParens(),
+ E->getArraySize(), E->getInitializer(), E->getType(),
+ E->getAllocatedTypeSourceInfo(), E->getSourceRange(),
+ E->getDirectInitRange());
+ if (!Imp)
+ return Imp.takeError();
+
+ FunctionDecl *ToOperatorNew, *ToOperatorDelete;
+ SourceRange ToTypeIdParens, ToSourceRange, ToDirectInitRange;
+ Expr *ToArraySize, *ToInitializer;
+ QualType ToType;
+ TypeSourceInfo *ToAllocatedTypeSourceInfo;
+ std::tie(
+ ToOperatorNew, ToOperatorDelete, ToTypeIdParens, ToArraySize, ToInitializer,
+ ToType, ToAllocatedTypeSourceInfo, ToSourceRange, ToDirectInitRange) = *Imp;
+
+ SmallVector<Expr *, 4> ToPlacementArgs(E->getNumPlacementArgs());
+ if (Error Err =
+ ImportContainerChecked(E->placement_arguments(), ToPlacementArgs))
+ return std::move(Err);
return new (Importer.getToContext()) CXXNewExpr(
- Importer.getToContext(),
- CE->isGlobalNew(),
- OperatorNewDecl, OperatorDeleteDecl,
- CE->passAlignment(),
- CE->doesUsualArrayDeleteWantSize(),
- PlacementArgs,
- Importer.Import(CE->getTypeIdParens()),
- ToArrSize, CE->getInitializationStyle(), ToInit, T, TInfo,
- Importer.Import(CE->getSourceRange()),
- Importer.Import(CE->getDirectInitRange()));
-}
-
-Expr *ASTNodeImporter::VisitCXXDeleteExpr(CXXDeleteExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ Importer.getToContext(), E->isGlobalNew(), ToOperatorNew,
+ ToOperatorDelete, E->passAlignment(), E->doesUsualArrayDeleteWantSize(),
+ ToPlacementArgs, ToTypeIdParens, ToArraySize, E->getInitializationStyle(),
+ ToInitializer, ToType, ToAllocatedTypeSourceInfo, ToSourceRange,
+ ToDirectInitRange);
+}
- auto *OperatorDeleteDecl = cast_or_null<FunctionDecl>(
- Importer.Import(E->getOperatorDelete()));
- if (!OperatorDeleteDecl && E->getOperatorDelete())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitCXXDeleteExpr(CXXDeleteExpr *E) {
+ auto Imp = importSeq(
+ E->getType(), E->getOperatorDelete(), E->getArgument(), E->getBeginLoc());
+ if (!Imp)
+ return Imp.takeError();
- Expr *ToArg = Importer.Import(E->getArgument());
- if (!ToArg && E->getArgument())
- return nullptr;
+ QualType ToType;
+ FunctionDecl *ToOperatorDelete;
+ Expr *ToArgument;
+ SourceLocation ToBeginLoc;
+ std::tie(ToType, ToOperatorDelete, ToArgument, ToBeginLoc) = *Imp;
return new (Importer.getToContext()) CXXDeleteExpr(
- T, E->isGlobalDelete(), E->isArrayForm(), E->isArrayFormAsWritten(),
- E->doesUsualArrayDeleteWantSize(), OperatorDeleteDecl, ToArg,
- Importer.Import(E->getBeginLoc()));
+ ToType, E->isGlobalDelete(), E->isArrayForm(), E->isArrayFormAsWritten(),
+ E->doesUsualArrayDeleteWantSize(), ToOperatorDelete, ToArgument,
+ ToBeginLoc);
}
-Expr *ASTNodeImporter::VisitCXXConstructExpr(CXXConstructExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitCXXConstructExpr(CXXConstructExpr *E) {
+ auto Imp = importSeq(
+ E->getType(), E->getLocation(), E->getConstructor(),
+ E->getParenOrBraceRange());
+ if (!Imp)
+ return Imp.takeError();
- auto *ToCCD =
- dyn_cast_or_null<CXXConstructorDecl>(Importer.Import(E->getConstructor()));
- if (!ToCCD)
- return nullptr;
+ QualType ToType;
+ SourceLocation ToLocation;
+ CXXConstructorDecl *ToConstructor;
+ SourceRange ToParenOrBraceRange;
+ std::tie(ToType, ToLocation, ToConstructor, ToParenOrBraceRange) = *Imp;
SmallVector<Expr *, 6> ToArgs(E->getNumArgs());
- if (ImportContainerChecked(E->arguments(), ToArgs))
- return nullptr;
+ if (Error Err = ImportContainerChecked(E->arguments(), ToArgs))
+ return std::move(Err);
- return CXXConstructExpr::Create(Importer.getToContext(), T,
- Importer.Import(E->getLocation()),
- ToCCD, E->isElidable(),
- ToArgs, E->hadMultipleCandidates(),
- E->isListInitialization(),
- E->isStdInitListInitialization(),
- E->requiresZeroInitialization(),
- E->getConstructionKind(),
- Importer.Import(E->getParenOrBraceRange()));
+ return CXXConstructExpr::Create(
+ Importer.getToContext(), ToType, ToLocation, ToConstructor,
+ E->isElidable(), ToArgs, E->hadMultipleCandidates(),
+ E->isListInitialization(), E->isStdInitListInitialization(),
+ E->requiresZeroInitialization(), E->getConstructionKind(),
+ ToParenOrBraceRange);
}
-Expr *ASTNodeImporter::VisitExprWithCleanups(ExprWithCleanups *EWC) {
- Expr *SubExpr = Importer.Import(EWC->getSubExpr());
- if (!SubExpr && EWC->getSubExpr())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitExprWithCleanups(ExprWithCleanups *E) {
+ ExpectedExpr ToSubExprOrErr = import(E->getSubExpr());
+ if (!ToSubExprOrErr)
+ return ToSubExprOrErr.takeError();
- SmallVector<ExprWithCleanups::CleanupObject, 8> Objs(EWC->getNumObjects());
- for (unsigned I = 0, E = EWC->getNumObjects(); I < E; I++)
- if (ExprWithCleanups::CleanupObject Obj =
- cast_or_null<BlockDecl>(Importer.Import(EWC->getObject(I))))
- Objs[I] = Obj;
- else
- return nullptr;
+ SmallVector<ExprWithCleanups::CleanupObject, 8> ToObjects(E->getNumObjects());
+ if (Error Err = ImportContainerChecked(E->getObjects(), ToObjects))
+ return std::move(Err);
- return ExprWithCleanups::Create(Importer.getToContext(),
- SubExpr, EWC->cleanupsHaveSideEffects(),
- Objs);
+ return ExprWithCleanups::Create(
+ Importer.getToContext(), *ToSubExprOrErr, E->cleanupsHaveSideEffects(),
+ ToObjects);
}
-Expr *ASTNodeImporter::VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
+ auto Imp = importSeq(
+ E->getCallee(), E->getType(), E->getRParenLoc());
+ if (!Imp)
+ return Imp.takeError();
- Expr *ToFn = Importer.Import(E->getCallee());
- if (!ToFn)
- return nullptr;
+ Expr *ToCallee;
+ QualType ToType;
+ SourceLocation ToRParenLoc;
+ std::tie(ToCallee, ToType, ToRParenLoc) = *Imp;
SmallVector<Expr *, 4> ToArgs(E->getNumArgs());
- if (ImportContainerChecked(E->arguments(), ToArgs))
- return nullptr;
+ if (Error Err = ImportContainerChecked(E->arguments(), ToArgs))
+ return std::move(Err);
return new (Importer.getToContext()) CXXMemberCallExpr(
- Importer.getToContext(), ToFn, ToArgs, T, E->getValueKind(),
- Importer.Import(E->getRParenLoc()));
-}
-
-Expr *ASTNodeImporter::VisitCXXThisExpr(CXXThisExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
-
- return new (Importer.getToContext())
- CXXThisExpr(Importer.Import(E->getLocation()), T, E->isImplicit());
-}
-
-Expr *ASTNodeImporter::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
-
- return new (Importer.getToContext())
- CXXBoolLiteralExpr(E->getValue(), T, Importer.Import(E->getLocation()));
-}
-
-
-Expr *ASTNodeImporter::VisitMemberExpr(MemberExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
-
- Expr *ToBase = Importer.Import(E->getBase());
- if (!ToBase && E->getBase())
- return nullptr;
-
- auto *ToMember = dyn_cast<ValueDecl>(Importer.Import(E->getMemberDecl()));
- if (!ToMember && E->getMemberDecl())
- return nullptr;
-
- auto *ToDecl =
- dyn_cast_or_null<NamedDecl>(Importer.Import(E->getFoundDecl().getDecl()));
- if (!ToDecl && E->getFoundDecl().getDecl())
- return nullptr;
+ Importer.getToContext(), ToCallee, ToArgs, ToType, E->getValueKind(),
+ ToRParenLoc);
+}
+
+ExpectedStmt ASTNodeImporter::VisitCXXThisExpr(CXXThisExpr *E) {
+ ExpectedType ToTypeOrErr = import(E->getType());
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
+
+ ExpectedSLoc ToLocationOrErr = import(E->getLocation());
+ if (!ToLocationOrErr)
+ return ToLocationOrErr.takeError();
+
+ return new (Importer.getToContext()) CXXThisExpr(
+ *ToLocationOrErr, *ToTypeOrErr, E->isImplicit());
+}
+
+ExpectedStmt ASTNodeImporter::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E) {
+ ExpectedType ToTypeOrErr = import(E->getType());
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
+
+ ExpectedSLoc ToLocationOrErr = import(E->getLocation());
+ if (!ToLocationOrErr)
+ return ToLocationOrErr.takeError();
+
+ return new (Importer.getToContext()) CXXBoolLiteralExpr(
+ E->getValue(), *ToTypeOrErr, *ToLocationOrErr);
+}
+
+ExpectedStmt ASTNodeImporter::VisitMemberExpr(MemberExpr *E) {
+ auto Imp1 = importSeq(
+ E->getBase(), E->getOperatorLoc(), E->getQualifierLoc(),
+ E->getTemplateKeywordLoc(), E->getMemberDecl(), E->getType());
+ if (!Imp1)
+ return Imp1.takeError();
+
+ Expr *ToBase;
+ SourceLocation ToOperatorLoc, ToTemplateKeywordLoc;
+ NestedNameSpecifierLoc ToQualifierLoc;
+ ValueDecl *ToMemberDecl;
+ QualType ToType;
+ std::tie(
+ ToBase, ToOperatorLoc, ToQualifierLoc, ToTemplateKeywordLoc, ToMemberDecl,
+ ToType) = *Imp1;
+
+ auto Imp2 = importSeq(
+ E->getFoundDecl().getDecl(), E->getMemberNameInfo().getName(),
+ E->getMemberNameInfo().getLoc(), E->getLAngleLoc(), E->getRAngleLoc());
+ if (!Imp2)
+ return Imp2.takeError();
+ NamedDecl *ToDecl;
+ DeclarationName ToName;
+ SourceLocation ToLoc, ToLAngleLoc, ToRAngleLoc;
+ std::tie(ToDecl, ToName, ToLoc, ToLAngleLoc, ToRAngleLoc) = *Imp2;
DeclAccessPair ToFoundDecl =
DeclAccessPair::make(ToDecl, E->getFoundDecl().getAccess());
- DeclarationNameInfo ToMemberNameInfo(
- Importer.Import(E->getMemberNameInfo().getName()),
- Importer.Import(E->getMemberNameInfo().getLoc()));
+ DeclarationNameInfo ToMemberNameInfo(ToName, ToLoc);
if (E->hasExplicitTemplateArgs()) {
- return nullptr; // FIXME: handle template arguments
+ // FIXME: handle template arguments
+ return make_error<ImportError>(ImportError::UnsupportedConstruct);
}
- return MemberExpr::Create(Importer.getToContext(), ToBase,
- E->isArrow(),
- Importer.Import(E->getOperatorLoc()),
- Importer.Import(E->getQualifierLoc()),
- Importer.Import(E->getTemplateKeywordLoc()),
- ToMember, ToFoundDecl, ToMemberNameInfo,
- nullptr, T, E->getValueKind(),
- E->getObjectKind());
+ return MemberExpr::Create(
+ Importer.getToContext(), ToBase, E->isArrow(), ToOperatorLoc,
+ ToQualifierLoc, ToTemplateKeywordLoc, ToMemberDecl, ToFoundDecl,
+ ToMemberNameInfo, nullptr, ToType, E->getValueKind(), E->getObjectKind());
}
-Expr *ASTNodeImporter::VisitCXXPseudoDestructorExpr(
- CXXPseudoDestructorExpr *E) {
- Expr *BaseE = Importer.Import(E->getBase());
- if (!BaseE)
- return nullptr;
+ExpectedStmt
+ASTNodeImporter::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) {
+ auto Imp = importSeq(
+ E->getBase(), E->getOperatorLoc(), E->getQualifierLoc(),
+ E->getScopeTypeInfo(), E->getColonColonLoc(), E->getTildeLoc());
+ if (!Imp)
+ return Imp.takeError();
- TypeSourceInfo *ScopeInfo = Importer.Import(E->getScopeTypeInfo());
- if (!ScopeInfo && E->getScopeTypeInfo())
- return nullptr;
+ Expr *ToBase;
+ SourceLocation ToOperatorLoc, ToColonColonLoc, ToTildeLoc;
+ NestedNameSpecifierLoc ToQualifierLoc;
+ TypeSourceInfo *ToScopeTypeInfo;
+ std::tie(
+ ToBase, ToOperatorLoc, ToQualifierLoc, ToScopeTypeInfo, ToColonColonLoc,
+ ToTildeLoc) = *Imp;
PseudoDestructorTypeStorage Storage;
if (IdentifierInfo *FromII = E->getDestroyedTypeIdentifier()) {
IdentifierInfo *ToII = Importer.Import(FromII);
- if (!ToII)
- return nullptr;
- Storage = PseudoDestructorTypeStorage(
- ToII, Importer.Import(E->getDestroyedTypeLoc()));
+ ExpectedSLoc ToDestroyedTypeLocOrErr = import(E->getDestroyedTypeLoc());
+ if (!ToDestroyedTypeLocOrErr)
+ return ToDestroyedTypeLocOrErr.takeError();
+ Storage = PseudoDestructorTypeStorage(ToII, *ToDestroyedTypeLocOrErr);
} else {
- TypeSourceInfo *TI = Importer.Import(E->getDestroyedTypeInfo());
- if (!TI)
- return nullptr;
- Storage = PseudoDestructorTypeStorage(TI);
+ if (auto ToTIOrErr = import(E->getDestroyedTypeInfo()))
+ Storage = PseudoDestructorTypeStorage(*ToTIOrErr);
+ else
+ return ToTIOrErr.takeError();
}
return new (Importer.getToContext()) CXXPseudoDestructorExpr(
- Importer.getToContext(), BaseE, E->isArrow(),
- Importer.Import(E->getOperatorLoc()),
- Importer.Import(E->getQualifierLoc()),
- ScopeInfo, Importer.Import(E->getColonColonLoc()),
- Importer.Import(E->getTildeLoc()), Storage);
+ Importer.getToContext(), ToBase, E->isArrow(), ToOperatorLoc,
+ ToQualifierLoc, ToScopeTypeInfo, ToColonColonLoc, ToTildeLoc, Storage);
}
-Expr *ASTNodeImporter::VisitCXXDependentScopeMemberExpr(
+ExpectedStmt ASTNodeImporter::VisitCXXDependentScopeMemberExpr(
CXXDependentScopeMemberExpr *E) {
- Expr *Base = nullptr;
+ auto Imp = importSeq(
+ E->getType(), E->getOperatorLoc(), E->getQualifierLoc(),
+ E->getTemplateKeywordLoc(), E->getFirstQualifierFoundInScope());
+ if (!Imp)
+ return Imp.takeError();
+
+ QualType ToType;
+ SourceLocation ToOperatorLoc, ToTemplateKeywordLoc;
+ NestedNameSpecifierLoc ToQualifierLoc;
+ NamedDecl *ToFirstQualifierFoundInScope;
+ std::tie(
+ ToType, ToOperatorLoc, ToQualifierLoc, ToTemplateKeywordLoc,
+ ToFirstQualifierFoundInScope) = *Imp;
+
+ Expr *ToBase = nullptr;
if (!E->isImplicitAccess()) {
- Base = Importer.Import(E->getBase());
- if (!Base)
- return nullptr;
+ if (ExpectedExpr ToBaseOrErr = import(E->getBase()))
+ ToBase = *ToBaseOrErr;
+ else
+ return ToBaseOrErr.takeError();
}
- QualType BaseType = Importer.Import(E->getBaseType());
- if (BaseType.isNull())
- return nullptr;
-
TemplateArgumentListInfo ToTAInfo, *ResInfo = nullptr;
if (E->hasExplicitTemplateArgs()) {
- if (ImportTemplateArgumentListInfo(E->getLAngleLoc(), E->getRAngleLoc(),
- E->template_arguments(), ToTAInfo))
- return nullptr;
+ if (Error Err = ImportTemplateArgumentListInfo(
+ E->getLAngleLoc(), E->getRAngleLoc(), E->template_arguments(),
+ ToTAInfo))
+ return std::move(Err);
ResInfo = &ToTAInfo;
}
- DeclarationName Name = Importer.Import(E->getMember());
- if (!E->getMember().isEmpty() && Name.isEmpty())
- return nullptr;
-
- DeclarationNameInfo MemberNameInfo(Name, Importer.Import(E->getMemberLoc()));
+ auto ToMemberNameInfoOrErr = importSeq(E->getMember(), E->getMemberLoc());
+ if (!ToMemberNameInfoOrErr)
+ return ToMemberNameInfoOrErr.takeError();
+ DeclarationNameInfo ToMemberNameInfo(
+ std::get<0>(*ToMemberNameInfoOrErr), std::get<1>(*ToMemberNameInfoOrErr));
// Import additional name location/type info.
- ImportDeclarationNameLoc(E->getMemberNameInfo(), MemberNameInfo);
- auto ToFQ = Importer.Import(E->getFirstQualifierFoundInScope());
- if (!ToFQ && E->getFirstQualifierFoundInScope())
- return nullptr;
+ if (Error Err = ImportDeclarationNameLoc(
+ E->getMemberNameInfo(), ToMemberNameInfo))
+ return std::move(Err);
return CXXDependentScopeMemberExpr::Create(
- Importer.getToContext(), Base, BaseType, E->isArrow(),
- Importer.Import(E->getOperatorLoc()),
- Importer.Import(E->getQualifierLoc()),
- Importer.Import(E->getTemplateKeywordLoc()),
- cast_or_null<NamedDecl>(ToFQ), MemberNameInfo, ResInfo);
+ Importer.getToContext(), ToBase, ToType, E->isArrow(), ToOperatorLoc,
+ ToQualifierLoc, ToTemplateKeywordLoc, ToFirstQualifierFoundInScope,
+ ToMemberNameInfo, ResInfo);
}
-Expr *
+ExpectedStmt
ASTNodeImporter::VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E) {
- DeclarationName Name = Importer.Import(E->getDeclName());
- if (!E->getDeclName().isEmpty() && Name.isEmpty())
- return nullptr;
-
- DeclarationNameInfo NameInfo(Name, Importer.Import(E->getExprLoc()));
- ImportDeclarationNameLoc(E->getNameInfo(), NameInfo);
-
- TemplateArgumentListInfo ToTAInfo(Importer.Import(E->getLAngleLoc()),
- Importer.Import(E->getRAngleLoc()));
+ auto Imp = importSeq(
+ E->getQualifierLoc(), E->getTemplateKeywordLoc(), E->getDeclName(),
+ E->getExprLoc(), E->getLAngleLoc(), E->getRAngleLoc());
+ if (!Imp)
+ return Imp.takeError();
+
+ NestedNameSpecifierLoc ToQualifierLoc;
+ SourceLocation ToTemplateKeywordLoc, ToExprLoc, ToLAngleLoc, ToRAngleLoc;
+ DeclarationName ToDeclName;
+ std::tie(
+ ToQualifierLoc, ToTemplateKeywordLoc, ToDeclName, ToExprLoc,
+ ToLAngleLoc, ToRAngleLoc) = *Imp;
+
+ DeclarationNameInfo ToNameInfo(ToDeclName, ToExprLoc);
+ if (Error Err = ImportDeclarationNameLoc(E->getNameInfo(), ToNameInfo))
+ return std::move(Err);
+
+ TemplateArgumentListInfo ToTAInfo(ToLAngleLoc, ToRAngleLoc);
TemplateArgumentListInfo *ResInfo = nullptr;
if (E->hasExplicitTemplateArgs()) {
- if (ImportTemplateArgumentListInfo(E->template_arguments(), ToTAInfo))
- return nullptr;
+ if (Error Err =
+ ImportTemplateArgumentListInfo(E->template_arguments(), ToTAInfo))
+ return std::move(Err);
ResInfo = &ToTAInfo;
}
return DependentScopeDeclRefExpr::Create(
- Importer.getToContext(), Importer.Import(E->getQualifierLoc()),
- Importer.Import(E->getTemplateKeywordLoc()), NameInfo, ResInfo);
+ Importer.getToContext(), ToQualifierLoc, ToTemplateKeywordLoc,
+ ToNameInfo, ResInfo);
}
-Expr *ASTNodeImporter::VisitCXXUnresolvedConstructExpr(
- CXXUnresolvedConstructExpr *CE) {
- unsigned NumArgs = CE->arg_size();
+ExpectedStmt ASTNodeImporter::VisitCXXUnresolvedConstructExpr(
+ CXXUnresolvedConstructExpr *E) {
+ auto Imp = importSeq(
+ E->getLParenLoc(), E->getRParenLoc(), E->getTypeSourceInfo());
+ if (!Imp)
+ return Imp.takeError();
- SmallVector<Expr *, 8> ToArgs(NumArgs);
- if (ImportArrayChecked(CE->arg_begin(), CE->arg_end(), ToArgs.begin()))
- return nullptr;
+ SourceLocation ToLParenLoc, ToRParenLoc;
+ TypeSourceInfo *ToTypeSourceInfo;
+ std::tie(ToLParenLoc, ToRParenLoc, ToTypeSourceInfo) = *Imp;
+
+ SmallVector<Expr *, 8> ToArgs(E->arg_size());
+ if (Error Err =
+ ImportArrayChecked(E->arg_begin(), E->arg_end(), ToArgs.begin()))
+ return std::move(Err);
return CXXUnresolvedConstructExpr::Create(
- Importer.getToContext(), Importer.Import(CE->getTypeSourceInfo()),
- Importer.Import(CE->getLParenLoc()), llvm::makeArrayRef(ToArgs),
- Importer.Import(CE->getRParenLoc()));
+ Importer.getToContext(), ToTypeSourceInfo, ToLParenLoc,
+ llvm::makeArrayRef(ToArgs), ToRParenLoc);
}
-Expr *ASTNodeImporter::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) {
- auto *NamingClass =
- cast_or_null<CXXRecordDecl>(Importer.Import(E->getNamingClass()));
- if (E->getNamingClass() && !NamingClass)
- return nullptr;
+ExpectedStmt
+ASTNodeImporter::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) {
+ Expected<CXXRecordDecl *> ToNamingClassOrErr = import(E->getNamingClass());
+ if (!ToNamingClassOrErr)
+ return ToNamingClassOrErr.takeError();
- DeclarationName Name = Importer.Import(E->getName());
- if (E->getName() && !Name)
- return nullptr;
+ auto ToQualifierLocOrErr = import(E->getQualifierLoc());
+ if (!ToQualifierLocOrErr)
+ return ToQualifierLocOrErr.takeError();
- DeclarationNameInfo NameInfo(Name, Importer.Import(E->getNameLoc()));
+ auto ToNameInfoOrErr = importSeq(E->getName(), E->getNameLoc());
+ if (!ToNameInfoOrErr)
+ return ToNameInfoOrErr.takeError();
+ DeclarationNameInfo ToNameInfo(
+ std::get<0>(*ToNameInfoOrErr), std::get<1>(*ToNameInfoOrErr));
// Import additional name location/type info.
- ImportDeclarationNameLoc(E->getNameInfo(), NameInfo);
+ if (Error Err = ImportDeclarationNameLoc(E->getNameInfo(), ToNameInfo))
+ return std::move(Err);
UnresolvedSet<8> ToDecls;
- for (auto *D : E->decls()) {
- if (auto *To = cast_or_null<NamedDecl>(Importer.Import(D)))
- ToDecls.addDecl(To);
+ for (auto *D : E->decls())
+ if (auto ToDOrErr = import(D))
+ ToDecls.addDecl(cast<NamedDecl>(*ToDOrErr));
else
- return nullptr;
- }
+ return ToDOrErr.takeError();
- TemplateArgumentListInfo ToTAInfo, *ResInfo = nullptr;
- if (E->hasExplicitTemplateArgs()) {
- if (ImportTemplateArgumentListInfo(E->getLAngleLoc(), E->getRAngleLoc(),
- E->template_arguments(), ToTAInfo))
- return nullptr;
- ResInfo = &ToTAInfo;
- }
+ if (E->hasExplicitTemplateArgs() && E->getTemplateKeywordLoc().isValid()) {
+ TemplateArgumentListInfo ToTAInfo;
+ if (Error Err = ImportTemplateArgumentListInfo(
+ E->getLAngleLoc(), E->getRAngleLoc(), E->template_arguments(),
+ ToTAInfo))
+ return std::move(Err);
+
+ ExpectedSLoc ToTemplateKeywordLocOrErr = import(E->getTemplateKeywordLoc());
+ if (!ToTemplateKeywordLocOrErr)
+ return ToTemplateKeywordLocOrErr.takeError();
- if (ResInfo || E->getTemplateKeywordLoc().isValid())
return UnresolvedLookupExpr::Create(
- Importer.getToContext(), NamingClass,
- Importer.Import(E->getQualifierLoc()),
- Importer.Import(E->getTemplateKeywordLoc()), NameInfo, E->requiresADL(),
- ResInfo, ToDecls.begin(), ToDecls.end());
+ Importer.getToContext(), *ToNamingClassOrErr, *ToQualifierLocOrErr,
+ *ToTemplateKeywordLocOrErr, ToNameInfo, E->requiresADL(), &ToTAInfo,
+ ToDecls.begin(), ToDecls.end());
+ }
return UnresolvedLookupExpr::Create(
- Importer.getToContext(), NamingClass,
- Importer.Import(E->getQualifierLoc()), NameInfo, E->requiresADL(),
- E->isOverloaded(), ToDecls.begin(), ToDecls.end());
-}
-
-Expr *ASTNodeImporter::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E) {
- DeclarationName Name = Importer.Import(E->getName());
- if (!E->getName().isEmpty() && Name.isEmpty())
- return nullptr;
- DeclarationNameInfo NameInfo(Name, Importer.Import(E->getNameLoc()));
+ Importer.getToContext(), *ToNamingClassOrErr, *ToQualifierLocOrErr,
+ ToNameInfo, E->requiresADL(), E->isOverloaded(), ToDecls.begin(),
+ ToDecls.end());
+}
+
+ExpectedStmt
+ASTNodeImporter::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E) {
+ auto Imp1 = importSeq(
+ E->getType(), E->getOperatorLoc(), E->getQualifierLoc(),
+ E->getTemplateKeywordLoc());
+ if (!Imp1)
+ return Imp1.takeError();
+
+ QualType ToType;
+ SourceLocation ToOperatorLoc, ToTemplateKeywordLoc;
+ NestedNameSpecifierLoc ToQualifierLoc;
+ std::tie(ToType, ToOperatorLoc, ToQualifierLoc, ToTemplateKeywordLoc) = *Imp1;
+
+ auto Imp2 = importSeq(E->getName(), E->getNameLoc());
+ if (!Imp2)
+ return Imp2.takeError();
+ DeclarationNameInfo ToNameInfo(std::get<0>(*Imp2), std::get<1>(*Imp2));
// Import additional name location/type info.
- ImportDeclarationNameLoc(E->getNameInfo(), NameInfo);
-
- QualType BaseType = Importer.Import(E->getType());
- if (!E->getType().isNull() && BaseType.isNull())
- return nullptr;
+ if (Error Err = ImportDeclarationNameLoc(E->getNameInfo(), ToNameInfo))
+ return std::move(Err);
UnresolvedSet<8> ToDecls;
- for (Decl *D : E->decls()) {
- if (NamedDecl *To = cast_or_null<NamedDecl>(Importer.Import(D)))
- ToDecls.addDecl(To);
+ for (Decl *D : E->decls())
+ if (auto ToDOrErr = import(D))
+ ToDecls.addDecl(cast<NamedDecl>(*ToDOrErr));
else
- return nullptr;
- }
+ return ToDOrErr.takeError();
TemplateArgumentListInfo ToTAInfo;
TemplateArgumentListInfo *ResInfo = nullptr;
if (E->hasExplicitTemplateArgs()) {
- if (ImportTemplateArgumentListInfo(E->template_arguments(), ToTAInfo))
- return nullptr;
+ if (Error Err =
+ ImportTemplateArgumentListInfo(E->template_arguments(), ToTAInfo))
+ return std::move(Err);
ResInfo = &ToTAInfo;
}
- Expr *BaseE = E->isImplicitAccess() ? nullptr : Importer.Import(E->getBase());
- if (!BaseE && !E->isImplicitAccess() && E->getBase()) {
- return nullptr;
+ Expr *ToBase = nullptr;
+ if (!E->isImplicitAccess()) {
+ if (ExpectedExpr ToBaseOrErr = import(E->getBase()))
+ ToBase = *ToBaseOrErr;
+ else
+ return ToBaseOrErr.takeError();
}
return UnresolvedMemberExpr::Create(
- Importer.getToContext(), E->hasUnresolvedUsing(), BaseE, BaseType,
- E->isArrow(), Importer.Import(E->getOperatorLoc()),
- Importer.Import(E->getQualifierLoc()),
- Importer.Import(E->getTemplateKeywordLoc()), NameInfo, ResInfo,
- ToDecls.begin(), ToDecls.end());
+ Importer.getToContext(), E->hasUnresolvedUsing(), ToBase, ToType,
+ E->isArrow(), ToOperatorLoc, ToQualifierLoc, ToTemplateKeywordLoc,
+ ToNameInfo, ResInfo, ToDecls.begin(), ToDecls.end());
}
-Expr *ASTNodeImporter::VisitCallExpr(CallExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitCallExpr(CallExpr *E) {
+ auto Imp = importSeq(E->getCallee(), E->getType(), E->getRParenLoc());
+ if (!Imp)
+ return Imp.takeError();
- Expr *ToCallee = Importer.Import(E->getCallee());
- if (!ToCallee && E->getCallee())
- return nullptr;
+ Expr *ToCallee;
+ QualType ToType;
+ SourceLocation ToRParenLoc;
+ std::tie(ToCallee, ToType, ToRParenLoc) = *Imp;
unsigned NumArgs = E->getNumArgs();
- SmallVector<Expr *, 2> ToArgs(NumArgs);
- if (ImportContainerChecked(E->arguments(), ToArgs))
- return nullptr;
-
- auto **ToArgs_Copied = new (Importer.getToContext()) Expr*[NumArgs];
-
- for (unsigned ai = 0, ae = NumArgs; ai != ae; ++ai)
- ToArgs_Copied[ai] = ToArgs[ai];
+ llvm::SmallVector<Expr *, 2> ToArgs(NumArgs);
+ if (Error Err = ImportContainerChecked(E->arguments(), ToArgs))
+ return std::move(Err);
if (const auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) {
return new (Importer.getToContext()) CXXOperatorCallExpr(
- Importer.getToContext(), OCE->getOperator(), ToCallee, ToArgs, T,
- OCE->getValueKind(), Importer.Import(OCE->getRParenLoc()),
- OCE->getFPFeatures());
+ Importer.getToContext(), OCE->getOperator(), ToCallee, ToArgs, ToType,
+ OCE->getValueKind(), ToRParenLoc, OCE->getFPFeatures(),
+ OCE->getADLCallKind());
}
- return new (Importer.getToContext())
- CallExpr(Importer.getToContext(), ToCallee,
- llvm::makeArrayRef(ToArgs_Copied, NumArgs), T, E->getValueKind(),
- Importer.Import(E->getRParenLoc()));
+ return new (Importer.getToContext()) CallExpr(
+ Importer.getToContext(), ToCallee, ToArgs, ToType, E->getValueKind(),
+ ToRParenLoc, /*MinNumArgs=*/0, E->getADLCallKind());
}
-Optional<LambdaCapture>
-ASTNodeImporter::ImportLambdaCapture(const LambdaCapture &From) {
- VarDecl *Var = nullptr;
- if (From.capturesVariable()) {
- Var = cast_or_null<VarDecl>(Importer.Import(From.getCapturedVar()));
- if (!Var)
- return None;
- }
-
- return LambdaCapture(Importer.Import(From.getLocation()), From.isImplicit(),
- From.getCaptureKind(), Var,
- From.isPackExpansion()
- ? Importer.Import(From.getEllipsisLoc())
- : SourceLocation());
-}
-
-Expr *ASTNodeImporter::VisitLambdaExpr(LambdaExpr *LE) {
- CXXRecordDecl *FromClass = LE->getLambdaClass();
- auto *ToClass = dyn_cast_or_null<CXXRecordDecl>(Importer.Import(FromClass));
- if (!ToClass)
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitLambdaExpr(LambdaExpr *E) {
+ CXXRecordDecl *FromClass = E->getLambdaClass();
+ auto ToClassOrErr = import(FromClass);
+ if (!ToClassOrErr)
+ return ToClassOrErr.takeError();
+ CXXRecordDecl *ToClass = *ToClassOrErr;
// NOTE: lambda classes are created with BeingDefined flag set up.
// It means that ImportDefinition doesn't work for them and we should fill it
// manually.
if (ToClass->isBeingDefined()) {
for (auto FromField : FromClass->fields()) {
- auto *ToField = cast_or_null<FieldDecl>(Importer.Import(FromField));
- if (!ToField)
- return nullptr;
+ auto ToFieldOrErr = import(FromField);
+ if (!ToFieldOrErr)
+ return ToFieldOrErr.takeError();
}
}
- auto *ToCallOp = dyn_cast_or_null<CXXMethodDecl>(
- Importer.Import(LE->getCallOperator()));
- if (!ToCallOp)
- return nullptr;
+ auto ToCallOpOrErr = import(E->getCallOperator());
+ if (!ToCallOpOrErr)
+ return ToCallOpOrErr.takeError();
ToClass->completeDefinition();
- unsigned NumCaptures = LE->capture_size();
- SmallVector<LambdaCapture, 8> Captures;
- Captures.reserve(NumCaptures);
- for (const auto &FromCapture : LE->captures()) {
- if (auto ToCapture = ImportLambdaCapture(FromCapture))
- Captures.push_back(*ToCapture);
+ SmallVector<LambdaCapture, 8> ToCaptures;
+ ToCaptures.reserve(E->capture_size());
+ for (const auto &FromCapture : E->captures()) {
+ if (auto ToCaptureOrErr = import(FromCapture))
+ ToCaptures.push_back(*ToCaptureOrErr);
else
- return nullptr;
+ return ToCaptureOrErr.takeError();
}
- SmallVector<Expr *, 8> InitCaptures(NumCaptures);
- if (ImportContainerChecked(LE->capture_inits(), InitCaptures))
- return nullptr;
+ SmallVector<Expr *, 8> ToCaptureInits(E->capture_size());
+ if (Error Err = ImportContainerChecked(E->capture_inits(), ToCaptureInits))
+ return std::move(Err);
+
+ auto Imp = importSeq(
+ E->getIntroducerRange(), E->getCaptureDefaultLoc(), E->getEndLoc());
+ if (!Imp)
+ return Imp.takeError();
+
+ SourceRange ToIntroducerRange;
+ SourceLocation ToCaptureDefaultLoc, ToEndLoc;
+ std::tie(ToIntroducerRange, ToCaptureDefaultLoc, ToEndLoc) = *Imp;
return LambdaExpr::Create(
- Importer.getToContext(), ToClass,
- Importer.Import(LE->getIntroducerRange()), LE->getCaptureDefault(),
- Importer.Import(LE->getCaptureDefaultLoc()), Captures,
- LE->hasExplicitParameters(), LE->hasExplicitResultType(), InitCaptures,
- Importer.Import(LE->getEndLoc()), LE->containsUnexpandedParameterPack());
+ Importer.getToContext(), ToClass, ToIntroducerRange,
+ E->getCaptureDefault(), ToCaptureDefaultLoc, ToCaptures,
+ E->hasExplicitParameters(), E->hasExplicitResultType(), ToCaptureInits,
+ ToEndLoc, E->containsUnexpandedParameterPack());
}
-Expr *ASTNodeImporter::VisitInitListExpr(InitListExpr *ILE) {
- QualType T = Importer.Import(ILE->getType());
- if (T.isNull())
- return nullptr;
- SmallVector<Expr *, 4> Exprs(ILE->getNumInits());
- if (ImportContainerChecked(ILE->inits(), Exprs))
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitInitListExpr(InitListExpr *E) {
+ auto Imp = importSeq(E->getLBraceLoc(), E->getRBraceLoc(), E->getType());
+ if (!Imp)
+ return Imp.takeError();
+
+ SourceLocation ToLBraceLoc, ToRBraceLoc;
+ QualType ToType;
+ std::tie(ToLBraceLoc, ToRBraceLoc, ToType) = *Imp;
+
+ SmallVector<Expr *, 4> ToExprs(E->getNumInits());
+ if (Error Err = ImportContainerChecked(E->inits(), ToExprs))
+ return std::move(Err);
ASTContext &ToCtx = Importer.getToContext();
InitListExpr *To = new (ToCtx) InitListExpr(
- ToCtx, Importer.Import(ILE->getLBraceLoc()),
- Exprs, Importer.Import(ILE->getLBraceLoc()));
- To->setType(T);
+ ToCtx, ToLBraceLoc, ToExprs, ToRBraceLoc);
+ To->setType(ToType);
- if (ILE->hasArrayFiller()) {
- Expr *Filler = Importer.Import(ILE->getArrayFiller());
- if (!Filler)
- return nullptr;
- To->setArrayFiller(Filler);
+ if (E->hasArrayFiller()) {
+ if (ExpectedExpr ToFillerOrErr = import(E->getArrayFiller()))
+ To->setArrayFiller(*ToFillerOrErr);
+ else
+ return ToFillerOrErr.takeError();
}
- if (FieldDecl *FromFD = ILE->getInitializedFieldInUnion()) {
- auto *ToFD = cast_or_null<FieldDecl>(Importer.Import(FromFD));
- if (!ToFD)
- return nullptr;
- To->setInitializedFieldInUnion(ToFD);
+ if (FieldDecl *FromFD = E->getInitializedFieldInUnion()) {
+ if (auto ToFDOrErr = import(FromFD))
+ To->setInitializedFieldInUnion(*ToFDOrErr);
+ else
+ return ToFDOrErr.takeError();
}
- if (InitListExpr *SyntForm = ILE->getSyntacticForm()) {
- auto *ToSyntForm = cast_or_null<InitListExpr>(Importer.Import(SyntForm));
- if (!ToSyntForm)
- return nullptr;
- To->setSyntacticForm(ToSyntForm);
+ if (InitListExpr *SyntForm = E->getSyntacticForm()) {
+ if (auto ToSyntFormOrErr = import(SyntForm))
+ To->setSyntacticForm(*ToSyntFormOrErr);
+ else
+ return ToSyntFormOrErr.takeError();
}
// Copy InitListExprBitfields, which are not handled in the ctor of
// InitListExpr.
- To->sawArrayRangeDesignator(ILE->hadArrayRangeDesignator());
+ To->sawArrayRangeDesignator(E->hadArrayRangeDesignator());
return To;
}
-Expr *ASTNodeImporter::VisitCXXStdInitializerListExpr(
+ExpectedStmt ASTNodeImporter::VisitCXXStdInitializerListExpr(
CXXStdInitializerListExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ ExpectedType ToTypeOrErr = import(E->getType());
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
- Expr *SE = Importer.Import(E->getSubExpr());
- if (!SE)
- return nullptr;
+ ExpectedExpr ToSubExprOrErr = import(E->getSubExpr());
+ if (!ToSubExprOrErr)
+ return ToSubExprOrErr.takeError();
- return new (Importer.getToContext()) CXXStdInitializerListExpr(T, SE);
+ return new (Importer.getToContext()) CXXStdInitializerListExpr(
+ *ToTypeOrErr, *ToSubExprOrErr);
}
-Expr *ASTNodeImporter::VisitCXXInheritedCtorInitExpr(
+ExpectedStmt ASTNodeImporter::VisitCXXInheritedCtorInitExpr(
CXXInheritedCtorInitExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ auto Imp = importSeq(E->getLocation(), E->getType(), E->getConstructor());
+ if (!Imp)
+ return Imp.takeError();
- auto *Ctor = cast_or_null<CXXConstructorDecl>(Importer.Import(
- E->getConstructor()));
- if (!Ctor)
- return nullptr;
+ SourceLocation ToLocation;
+ QualType ToType;
+ CXXConstructorDecl *ToConstructor;
+ std::tie(ToLocation, ToType, ToConstructor) = *Imp;
return new (Importer.getToContext()) CXXInheritedCtorInitExpr(
- Importer.Import(E->getLocation()), T, Ctor,
- E->constructsVBase(), E->inheritedFromVBase());
+ ToLocation, ToType, ToConstructor, E->constructsVBase(),
+ E->inheritedFromVBase());
}
-Expr *ASTNodeImporter::VisitArrayInitLoopExpr(ArrayInitLoopExpr *E) {
- QualType ToType = Importer.Import(E->getType());
- if (ToType.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitArrayInitLoopExpr(ArrayInitLoopExpr *E) {
+ auto Imp = importSeq(E->getType(), E->getCommonExpr(), E->getSubExpr());
+ if (!Imp)
+ return Imp.takeError();
- Expr *ToCommon = Importer.Import(E->getCommonExpr());
- if (!ToCommon && E->getCommonExpr())
- return nullptr;
+ QualType ToType;
+ Expr *ToCommonExpr, *ToSubExpr;
+ std::tie(ToType, ToCommonExpr, ToSubExpr) = *Imp;
- Expr *ToSubExpr = Importer.Import(E->getSubExpr());
- if (!ToSubExpr && E->getSubExpr())
- return nullptr;
-
- return new (Importer.getToContext())
- ArrayInitLoopExpr(ToType, ToCommon, ToSubExpr);
+ return new (Importer.getToContext()) ArrayInitLoopExpr(
+ ToType, ToCommonExpr, ToSubExpr);
}
-Expr *ASTNodeImporter::VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
- QualType ToType = Importer.Import(E->getType());
- if (ToType.isNull())
- return nullptr;
- return new (Importer.getToContext()) ArrayInitIndexExpr(ToType);
+ExpectedStmt ASTNodeImporter::VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
+ ExpectedType ToTypeOrErr = import(E->getType());
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
+ return new (Importer.getToContext()) ArrayInitIndexExpr(*ToTypeOrErr);
}
-Expr *ASTNodeImporter::VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
- auto *ToField = dyn_cast_or_null<FieldDecl>(Importer.Import(DIE->getField()));
- if (!ToField && DIE->getField())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitCXXDefaultInitExpr(CXXDefaultInitExpr *E) {
+ ExpectedSLoc ToBeginLocOrErr = import(E->getBeginLoc());
+ if (!ToBeginLocOrErr)
+ return ToBeginLocOrErr.takeError();
+
+ auto ToFieldOrErr = import(E->getField());
+ if (!ToFieldOrErr)
+ return ToFieldOrErr.takeError();
return CXXDefaultInitExpr::Create(
- Importer.getToContext(), Importer.Import(DIE->getBeginLoc()), ToField);
+ Importer.getToContext(), *ToBeginLocOrErr, *ToFieldOrErr);
}
-Expr *ASTNodeImporter::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) {
- QualType ToType = Importer.Import(E->getType());
- if (ToType.isNull() && !E->getType().isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) {
+ auto Imp = importSeq(
+ E->getType(), E->getSubExpr(), E->getTypeInfoAsWritten(),
+ E->getOperatorLoc(), E->getRParenLoc(), E->getAngleBrackets());
+ if (!Imp)
+ return Imp.takeError();
+
+ QualType ToType;
+ Expr *ToSubExpr;
+ TypeSourceInfo *ToTypeInfoAsWritten;
+ SourceLocation ToOperatorLoc, ToRParenLoc;
+ SourceRange ToAngleBrackets;
+ std::tie(
+ ToType, ToSubExpr, ToTypeInfoAsWritten, ToOperatorLoc, ToRParenLoc,
+ ToAngleBrackets) = *Imp;
+
ExprValueKind VK = E->getValueKind();
CastKind CK = E->getCastKind();
- Expr *ToOp = Importer.Import(E->getSubExpr());
- if (!ToOp && E->getSubExpr())
- return nullptr;
- CXXCastPath BasePath;
- if (ImportCastPath(E, BasePath))
- return nullptr;
- TypeSourceInfo *ToWritten = Importer.Import(E->getTypeInfoAsWritten());
- SourceLocation ToOperatorLoc = Importer.Import(E->getOperatorLoc());
- SourceLocation ToRParenLoc = Importer.Import(E->getRParenLoc());
- SourceRange ToAngleBrackets = Importer.Import(E->getAngleBrackets());
+ auto ToBasePathOrErr = ImportCastPath(E);
+ if (!ToBasePathOrErr)
+ return ToBasePathOrErr.takeError();
if (isa<CXXStaticCastExpr>(E)) {
return CXXStaticCastExpr::Create(
- Importer.getToContext(), ToType, VK, CK, ToOp, &BasePath,
- ToWritten, ToOperatorLoc, ToRParenLoc, ToAngleBrackets);
+ Importer.getToContext(), ToType, VK, CK, ToSubExpr, &(*ToBasePathOrErr),
+ ToTypeInfoAsWritten, ToOperatorLoc, ToRParenLoc, ToAngleBrackets);
} else if (isa<CXXDynamicCastExpr>(E)) {
return CXXDynamicCastExpr::Create(
- Importer.getToContext(), ToType, VK, CK, ToOp, &BasePath,
- ToWritten, ToOperatorLoc, ToRParenLoc, ToAngleBrackets);
+ Importer.getToContext(), ToType, VK, CK, ToSubExpr, &(*ToBasePathOrErr),
+ ToTypeInfoAsWritten, ToOperatorLoc, ToRParenLoc, ToAngleBrackets);
} else if (isa<CXXReinterpretCastExpr>(E)) {
return CXXReinterpretCastExpr::Create(
- Importer.getToContext(), ToType, VK, CK, ToOp, &BasePath,
- ToWritten, ToOperatorLoc, ToRParenLoc, ToAngleBrackets);
+ Importer.getToContext(), ToType, VK, CK, ToSubExpr, &(*ToBasePathOrErr),
+ ToTypeInfoAsWritten, ToOperatorLoc, ToRParenLoc, ToAngleBrackets);
} else if (isa<CXXConstCastExpr>(E)) {
- return CXXConstCastExpr::Create(Importer.getToContext(), ToType, VK, ToOp,
- ToWritten, ToOperatorLoc, ToRParenLoc,
- ToAngleBrackets);
+ return CXXConstCastExpr::Create(
+ Importer.getToContext(), ToType, VK, ToSubExpr, ToTypeInfoAsWritten,
+ ToOperatorLoc, ToRParenLoc, ToAngleBrackets);
} else {
- return nullptr;
+ llvm_unreachable("Unknown cast type");
+ return make_error<ImportError>();
}
}
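
The named-cast visitor above shows the shape every converted visitor now follows: import all the pieces up front, propagate the first failure with takeError(), then unpack with std::tie. A minimal self-contained sketch of that llvm::Expected idiom, where parseKey, parseValue and importBoth are hypothetical stand-ins rather than the real importSeq helper:

#include "llvm/Support/Error.h"
#include <string>
#include <tuple>
#include <utility>

// Stand-ins for individual import steps; each may fail.
llvm::Expected<int> parseKey(bool Ok) {
  if (!Ok)
    return llvm::createStringError(llvm::inconvertibleErrorCode(), "bad key");
  return 42;
}
llvm::Expected<std::string> parseValue(bool Ok) {
  if (!Ok)
    return llvm::createStringError(llvm::inconvertibleErrorCode(), "bad value");
  return std::string("value");
}

// Import both pieces, hand the first failure back to the caller, then unpack
// with std::tie, mirroring the visitors above.
llvm::Expected<std::pair<int, std::string>> importBoth(bool K, bool V) {
  auto KeyOrErr = parseKey(K);
  if (!KeyOrErr)
    return KeyOrErr.takeError();
  auto ValueOrErr = parseValue(V);
  if (!ValueOrErr)
    return ValueOrErr.takeError();

  int Key;
  std::string Value;
  std::tie(Key, Value) = std::make_tuple(*KeyOrErr, *ValueOrErr);
  return std::make_pair(Key, Value);
}

The payoff is that a failure anywhere in the sequence surfaces as a single llvm::Error at the call site instead of a silent null pointer.
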
-Expr *ASTNodeImporter::VisitSubstNonTypeTemplateParmExpr(
+ExpectedStmt ASTNodeImporter::VisitSubstNonTypeTemplateParmExpr(
SubstNonTypeTemplateParmExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
-
- auto *Param = cast_or_null<NonTypeTemplateParmDecl>(
- Importer.Import(E->getParameter()));
- if (!Param)
- return nullptr;
+ auto Imp = importSeq(
+ E->getType(), E->getExprLoc(), E->getParameter(), E->getReplacement());
+ if (!Imp)
+ return Imp.takeError();
- Expr *Replacement = Importer.Import(E->getReplacement());
- if (!Replacement)
- return nullptr;
+ QualType ToType;
+ SourceLocation ToExprLoc;
+ NonTypeTemplateParmDecl *ToParameter;
+ Expr *ToReplacement;
+ std::tie(ToType, ToExprLoc, ToParameter, ToReplacement) = *Imp;
return new (Importer.getToContext()) SubstNonTypeTemplateParmExpr(
- T, E->getValueKind(), Importer.Import(E->getExprLoc()), Param,
- Replacement);
+ ToType, E->getValueKind(), ToExprLoc, ToParameter, ToReplacement);
}
-Expr *ASTNodeImporter::VisitTypeTraitExpr(TypeTraitExpr *E) {
- QualType ToType = Importer.Import(E->getType());
- if (ToType.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitTypeTraitExpr(TypeTraitExpr *E) {
+ auto Imp = importSeq(
+ E->getType(), E->getBeginLoc(), E->getEndLoc());
+ if (!Imp)
+ return Imp.takeError();
+
+ QualType ToType;
+ SourceLocation ToBeginLoc, ToEndLoc;
+ std::tie(ToType, ToBeginLoc, ToEndLoc) = *Imp;
SmallVector<TypeSourceInfo *, 4> ToArgs(E->getNumArgs());
- if (ImportContainerChecked(E->getArgs(), ToArgs))
- return nullptr;
+ if (Error Err = ImportContainerChecked(E->getArgs(), ToArgs))
+ return std::move(Err);
// According to Sema::BuildTypeTrait(), if E is value-dependent,
// Value is always false.
- bool ToValue = false;
- if (!E->isValueDependent())
- ToValue = E->getValue();
+ bool ToValue = (E->isValueDependent() ? false : E->getValue());
return TypeTraitExpr::Create(
- Importer.getToContext(), ToType, Importer.Import(E->getBeginLoc()),
- E->getTrait(), ToArgs, Importer.Import(E->getEndLoc()), ToValue);
+ Importer.getToContext(), ToType, ToBeginLoc, E->getTrait(), ToArgs,
+ ToEndLoc, ToValue);
}
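
For context on the value-dependence comment: a type trait written against a template parameter has no value until instantiation, so the imported node keeps Value == false in that case. A small illustrative example, not taken from the patch:

// In the primary template, __is_trivially_copyable(T) is value-dependent,
// so the TypeTraitExpr carries Value == false until instantiation.
template <class T>
constexpr bool IsTC = __is_trivially_copyable(T);

static_assert(IsTC<int>, "evaluated only once T is known");
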
-Expr *ASTNodeImporter::VisitCXXTypeidExpr(CXXTypeidExpr *E) {
- QualType ToType = Importer.Import(E->getType());
- if (ToType.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitCXXTypeidExpr(CXXTypeidExpr *E) {
+ ExpectedType ToTypeOrErr = import(E->getType());
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
- if (E->isTypeOperand()) {
- TypeSourceInfo *TSI = Importer.Import(E->getTypeOperandSourceInfo());
- if (!TSI)
- return nullptr;
+ auto ToSourceRangeOrErr = import(E->getSourceRange());
+ if (!ToSourceRangeOrErr)
+ return ToSourceRangeOrErr.takeError();
- return new (Importer.getToContext())
- CXXTypeidExpr(ToType, TSI, Importer.Import(E->getSourceRange()));
+ if (E->isTypeOperand()) {
+ if (auto ToTSIOrErr = import(E->getTypeOperandSourceInfo()))
+ return new (Importer.getToContext()) CXXTypeidExpr(
+ *ToTypeOrErr, *ToTSIOrErr, *ToSourceRangeOrErr);
+ else
+ return ToTSIOrErr.takeError();
}
- Expr *Op = Importer.Import(E->getExprOperand());
- if (!Op)
- return nullptr;
+ ExpectedExpr ToExprOperandOrErr = import(E->getExprOperand());
+ if (!ToExprOperandOrErr)
+ return ToExprOperandOrErr.takeError();
- return new (Importer.getToContext())
- CXXTypeidExpr(ToType, Op, Importer.Import(E->getSourceRange()));
+ return new (Importer.getToContext()) CXXTypeidExpr(
+ *ToTypeOrErr, *ToExprOperandOrErr, *ToSourceRangeOrErr);
}
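
The two branches above mirror the two operand forms typeid accepts; a short reminder, for illustration only:

#include <typeinfo>

struct Base { virtual ~Base() = default; };

void demo() {
  Base B;
  const std::type_info &ByType = typeid(Base); // type operand: TypeSourceInfo branch
  const std::type_info &ByExpr = typeid(B);    // expression operand: Expr branch
  (void)ByType; (void)ByExpr;
}
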
void ASTNodeImporter::ImportOverrides(CXXMethodDecl *ToMethod,
CXXMethodDecl *FromMethod) {
- for (auto *FromOverriddenMethod : FromMethod->overridden_methods())
- ToMethod->addOverriddenMethod(
- cast<CXXMethodDecl>(Importer.Import(const_cast<CXXMethodDecl*>(
- FromOverriddenMethod))));
+ for (auto *FromOverriddenMethod : FromMethod->overridden_methods()) {
+ if (auto ImportedOrErr = import(FromOverriddenMethod))
+ ToMethod->getCanonicalDecl()->addOverriddenMethod(cast<CXXMethodDecl>(
+ (*ImportedOrErr)->getCanonicalDecl()));
+ else
+ consumeError(ImportedOrErr.takeError());
+ }
}
ASTImporter::ASTImporter(ASTContext &ToContext, FileManager &ToFileManager,
@@ -7021,30 +7687,44 @@ ASTImporter::ASTImporter(ASTContext &ToContext, FileManager &ToFileManager,
ASTImporter::~ASTImporter() = default;
+Expected<QualType> ASTImporter::Import_New(QualType FromT) {
+ QualType ToT = Import(FromT);
+ if (ToT.isNull() && !FromT.isNull())
+ return make_error<ImportError>();
+ return ToT;
+}
QualType ASTImporter::Import(QualType FromT) {
if (FromT.isNull())
return {};
- const Type *fromTy = FromT.getTypePtr();
+ const Type *FromTy = FromT.getTypePtr();
// Check whether we've already imported this type.
llvm::DenseMap<const Type *, const Type *>::iterator Pos
- = ImportedTypes.find(fromTy);
+ = ImportedTypes.find(FromTy);
if (Pos != ImportedTypes.end())
return ToContext.getQualifiedType(Pos->second, FromT.getLocalQualifiers());
// Import the type
ASTNodeImporter Importer(*this);
- QualType ToT = Importer.Visit(fromTy);
- if (ToT.isNull())
- return ToT;
+ ExpectedType ToTOrErr = Importer.Visit(FromTy);
+ if (!ToTOrErr) {
+ llvm::consumeError(ToTOrErr.takeError());
+ return {};
+ }
// Record the imported type.
- ImportedTypes[fromTy] = ToT.getTypePtr();
+ ImportedTypes[FromTy] = (*ToTOrErr).getTypePtr();
- return ToContext.getQualifiedType(ToT, FromT.getLocalQualifiers());
+ return ToContext.getQualifiedType(*ToTOrErr, FromT.getLocalQualifiers());
}
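
Import_New is a thin compatibility shim while the importer migrates: the old entry point still memoizes in a DenseMap and signals failure with a null/empty result, and the new one turns that into an llvm::Expected. A minimal sketch of the pattern with hypothetical names (Node, lookupLegacy and lookupNew are not Clang APIs):

#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/Error.h"

struct Node { int Id; };

llvm::DenseMap<int, Node *> Cache;

// Legacy-style entry point: memoized, signals failure with nullptr.
Node *lookupLegacy(int Key) {
  auto Pos = Cache.find(Key);
  if (Pos != Cache.end())
    return Pos->second;
  if (Key < 0)                       // simulated failure
    return nullptr;
  Node *N = new Node{Key};
  Cache[Key] = N;
  return N;
}

// New-style entry point: same work, but failures become llvm::Error.
llvm::Expected<Node *> lookupNew(int Key) {
  if (Node *N = lookupLegacy(Key))
    return N;
  return llvm::createStringError(llvm::inconvertibleErrorCode(),
                                 "lookup failed");
}
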
+Expected<TypeSourceInfo *> ASTImporter::Import_New(TypeSourceInfo *FromTSI) {
+ TypeSourceInfo *ToTSI = Import(FromTSI);
+ if (!ToTSI && FromTSI)
+ return llvm::make_error<ImportError>();
+ return ToTSI;
+}
TypeSourceInfo *ASTImporter::Import(TypeSourceInfo *FromTSI) {
if (!FromTSI)
return FromTSI;
@@ -7059,24 +7739,30 @@ TypeSourceInfo *ASTImporter::Import(TypeSourceInfo *FromTSI) {
T, Import(FromTSI->getTypeLoc().getBeginLoc()));
}
+Expected<Attr *> ASTImporter::Import_New(const Attr *FromAttr) {
+ return Import(FromAttr);
+}
Attr *ASTImporter::Import(const Attr *FromAttr) {
Attr *ToAttr = FromAttr->clone(ToContext);
+ // NOTE: Import of SourceRange may fail.
ToAttr->setRange(Import(FromAttr->getRange()));
return ToAttr;
}
-Decl *ASTImporter::GetAlreadyImportedOrNull(Decl *FromD) {
- llvm::DenseMap<Decl *, Decl *>::iterator Pos = ImportedDecls.find(FromD);
- if (Pos != ImportedDecls.end()) {
- Decl *ToD = Pos->second;
- // FIXME: move this call to ImportDeclParts().
- ASTNodeImporter(*this).ImportDefinitionIfNeeded(FromD, ToD);
- return ToD;
- } else {
+Decl *ASTImporter::GetAlreadyImportedOrNull(const Decl *FromD) const {
+ auto Pos = ImportedDecls.find(FromD);
+ if (Pos != ImportedDecls.end())
+ return Pos->second;
+ else
return nullptr;
- }
}
+Expected<Decl *> ASTImporter::Import_New(Decl *FromD) {
+ Decl *ToD = Import(FromD);
+ if (!ToD && FromD)
+ return llvm::make_error<ImportError>();
+ return ToD;
+}
Decl *ASTImporter::Import(Decl *FromD) {
if (!FromD)
return nullptr;
@@ -7092,9 +7778,12 @@ Decl *ASTImporter::Import(Decl *FromD) {
}
// Import the type.
- ToD = Importer.Visit(FromD);
- if (!ToD)
+ ExpectedDecl ToDOrErr = Importer.Visit(FromD);
+ if (!ToDOrErr) {
+ llvm::consumeError(ToDOrErr.takeError());
return nullptr;
+ }
+ ToD = *ToDOrErr;
// Notify subclasses.
Imported(FromD, ToD);
@@ -7103,7 +7792,7 @@ Decl *ASTImporter::Import(Decl *FromD) {
return ToD;
}
-DeclContext *ASTImporter::ImportContext(DeclContext *FromDC) {
+Expected<DeclContext *> ASTImporter::ImportContext(DeclContext *FromDC) {
if (!FromDC)
return FromDC;
@@ -7118,8 +7807,9 @@ DeclContext *ASTImporter::ImportContext(DeclContext *FromDC) {
if (ToRecord->isCompleteDefinition()) {
// Do nothing.
} else if (FromRecord->isCompleteDefinition()) {
- ASTNodeImporter(*this).ImportDefinition(FromRecord, ToRecord,
- ASTNodeImporter::IDK_Basic);
+ if (Error Err = ASTNodeImporter(*this).ImportDefinition(
+ FromRecord, ToRecord, ASTNodeImporter::IDK_Basic))
+ return std::move(Err);
} else {
CompleteDecl(ToRecord);
}
@@ -7128,8 +7818,9 @@ DeclContext *ASTImporter::ImportContext(DeclContext *FromDC) {
if (ToEnum->isCompleteDefinition()) {
// Do nothing.
} else if (FromEnum->isCompleteDefinition()) {
- ASTNodeImporter(*this).ImportDefinition(FromEnum, ToEnum,
- ASTNodeImporter::IDK_Basic);
+ if (Error Err = ASTNodeImporter(*this).ImportDefinition(
+ FromEnum, ToEnum, ASTNodeImporter::IDK_Basic))
+ return std::move(Err);
} else {
CompleteDecl(ToEnum);
}
@@ -7138,8 +7829,9 @@ DeclContext *ASTImporter::ImportContext(DeclContext *FromDC) {
if (ToClass->getDefinition()) {
// Do nothing.
} else if (ObjCInterfaceDecl *FromDef = FromClass->getDefinition()) {
- ASTNodeImporter(*this).ImportDefinition(FromDef, ToClass,
- ASTNodeImporter::IDK_Basic);
+ if (Error Err = ASTNodeImporter(*this).ImportDefinition(
+ FromDef, ToClass, ASTNodeImporter::IDK_Basic))
+ return std::move(Err);
} else {
CompleteDecl(ToClass);
}
@@ -7148,8 +7840,9 @@ DeclContext *ASTImporter::ImportContext(DeclContext *FromDC) {
if (ToProto->getDefinition()) {
// Do nothing.
} else if (ObjCProtocolDecl *FromDef = FromProto->getDefinition()) {
- ASTNodeImporter(*this).ImportDefinition(FromDef, ToProto,
- ASTNodeImporter::IDK_Basic);
+ if (Error Err = ASTNodeImporter(*this).ImportDefinition(
+ FromDef, ToProto, ASTNodeImporter::IDK_Basic))
+ return std::move(Err);
} else {
CompleteDecl(ToProto);
}
@@ -7158,6 +7851,12 @@ DeclContext *ASTImporter::ImportContext(DeclContext *FromDC) {
return ToDC;
}
+Expected<Expr *> ASTImporter::Import_New(Expr *FromE) {
+ Expr *ToE = Import(FromE);
+ if (!ToE && FromE)
+ return llvm::make_error<ImportError>();
+ return ToE;
+}
Expr *ASTImporter::Import(Expr *FromE) {
if (!FromE)
return nullptr;
@@ -7165,6 +7864,12 @@ Expr *ASTImporter::Import(Expr *FromE) {
return cast_or_null<Expr>(Import(cast<Stmt>(FromE)));
}
+Expected<Stmt *> ASTImporter::Import_New(Stmt *FromS) {
+ Stmt *ToS = Import(FromS);
+ if (!ToS && FromS)
+ return llvm::make_error<ImportError>();
+ return ToS;
+}
Stmt *ASTImporter::Import(Stmt *FromS) {
if (!FromS)
return nullptr;
@@ -7174,13 +7879,15 @@ Stmt *ASTImporter::Import(Stmt *FromS) {
if (Pos != ImportedStmts.end())
return Pos->second;
- // Import the type
+ // Import the statement.
ASTNodeImporter Importer(*this);
- Stmt *ToS = Importer.Visit(FromS);
- if (!ToS)
+ ExpectedStmt ToSOrErr = Importer.Visit(FromS);
+ if (!ToSOrErr) {
+ llvm::consumeError(ToSOrErr.takeError());
return nullptr;
+ }
- if (auto *ToE = dyn_cast<Expr>(ToS)) {
+ if (auto *ToE = dyn_cast<Expr>(*ToSOrErr)) {
auto *FromE = cast<Expr>(FromS);
// Copy ExprBitfields, which may not be handled in Expr subclasses
// constructors.
@@ -7194,10 +7901,17 @@ Stmt *ASTImporter::Import(Stmt *FromS) {
}
// Record the imported declaration.
- ImportedStmts[FromS] = ToS;
- return ToS;
+ ImportedStmts[FromS] = *ToSOrErr;
+ return *ToSOrErr;
}
+Expected<NestedNameSpecifier *>
+ASTImporter::Import_New(NestedNameSpecifier *FromNNS) {
+ NestedNameSpecifier *ToNNS = Import(FromNNS);
+ if (!ToNNS && FromNNS)
+ return llvm::make_error<ImportError>();
+ return ToNNS;
+}
NestedNameSpecifier *ASTImporter::Import(NestedNameSpecifier *FromNNS) {
if (!FromNNS)
return nullptr;
@@ -7251,6 +7965,11 @@ NestedNameSpecifier *ASTImporter::Import(NestedNameSpecifier *FromNNS) {
llvm_unreachable("Invalid nested name specifier kind");
}
+Expected<NestedNameSpecifierLoc>
+ASTImporter::Import_New(NestedNameSpecifierLoc FromNNS) {
+ NestedNameSpecifierLoc ToNNS = Import(FromNNS);
+ return ToNNS;
+}
NestedNameSpecifierLoc ASTImporter::Import(NestedNameSpecifierLoc FromNNS) {
// Copied from NestedNameSpecifier mostly.
SmallVector<NestedNameSpecifierLoc , 8> NestedNames;
@@ -7322,6 +8041,12 @@ NestedNameSpecifierLoc ASTImporter::Import(NestedNameSpecifierLoc FromNNS) {
return Builder.getWithLocInContext(getToContext());
}
+Expected<TemplateName> ASTImporter::Import_New(TemplateName From) {
+ TemplateName To = Import(From);
+ if (To.isNull() && !From.isNull())
+ return llvm::make_error<ImportError>();
+ return To;
+}
TemplateName ASTImporter::Import(TemplateName From) {
switch (From.getKind()) {
case TemplateName::Template:
@@ -7398,18 +8123,26 @@ TemplateName ASTImporter::Import(TemplateName From) {
return {};
ASTNodeImporter Importer(*this);
- TemplateArgument ArgPack
+ Expected<TemplateArgument> ArgPack
= Importer.ImportTemplateArgument(SubstPack->getArgumentPack());
- if (ArgPack.isNull())
+ if (!ArgPack) {
+ llvm::consumeError(ArgPack.takeError());
return {};
+ }
- return ToContext.getSubstTemplateTemplateParmPack(Param, ArgPack);
+ return ToContext.getSubstTemplateTemplateParmPack(Param, *ArgPack);
}
}
llvm_unreachable("Invalid template name kind");
}
+Expected<SourceLocation> ASTImporter::Import_New(SourceLocation FromLoc) {
+ SourceLocation ToLoc = Import(FromLoc);
+ if (ToLoc.isInvalid() && !FromLoc.isInvalid())
+ return llvm::make_error<ImportError>();
+ return ToLoc;
+}
SourceLocation ASTImporter::Import(SourceLocation FromLoc) {
if (FromLoc.isInvalid())
return {};
@@ -7424,10 +8157,20 @@ SourceLocation ASTImporter::Import(SourceLocation FromLoc) {
return ToSM.getComposedLoc(ToFileID, Decomposed.second);
}
+Expected<SourceRange> ASTImporter::Import_New(SourceRange FromRange) {
+ SourceRange ToRange = Import(FromRange);
+ return ToRange;
+}
SourceRange ASTImporter::Import(SourceRange FromRange) {
return SourceRange(Import(FromRange.getBegin()), Import(FromRange.getEnd()));
}
+Expected<FileID> ASTImporter::Import_New(FileID FromID) {
+ FileID ToID = Import(FromID);
+ if (ToID.isInvalid() && FromID.isValid())
+ return llvm::make_error<ImportError>();
+ return ToID;
+}
FileID ASTImporter::Import(FileID FromID) {
llvm::DenseMap<FileID, FileID>::iterator Pos = ImportedFileIDs.find(FromID);
if (Pos != ImportedFileIDs.end())
@@ -7485,6 +8228,13 @@ FileID ASTImporter::Import(FileID FromID) {
return ToID;
}
+Expected<CXXCtorInitializer *>
+ASTImporter::Import_New(CXXCtorInitializer *From) {
+ CXXCtorInitializer *To = Import(From);
+ if (!To && From)
+ return llvm::make_error<ImportError>();
+ return To;
+}
CXXCtorInitializer *ASTImporter::Import(CXXCtorInitializer *From) {
Expr *ToExpr = Import(From->getInit());
if (!ToExpr && From->getInit())
@@ -7530,6 +8280,13 @@ CXXCtorInitializer *ASTImporter::Import(CXXCtorInitializer *From) {
}
}
+Expected<CXXBaseSpecifier *>
+ASTImporter::Import_New(const CXXBaseSpecifier *From) {
+ CXXBaseSpecifier *To = Import(From);
+ if (!To && From)
+ return llvm::make_error<ImportError>();
+ return To;
+}
CXXBaseSpecifier *ASTImporter::Import(const CXXBaseSpecifier *BaseSpec) {
auto Pos = ImportedCXXBaseSpecifiers.find(BaseSpec);
if (Pos != ImportedCXXBaseSpecifiers.end())
@@ -7545,50 +8302,62 @@ CXXBaseSpecifier *ASTImporter::Import(const CXXBaseSpecifier *BaseSpec) {
return Imported;
}
-void ASTImporter::ImportDefinition(Decl *From) {
+Error ASTImporter::ImportDefinition_New(Decl *From) {
Decl *To = Import(From);
if (!To)
- return;
+ return llvm::make_error<ImportError>();
if (auto *FromDC = cast<DeclContext>(From)) {
ASTNodeImporter Importer(*this);
if (auto *ToRecord = dyn_cast<RecordDecl>(To)) {
if (!ToRecord->getDefinition()) {
- Importer.ImportDefinition(cast<RecordDecl>(FromDC), ToRecord,
- ASTNodeImporter::IDK_Everything);
- return;
+ return Importer.ImportDefinition(
+ cast<RecordDecl>(FromDC), ToRecord,
+ ASTNodeImporter::IDK_Everything);
}
}
if (auto *ToEnum = dyn_cast<EnumDecl>(To)) {
if (!ToEnum->getDefinition()) {
- Importer.ImportDefinition(cast<EnumDecl>(FromDC), ToEnum,
- ASTNodeImporter::IDK_Everything);
- return;
+ return Importer.ImportDefinition(
+ cast<EnumDecl>(FromDC), ToEnum, ASTNodeImporter::IDK_Everything);
}
}
if (auto *ToIFace = dyn_cast<ObjCInterfaceDecl>(To)) {
if (!ToIFace->getDefinition()) {
- Importer.ImportDefinition(cast<ObjCInterfaceDecl>(FromDC), ToIFace,
- ASTNodeImporter::IDK_Everything);
- return;
+ return Importer.ImportDefinition(
+ cast<ObjCInterfaceDecl>(FromDC), ToIFace,
+ ASTNodeImporter::IDK_Everything);
}
}
if (auto *ToProto = dyn_cast<ObjCProtocolDecl>(To)) {
if (!ToProto->getDefinition()) {
- Importer.ImportDefinition(cast<ObjCProtocolDecl>(FromDC), ToProto,
- ASTNodeImporter::IDK_Everything);
- return;
+ return Importer.ImportDefinition(
+ cast<ObjCProtocolDecl>(FromDC), ToProto,
+ ASTNodeImporter::IDK_Everything);
}
}
- Importer.ImportDeclContext(FromDC, true);
+ return Importer.ImportDeclContext(FromDC, true);
}
+
+ return Error::success();
}
+void ASTImporter::ImportDefinition(Decl *From) {
+ Error Err = ImportDefinition_New(From);
+ llvm::consumeError(std::move(Err));
+}
+
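
ImportDefinition keeps its old void signature by delegating to the Error-returning version and deliberately dropping the failure. An llvm::Error that is destroyed without being checked triggers an assertion in checked builds, so the consumeError call is required rather than cosmetic. A tiny sketch of that bridge (doWork and doWorkNew are hypothetical):

#include "llvm/Support/Error.h"

llvm::Error doWorkNew(bool Ok) {
  if (!Ok)
    return llvm::createStringError(llvm::inconvertibleErrorCode(), "failed");
  return llvm::Error::success();
}

// Legacy wrapper: callers that cannot handle llvm::Error yet still compile,
// and the error is explicitly marked handled before it is destroyed.
void doWork(bool Ok) {
  llvm::consumeError(doWorkNew(Ok));
}
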
+Expected<DeclarationName> ASTImporter::Import_New(DeclarationName FromName) {
+ DeclarationName ToName = Import(FromName);
+ if (!ToName && FromName)
+ return llvm::make_error<ImportError>();
+ return ToName;
+}
DeclarationName ASTImporter::Import(DeclarationName FromName) {
if (!FromName)
return {};
@@ -7665,6 +8434,12 @@ IdentifierInfo *ASTImporter::Import(const IdentifierInfo *FromId) {
return ToId;
}
+Expected<Selector> ASTImporter::Import_New(Selector FromSel) {
+ Selector ToSel = Import(FromSel);
+ if (ToSel.isNull() && !FromSel.isNull())
+ return llvm::make_error<ImportError>();
+ return ToSel;
+}
Selector ASTImporter::Import(Selector FromSel) {
if (FromSel.isNull())
return {};
diff --git a/lib/AST/ASTStructuralEquivalence.cpp b/lib/AST/ASTStructuralEquivalence.cpp
index 9149006a20..d19b89bb95 100644
--- a/lib/AST/ASTStructuralEquivalence.cpp
+++ b/lib/AST/ASTStructuralEquivalence.cpp
@@ -911,7 +911,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return true;
}
-/// Determine structural equivalence of two methodss.
+/// Determine structural equivalence of two methods.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
CXXMethodDecl *Method1,
CXXMethodDecl *Method2) {
@@ -1016,7 +1016,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return false;
// Compare the definitions of these two records. If either or both are
- // incomplete, we assume that they are equivalent.
+ // incomplete (i.e. it is a forward decl), we assume that they are
+ // equivalent.
D1 = D1->getDefinition();
D2 = D2->getDefinition();
if (!D1 || !D2)
@@ -1031,6 +1032,11 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (D1->hasExternalLexicalStorage() || D2->hasExternalLexicalStorage())
return true;
+ // If one definition is currently being defined, we do not compare for
+ // equality and we assume that the decls are equal.
+ if (D1->isBeingDefined() || D2->isBeingDefined())
+ return true;
+
if (auto *D1CXX = dyn_cast<CXXRecordDecl>(D1)) {
if (auto *D2CXX = dyn_cast<CXXRecordDecl>(D2)) {
if (D1CXX->hasExternalLexicalStorage() &&
diff --git a/lib/AST/CMakeLists.txt b/lib/AST/CMakeLists.txt
index 4f868a3af5..adeb9f7e64 100644
--- a/lib/AST/CMakeLists.txt
+++ b/lib/AST/CMakeLists.txt
@@ -39,6 +39,7 @@ add_clang_library(clangAST
ExprObjC.cpp
ExternalASTMerger.cpp
ExternalASTSource.cpp
+ FormatString.cpp
InheritViz.cpp
ItaniumCXXABI.cpp
ItaniumMangle.cpp
@@ -48,12 +49,15 @@ add_clang_library(clangAST
NestedNameSpecifier.cpp
NSAPI.cpp
ODRHash.cpp
+ OSLog.cpp
OpenMPClause.cpp
ParentMap.cpp
+ PrintfFormatString.cpp
QualTypeNames.cpp
RawCommentList.cpp
RecordLayout.cpp
RecordLayoutBuilder.cpp
+ ScanfFormatString.cpp
SelectorLocationsKind.cpp
Stmt.cpp
StmtCXX.cpp
@@ -65,6 +69,7 @@ add_clang_library(clangAST
StmtViz.cpp
TemplateBase.cpp
TemplateName.cpp
+ TextNodeDumper.cpp
Type.cpp
TypeLoc.cpp
TypePrinter.cpp
diff --git a/lib/AST/CommentParser.cpp b/lib/AST/CommentParser.cpp
index c1c04239f5..7f70b95e98 100644
--- a/lib/AST/CommentParser.cpp
+++ b/lib/AST/CommentParser.cpp
@@ -558,7 +558,6 @@ BlockContentComment *Parser::parseParagraphOrBlockCommand() {
case tok::verbatim_block_begin:
case tok::verbatim_line_name:
case tok::eof:
- assert(Content.size() != 0);
      break; // Block content or EOF ahead, finish this paragraph.
case tok::unknown_command:
diff --git a/lib/AST/Decl.cpp b/lib/AST/Decl.cpp
index 272e49a799..b32e5d9aa0 100644
--- a/lib/AST/Decl.cpp
+++ b/lib/AST/Decl.cpp
@@ -14,6 +14,7 @@
#include "clang/AST/Decl.h"
#include "Linkage.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/CanonicalType.h"
@@ -49,7 +50,6 @@
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/Visibility.h"
-#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
@@ -725,7 +725,7 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D,
// If we're paying attention to global visibility, apply
// -finline-visibility-hidden if this is an inline method.
if (useInlineVisibilityHidden(D))
- LV.mergeVisibility(HiddenVisibility, true);
+ LV.mergeVisibility(HiddenVisibility, /*visibilityExplicit=*/false);
}
}
@@ -915,7 +915,7 @@ LinkageComputer::getLVForClassMember(const NamedDecl *D,
// Note that we do this before merging information about
// the class visibility.
if (!LV.isVisibilityExplicit() && useInlineVisibilityHidden(D))
- LV.mergeVisibility(HiddenVisibility, true);
+ LV.mergeVisibility(HiddenVisibility, /*visibilityExplicit=*/false);
}
// If this class member has an explicit visibility attribute, the only
@@ -1265,14 +1265,24 @@ LinkageInfo LinkageComputer::getLVForLocalDecl(const NamedDecl *D,
// If a function is hidden by -fvisibility-inlines-hidden option and
// is not explicitly attributed as a hidden function,
// we should not make static local variables in the function hidden.
+ LV = getLVForDecl(FD, computation);
if (isa<VarDecl>(D) && useInlineVisibilityHidden(FD) &&
- !(!hasExplicitVisibilityAlready(computation) &&
- getExplicitVisibility(FD, computation))) {
+ !LV.isVisibilityExplicit()) {
assert(cast<VarDecl>(D)->isStaticLocal());
- return LinkageInfo(VisibleNoLinkage, DefaultVisibility, false);
+ // If this was an implicitly hidden inline method, check again for
+ // explicit visibility on the parent class, and use that for static locals
+ // if present.
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
+ LV = getLVForDecl(MD->getParent(), computation);
+ if (!LV.isVisibilityExplicit()) {
+ Visibility globalVisibility =
+ computation.isValueVisibility()
+ ? Context.getLangOpts().getValueVisibilityMode()
+ : Context.getLangOpts().getTypeVisibilityMode();
+ return LinkageInfo(VisibleNoLinkage, globalVisibility,
+ /*visibilityExplicit=*/false);
+ }
}
-
- LV = getLVForDecl(FD, computation);
}
if (!isExternallyVisible(LV.getLinkage()))
return LinkageInfo::none();
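
The linkage change above is easiest to see on a static local inside an implicitly-inline member function. A sketch of the affected shape, assuming a build with -fvisibility-inlines-hidden (Widget is hypothetical):

// With -fvisibility-inlines-hidden, counter() itself is implicitly hidden,
// but its static local must still be shared across TUs; the revised code
// gives it the global visibility mode (or the class's explicit visibility,
// if present) instead of unconditionally making it hidden.
struct Widget {
  int *counter() {        // implicitly inline member function
    static int Hits = 0;  // static local every TU must agree on
    return &Hits;
  }
};
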
@@ -2361,6 +2371,14 @@ static DeclT *getDefinitionOrSelf(DeclT *D) {
return D;
}
+bool VarDecl::isEscapingByref() const {
+ return hasAttr<BlocksAttr>() && NonParmVarDeclBits.EscapingByref;
+}
+
+bool VarDecl::isNonEscapingByref() const {
+ return hasAttr<BlocksAttr>() && !NonParmVarDeclBits.EscapingByref;
+}
+
VarDecl *VarDecl::getTemplateInstantiationPattern() const {
// If it's a variable template specialization, find the template or partial
// specialization from which it was instantiated.
@@ -2451,7 +2469,7 @@ bool VarDecl::isKnownToBeDefined() const {
//
// With CUDA relocatable device code enabled, these variables don't get
// special handling; they're treated like regular extern variables.
- if (LangOpts.CUDA && !LangOpts.CUDARelocatableDeviceCode &&
+ if (LangOpts.CUDA && !LangOpts.GPURelocatableDeviceCode &&
hasExternalStorage() && hasAttr<CUDASharedAttr>() &&
isa<IncompleteArrayType>(getType()))
return true;
@@ -2558,7 +2576,7 @@ Expr *ParmVarDecl::getDefaultArg() {
"Default argument is not yet instantiated!");
Expr *Arg = getInit();
- if (auto *E = dyn_cast_or_null<ExprWithCleanups>(Arg))
+ if (auto *E = dyn_cast_or_null<FullExpr>(Arg))
return E->getSubExpr();
return Arg;
@@ -2634,27 +2652,30 @@ FunctionDecl::FunctionDecl(Kind DK, ASTContext &C, DeclContext *DC,
StartLoc),
DeclContext(DK), redeclarable_base(C), ODRHash(0),
EndRangeLoc(NameInfo.getEndLoc()), DNLoc(NameInfo.getInfo()) {
- setStorageClass(S);
- setInlineSpecified(isInlineSpecified);
- setExplicitSpecified(false);
- setVirtualAsWritten(false);
- setPure(false);
- setHasInheritedPrototype(false);
- setHasWrittenPrototype(true);
- setDeletedAsWritten(false);
- setTrivial(false);
- setTrivialForCall(false);
- setDefaulted(false);
- setExplicitlyDefaulted(false);
- setHasImplicitReturnZero(false);
- setLateTemplateParsed(false);
- setConstexpr(isConstexprSpecified);
- setInstantiationIsPending(false);
- setUsesSEHTry(false);
- setHasSkippedBody(false);
- setWillHaveBody(false);
- setIsMultiVersion(false);
- setHasODRHash(false);
+ assert(T.isNull() || T->isFunctionType());
+ FunctionDeclBits.SClass = S;
+ FunctionDeclBits.IsInline = isInlineSpecified;
+ FunctionDeclBits.IsInlineSpecified = isInlineSpecified;
+ FunctionDeclBits.IsExplicitSpecified = false;
+ FunctionDeclBits.IsVirtualAsWritten = false;
+ FunctionDeclBits.IsPure = false;
+ FunctionDeclBits.HasInheritedPrototype = false;
+ FunctionDeclBits.HasWrittenPrototype = true;
+ FunctionDeclBits.IsDeleted = false;
+ FunctionDeclBits.IsTrivial = false;
+ FunctionDeclBits.IsTrivialForCall = false;
+ FunctionDeclBits.IsDefaulted = false;
+ FunctionDeclBits.IsExplicitlyDefaulted = false;
+ FunctionDeclBits.HasImplicitReturnZero = false;
+ FunctionDeclBits.IsLateTemplateParsed = false;
+ FunctionDeclBits.IsConstexpr = isConstexprSpecified;
+ FunctionDeclBits.InstantiationIsPending = false;
+ FunctionDeclBits.UsesSEHTry = false;
+ FunctionDeclBits.HasSkippedBody = false;
+ FunctionDeclBits.WillHaveBody = false;
+ FunctionDeclBits.IsMultiVersion = false;
+ FunctionDeclBits.IsCopyDeductionCandidate = false;
+ FunctionDeclBits.HasODRHash = false;
}
void FunctionDecl::getNameForDiagnostic(
@@ -2921,6 +2942,17 @@ bool FunctionDecl::isNoReturn() const {
return false;
}
+
+MultiVersionKind FunctionDecl::getMultiVersionKind() const {
+ if (hasAttr<TargetAttr>())
+ return MultiVersionKind::Target;
+ if (hasAttr<CPUDispatchAttr>())
+ return MultiVersionKind::CPUDispatch;
+ if (hasAttr<CPUSpecificAttr>())
+ return MultiVersionKind::CPUSpecific;
+ return MultiVersionKind::None;
+}
+
bool FunctionDecl::isCPUDispatchMultiVersion() const {
return isMultiVersion() && hasAttr<CPUDispatchAttr>();
}
@@ -2929,6 +2961,10 @@ bool FunctionDecl::isCPUSpecificMultiVersion() const {
return isMultiVersion() && hasAttr<CPUSpecificAttr>();
}
+bool FunctionDecl::isTargetMultiVersion() const {
+ return isMultiVersion() && hasAttr<TargetAttr>();
+}
+
void
FunctionDecl::setPreviousDeclaration(FunctionDecl *PrevDecl) {
redeclarable_base::setPreviousDecl(PrevDecl);
diff --git a/lib/AST/DeclBase.cpp b/lib/AST/DeclBase.cpp
index f5de359106..95babf7917 100644
--- a/lib/AST/DeclBase.cpp
+++ b/lib/AST/DeclBase.cpp
@@ -955,10 +955,7 @@ static Decl::Kind getKind(const Decl *D) { return D->getKind(); }
static Decl::Kind getKind(const DeclContext *DC) { return DC->getDeclKind(); }
int64_t Decl::getID() const {
- Optional<int64_t> Out = getASTContext().getAllocator().identifyObject(this);
- assert(Out && "Wrong allocator used");
- assert(*Out % alignof(Decl) == 0 && "Wrong alignment information");
- return *Out / alignof(Decl);
+ return getASTContext().getAllocator().identifyKnownAlignedObject<Decl>(this);
}
const FunctionType *Decl::getFunctionType(bool BlocksToo) const {
@@ -1724,8 +1721,18 @@ void DeclContext::localUncachedLookup(DeclarationName Name,
DeclContext *DeclContext::getRedeclContext() {
DeclContext *Ctx = this;
- // Skip through transparent contexts.
- while (Ctx->isTransparentContext())
+
+ // In C, a record type is the redeclaration context for its fields only. If
+ // we arrive at a record context after skipping anything else, we should skip
+ // the record as well. Currently, this means skipping enumerations because
+ // they're the only transparent context that can exist within a struct or
+ // union.
+ bool SkipRecords = getDeclKind() == Decl::Kind::Enum &&
+ !getParentASTContext().getLangOpts().CPlusPlus;
+
+ // Skip through contexts to get to the redeclaration context. Transparent
+ // contexts are always skipped.
+ while ((SkipRecords && Ctx->isRecord()) || Ctx->isTransparentContext())
Ctx = Ctx->getParent();
return Ctx;
}
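
A short example of what getRedeclContext skips. Unscoped enums are transparent, so their enumerators are redeclared in the surrounding context; the new branch additionally skips the record itself when compiling C, where a struct does not form a scope for the enumerators declared inside it. A sketch, not taken from the patch:

namespace demo {
enum Color { Red, Green };   // transparent context: always skipped
}
int R = demo::Red;           // enumerators live in the enclosing namespace

// In C (-x c), the record is skipped as well:
//   struct S { enum { Tag = 1 } Kind; };
//   int T = Tag;            /* valid C: Tag's redeclaration context is the TU */
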
diff --git a/lib/AST/DeclCXX.cpp b/lib/AST/DeclCXX.cpp
index 9bb6e3a013..33f159417b 100644
--- a/lib/AST/DeclCXX.cpp
+++ b/lib/AST/DeclCXX.cpp
@@ -2173,19 +2173,24 @@ CXXMethodDecl::overridden_methods() const {
return getASTContext().overridden_methods(this);
}
+QualType CXXMethodDecl::getThisType(const FunctionProtoType *FPT,
+ const CXXRecordDecl *Decl) {
+ ASTContext &C = Decl->getASTContext();
+ QualType ClassTy = C.getTypeDeclType(Decl);
+ ClassTy = C.getQualifiedType(ClassTy, FPT->getTypeQuals());
+ return C.getPointerType(ClassTy);
+}
+
QualType CXXMethodDecl::getThisType(ASTContext &C) const {
// C++ 9.3.2p1: The type of this in a member function of a class X is X*.
// If the member function is declared const, the type of this is const X*,
// if the member function is declared volatile, the type of this is
// volatile X*, and if the member function is declared const volatile,
// the type of this is const volatile X*.
-
assert(isInstance() && "No 'this' for static methods!");
- QualType ClassTy = C.getTypeDeclType(getParent());
- ClassTy = C.getQualifiedType(ClassTy,
- Qualifiers::fromCVRUMask(getTypeQualifiers()));
- return C.getPointerType(ClassTy);
+ return CXXMethodDecl::getThisType(getType()->getAs<FunctionProtoType>(),
+ getParent());
}
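
getThisType simply maps the method's cv-qualifiers onto the pointee type. For reference, this is standard C++ behaviour rather than anything patch-specific:

struct Buffer {
  void touch();               // type of 'this': Buffer *
  int  peek() const;          // type of 'this': const Buffer *
  void poll() const volatile; // type of 'this': const volatile Buffer *
};
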
bool CXXMethodDecl::hasInlineBody() const {
@@ -2246,6 +2251,11 @@ CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
: Initializee(TInfo), Init(Init), LParenLoc(L), RParenLoc(R),
IsDelegating(true), IsVirtual(false), IsWritten(false), SourceOrder(0) {}
+int64_t CXXCtorInitializer::getID(const ASTContext &Context) const {
+ return Context.getAllocator()
+ .identifyKnownAlignedObject<CXXCtorInitializer>(this);
+}
+
TypeLoc CXXCtorInitializer::getBaseClassLoc() const {
if (isBaseInitializer())
return Initializee.get<TypeSourceInfo*>()->getTypeLoc();
diff --git a/lib/AST/DeclPrinter.cpp b/lib/AST/DeclPrinter.cpp
index 7fc275f90e..517851f9ee 100644
--- a/lib/AST/DeclPrinter.cpp
+++ b/lib/AST/DeclPrinter.cpp
@@ -1549,11 +1549,9 @@ void DeclPrinter::VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D) {
void DeclPrinter::VisitOMPRequiresDecl(OMPRequiresDecl *D) {
Out << "#pragma omp requires ";
if (!D->clauselist_empty()) {
- for (auto I = D->clauselist_begin(), E = D->clauselist_end(); I != E; ++I) {
- if (I != D->clauselist_begin())
- Out << ',';
- Out << getOpenMPClauseName((*I)->getClauseKind());
- }
+ OMPClausePrinter Printer(Out, Policy);
+ for (auto I = D->clauselist_begin(), E = D->clauselist_end(); I != E; ++I)
+ Printer.Visit(*I);
}
}
diff --git a/lib/AST/DeclTemplate.cpp b/lib/AST/DeclTemplate.cpp
index 8e4944607c..04e1803281 100644
--- a/lib/AST/DeclTemplate.cpp
+++ b/lib/AST/DeclTemplate.cpp
@@ -300,6 +300,40 @@ ArrayRef<TemplateArgument> FunctionTemplateDecl::getInjectedTemplateArgs() {
return llvm::makeArrayRef(CommonPtr->InjectedArgs, Params->size());
}
+void FunctionTemplateDecl::mergePrevDecl(FunctionTemplateDecl *Prev) {
+ using Base = RedeclarableTemplateDecl;
+
+ // If we haven't created a common pointer yet, then it can just be created
+ // with the usual method.
+ if (!Base::Common)
+ return;
+
+ Common *ThisCommon = static_cast<Common *>(Base::Common);
+ Common *PrevCommon = nullptr;
+ SmallVector<FunctionTemplateDecl *, 8> PreviousDecls;
+ for (; Prev; Prev = Prev->getPreviousDecl()) {
+ if (Prev->Base::Common) {
+ PrevCommon = static_cast<Common *>(Prev->Base::Common);
+ break;
+ }
+ PreviousDecls.push_back(Prev);
+ }
+
+ // If the previous redecl chain hasn't created a common pointer yet, then just
+ // use this common pointer.
+ if (!PrevCommon) {
+ for (auto *D : PreviousDecls)
+ D->Base::Common = ThisCommon;
+ return;
+ }
+
+ // Ensure we don't leak any important state.
+ assert(ThisCommon->Specializations.size() == 0 &&
+ "Can't merge incompatible declarations!");
+
+ Base::Common = PrevCommon;
+}
+
//===----------------------------------------------------------------------===//
// ClassTemplateDecl Implementation
//===----------------------------------------------------------------------===//
diff --git a/lib/AST/Expr.cpp b/lib/AST/Expr.cpp
index 651981374d..5a80016850 100644
--- a/lib/AST/Expr.cpp
+++ b/lib/AST/Expr.cpp
@@ -28,7 +28,6 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Lexer.h"
#include "clang/Lex/LiteralSupport.h"
-#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
@@ -351,7 +350,8 @@ DeclRefExpr::DeclRefExpr(const ASTContext &Ctx,
const TemplateArgumentListInfo *TemplateArgs,
QualType T, ExprValueKind VK)
: Expr(DeclRefExprClass, T, VK, OK_Ordinary, false, false, false, false),
- D(D), Loc(NameInfo.getLoc()), DNLoc(NameInfo.getInfo()) {
+ D(D), DNLoc(NameInfo.getInfo()) {
+ DeclRefExprBits.Loc = NameInfo.getLoc();
DeclRefExprBits.HasQualifier = QualifierLoc ? 1 : 0;
if (QualifierLoc) {
new (getTrailingObjects<NestedNameSpecifierLoc>())
@@ -458,20 +458,45 @@ SourceLocation DeclRefExpr::getEndLoc() const {
return getNameInfo().getEndLoc();
}
-PredefinedExpr::PredefinedExpr(SourceLocation L, QualType FNTy, IdentType IT,
+PredefinedExpr::PredefinedExpr(SourceLocation L, QualType FNTy, IdentKind IK,
StringLiteral *SL)
: Expr(PredefinedExprClass, FNTy, VK_LValue, OK_Ordinary,
FNTy->isDependentType(), FNTy->isDependentType(),
FNTy->isInstantiationDependentType(),
- /*ContainsUnexpandedParameterPack=*/false),
- Loc(L), Type(IT), FnName(SL) {}
+ /*ContainsUnexpandedParameterPack=*/false) {
+ PredefinedExprBits.Kind = IK;
+ assert((getIdentKind() == IK) &&
+ "IdentKind do not fit in PredefinedExprBitfields!");
+ bool HasFunctionName = SL != nullptr;
+ PredefinedExprBits.HasFunctionName = HasFunctionName;
+ PredefinedExprBits.Loc = L;
+ if (HasFunctionName)
+ setFunctionName(SL);
+}
+
+PredefinedExpr::PredefinedExpr(EmptyShell Empty, bool HasFunctionName)
+ : Expr(PredefinedExprClass, Empty) {
+ PredefinedExprBits.HasFunctionName = HasFunctionName;
+}
-StringLiteral *PredefinedExpr::getFunctionName() {
- return cast_or_null<StringLiteral>(FnName);
+PredefinedExpr *PredefinedExpr::Create(const ASTContext &Ctx, SourceLocation L,
+ QualType FNTy, IdentKind IK,
+ StringLiteral *SL) {
+ bool HasFunctionName = SL != nullptr;
+ void *Mem = Ctx.Allocate(totalSizeToAlloc<Stmt *>(HasFunctionName),
+ alignof(PredefinedExpr));
+ return new (Mem) PredefinedExpr(L, FNTy, IK, SL);
}
-StringRef PredefinedExpr::getIdentTypeName(PredefinedExpr::IdentType IT) {
- switch (IT) {
+PredefinedExpr *PredefinedExpr::CreateEmpty(const ASTContext &Ctx,
+ bool HasFunctionName) {
+ void *Mem = Ctx.Allocate(totalSizeToAlloc<Stmt *>(HasFunctionName),
+ alignof(PredefinedExpr));
+ return new (Mem) PredefinedExpr(EmptyShell(), HasFunctionName);
+}
+
+StringRef PredefinedExpr::getIdentKindName(PredefinedExpr::IdentKind IK) {
+ switch (IK) {
case Func:
return "__func__";
case Function:
@@ -489,15 +514,15 @@ StringRef PredefinedExpr::getIdentTypeName(PredefinedExpr::IdentType IT) {
case PrettyFunctionNoVirtual:
break;
}
- llvm_unreachable("Unknown ident type for PredefinedExpr");
+ llvm_unreachable("Unknown ident kind for PredefinedExpr");
}
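
For reference, the ident kinds above expand roughly as follows when compiled with Clang (illustrative sketch):

#include <cstdio>

namespace demo {
struct S {
  void run() {
    std::puts(__func__);            // "run"
    std::puts(__FUNCTION__);        // "run"
    std::puts(__PRETTY_FUNCTION__); // "void demo::S::run()"
  }
};
} // namespace demo
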
// FIXME: Maybe this should use DeclPrinter with a special "print predefined
// expr" policy instead.
-std::string PredefinedExpr::ComputeName(IdentType IT, const Decl *CurrentDecl) {
+std::string PredefinedExpr::ComputeName(IdentKind IK, const Decl *CurrentDecl) {
ASTContext &Context = CurrentDecl->getASTContext();
- if (IT == PredefinedExpr::FuncDName) {
+ if (IK == PredefinedExpr::FuncDName) {
if (const NamedDecl *ND = dyn_cast<NamedDecl>(CurrentDecl)) {
std::unique_ptr<MangleContext> MC;
MC.reset(Context.createMangleContext());
@@ -532,21 +557,21 @@ std::string PredefinedExpr::ComputeName(IdentType IT, const Decl *CurrentDecl) {
llvm::raw_svector_ostream Out(Buffer);
if (auto *DCBlock = dyn_cast<BlockDecl>(DC))
// For nested blocks, propagate up to the parent.
- Out << ComputeName(IT, DCBlock);
+ Out << ComputeName(IK, DCBlock);
else if (auto *DCDecl = dyn_cast<Decl>(DC))
- Out << ComputeName(IT, DCDecl) << "_block_invoke";
+ Out << ComputeName(IK, DCDecl) << "_block_invoke";
return Out.str();
}
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CurrentDecl)) {
- if (IT != PrettyFunction && IT != PrettyFunctionNoVirtual &&
- IT != FuncSig && IT != LFuncSig)
+ if (IK != PrettyFunction && IK != PrettyFunctionNoVirtual &&
+ IK != FuncSig && IK != LFuncSig)
return FD->getNameAsString();
SmallString<256> Name;
llvm::raw_svector_ostream Out(Name);
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
- if (MD->isVirtual() && IT != PrettyFunctionNoVirtual)
+ if (MD->isVirtual() && IK != PrettyFunctionNoVirtual)
Out << "virtual ";
if (MD->isStatic())
Out << "static ";
@@ -564,7 +589,7 @@ std::string PredefinedExpr::ComputeName(IdentType IT, const Decl *CurrentDecl) {
if (FD->hasWrittenPrototype())
FT = dyn_cast<FunctionProtoType>(AFT);
- if (IT == FuncSig || IT == LFuncSig) {
+ if (IK == FuncSig || IK == LFuncSig) {
switch (AFT->getCallConv()) {
case CC_C: POut << "__cdecl "; break;
case CC_X86StdCall: POut << "__stdcall "; break;
@@ -589,7 +614,7 @@ std::string PredefinedExpr::ComputeName(IdentType IT, const Decl *CurrentDecl) {
if (FT->isVariadic()) {
if (FD->getNumParams()) POut << ", ";
POut << "...";
- } else if ((IT == FuncSig || IT == LFuncSig ||
+ } else if ((IK == FuncSig || IK == LFuncSig ||
!Context.getLangOpts().CPlusPlus) &&
!Decl->getNumParams()) {
POut << "void";
@@ -688,7 +713,7 @@ std::string PredefinedExpr::ComputeName(IdentType IT, const Decl *CurrentDecl) {
// CapturedDecl.
if (DC->isFunctionOrMethod() && (DC->getDeclKind() != Decl::Captured)) {
const Decl *D = Decl::castFromDeclContext(DC);
- return ComputeName(IT, D);
+ return ComputeName(IK, D);
}
llvm_unreachable("CapturedDecl not inside a function or method");
}
@@ -713,7 +738,7 @@ std::string PredefinedExpr::ComputeName(IdentType IT, const Decl *CurrentDecl) {
return Name.str().str();
}
- if (isa<TranslationUnitDecl>(CurrentDecl) && IT == PrettyFunction) {
+ if (isa<TranslationUnitDecl>(CurrentDecl) && IK == PrettyFunction) {
// __PRETTY_FUNCTION__ -> "top level", the others produce an empty string.
return "top level";
}
@@ -861,66 +886,105 @@ double FloatingLiteral::getValueAsApproximateDouble() const {
return V.convertToDouble();
}
-int StringLiteral::mapCharByteWidth(TargetInfo const &target,StringKind k) {
- int CharByteWidth = 0;
- switch(k) {
- case Ascii:
- case UTF8:
- CharByteWidth = target.getCharWidth();
- break;
- case Wide:
- CharByteWidth = target.getWCharWidth();
- break;
- case UTF16:
- CharByteWidth = target.getChar16Width();
- break;
- case UTF32:
- CharByteWidth = target.getChar32Width();
- break;
+unsigned StringLiteral::mapCharByteWidth(TargetInfo const &Target,
+ StringKind SK) {
+ unsigned CharByteWidth = 0;
+ switch (SK) {
+ case Ascii:
+ case UTF8:
+ CharByteWidth = Target.getCharWidth();
+ break;
+ case Wide:
+ CharByteWidth = Target.getWCharWidth();
+ break;
+ case UTF16:
+ CharByteWidth = Target.getChar16Width();
+ break;
+ case UTF32:
+ CharByteWidth = Target.getChar32Width();
+ break;
}
assert((CharByteWidth & 7) == 0 && "Assumes character size is byte multiple");
CharByteWidth /= 8;
- assert((CharByteWidth==1 || CharByteWidth==2 || CharByteWidth==4)
- && "character byte widths supported are 1, 2, and 4 only");
+ assert((CharByteWidth == 1 || CharByteWidth == 2 || CharByteWidth == 4) &&
+ "The only supported character byte widths are 1,2 and 4!");
return CharByteWidth;
}
-StringLiteral *StringLiteral::Create(const ASTContext &C, StringRef Str,
- StringKind Kind, bool Pascal, QualType Ty,
- const SourceLocation *Loc,
- unsigned NumStrs) {
- assert(C.getAsConstantArrayType(Ty) &&
+StringLiteral::StringLiteral(const ASTContext &Ctx, StringRef Str,
+ StringKind Kind, bool Pascal, QualType Ty,
+ const SourceLocation *Loc,
+ unsigned NumConcatenated)
+ : Expr(StringLiteralClass, Ty, VK_LValue, OK_Ordinary, false, false, false,
+ false) {
+ assert(Ctx.getAsConstantArrayType(Ty) &&
"StringLiteral must be of constant array type!");
+ unsigned CharByteWidth = mapCharByteWidth(Ctx.getTargetInfo(), Kind);
+ unsigned ByteLength = Str.size();
+ assert((ByteLength % CharByteWidth == 0) &&
+ "The size of the data must be a multiple of CharByteWidth!");
+
+ // Avoid the expensive division. The compiler should be able to figure it
+ // out by itself. However as of clang 7, even with the appropriate
+ // llvm_unreachable added just here, it is not able to do so.
+ unsigned Length;
+ switch (CharByteWidth) {
+ case 1:
+ Length = ByteLength;
+ break;
+ case 2:
+ Length = ByteLength / 2;
+ break;
+ case 4:
+ Length = ByteLength / 4;
+ break;
+ default:
+ llvm_unreachable("Unsupported character width!");
+ }
- // Allocate enough space for the StringLiteral plus an array of locations for
- // any concatenated string tokens.
- void *Mem =
- C.Allocate(sizeof(StringLiteral) + sizeof(SourceLocation) * (NumStrs - 1),
- alignof(StringLiteral));
- StringLiteral *SL = new (Mem) StringLiteral(Ty);
+ StringLiteralBits.Kind = Kind;
+ StringLiteralBits.CharByteWidth = CharByteWidth;
+ StringLiteralBits.IsPascal = Pascal;
+ StringLiteralBits.NumConcatenated = NumConcatenated;
+ *getTrailingObjects<unsigned>() = Length;
- // OPTIMIZE: could allocate this appended to the StringLiteral.
- SL->setString(C,Str,Kind,Pascal);
+ // Initialize the trailing array of SourceLocation.
+ // This is safe since SourceLocation is POD-like.
+ std::memcpy(getTrailingObjects<SourceLocation>(), Loc,
+ NumConcatenated * sizeof(SourceLocation));
- SL->TokLocs[0] = Loc[0];
- SL->NumConcatenated = NumStrs;
+ // Initialize the trailing array of char holding the string data.
+ std::memcpy(getTrailingObjects<char>(), Str.data(), ByteLength);
+}
- if (NumStrs != 1)
- memcpy(&SL->TokLocs[1], Loc+1, sizeof(SourceLocation)*(NumStrs-1));
- return SL;
+StringLiteral::StringLiteral(EmptyShell Empty, unsigned NumConcatenated,
+ unsigned Length, unsigned CharByteWidth)
+ : Expr(StringLiteralClass, Empty) {
+ StringLiteralBits.CharByteWidth = CharByteWidth;
+ StringLiteralBits.NumConcatenated = NumConcatenated;
+ *getTrailingObjects<unsigned>() = Length;
}
-StringLiteral *StringLiteral::CreateEmpty(const ASTContext &C,
- unsigned NumStrs) {
- void *Mem =
- C.Allocate(sizeof(StringLiteral) + sizeof(SourceLocation) * (NumStrs - 1),
- alignof(StringLiteral));
- StringLiteral *SL =
- new (Mem) StringLiteral(C.adjustStringLiteralBaseType(QualType()));
- SL->CharByteWidth = 0;
- SL->Length = 0;
- SL->NumConcatenated = NumStrs;
- return SL;
+StringLiteral *StringLiteral::Create(const ASTContext &Ctx, StringRef Str,
+ StringKind Kind, bool Pascal, QualType Ty,
+ const SourceLocation *Loc,
+ unsigned NumConcatenated) {
+ void *Mem = Ctx.Allocate(totalSizeToAlloc<unsigned, SourceLocation, char>(
+ 1, NumConcatenated, Str.size()),
+ alignof(StringLiteral));
+ return new (Mem)
+ StringLiteral(Ctx, Str, Kind, Pascal, Ty, Loc, NumConcatenated);
+}
+
+StringLiteral *StringLiteral::CreateEmpty(const ASTContext &Ctx,
+ unsigned NumConcatenated,
+ unsigned Length,
+ unsigned CharByteWidth) {
+ void *Mem = Ctx.Allocate(totalSizeToAlloc<unsigned, SourceLocation, char>(
+ 1, NumConcatenated, Length * CharByteWidth),
+ alignof(StringLiteral));
+ return new (Mem)
+ StringLiteral(EmptyShell(), NumConcatenated, Length, CharByteWidth);
}
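
StringLiteral now stores its length, token locations and character data as trailing objects in a single allocation instead of separate heap arrays. A minimal, self-contained sketch of the llvm::TrailingObjects idiom (Packet is hypothetical and not part of Clang):

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/TrailingObjects.h"
#include <cstring>
#include <new>

// The variable-size payload lives directly after the object header in one
// allocation; no second heap allocation is needed for the character data.
class Packet final : private llvm::TrailingObjects<Packet, char> {
  friend TrailingObjects;
  unsigned Size;

  explicit Packet(llvm::StringRef Data) : Size(Data.size()) {
    std::memcpy(getTrailingObjects<char>(), Data.data(), Size);
  }

public:
  static Packet *create(llvm::StringRef Data) {
    void *Mem = ::operator new(totalSizeToAlloc<char>(Data.size()));
    return new (Mem) Packet(Data);
  }
  llvm::StringRef str() const { return {getTrailingObjects<char>(), Size}; }
};

Packet::create("abc")->str() then reads straight out of the shared allocation, which is the same design choice the new StringLiteral::Create makes with totalSizeToAlloc<unsigned, SourceLocation, char>.
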
void StringLiteral::outputString(raw_ostream &OS) const {
@@ -1019,42 +1083,6 @@ void StringLiteral::outputString(raw_ostream &OS) const {
OS << '"';
}
-void StringLiteral::setString(const ASTContext &C, StringRef Str,
- StringKind Kind, bool IsPascal) {
- //FIXME: we assume that the string data comes from a target that uses the same
- // code unit size and endianness for the type of string.
- this->Kind = Kind;
- this->IsPascal = IsPascal;
-
- CharByteWidth = mapCharByteWidth(C.getTargetInfo(),Kind);
- assert((Str.size()%CharByteWidth == 0)
- && "size of data must be multiple of CharByteWidth");
- Length = Str.size()/CharByteWidth;
-
- switch(CharByteWidth) {
- case 1: {
- char *AStrData = new (C) char[Length];
- std::memcpy(AStrData,Str.data(),Length*sizeof(*AStrData));
- StrData.asChar = AStrData;
- break;
- }
- case 2: {
- uint16_t *AStrData = new (C) uint16_t[Length];
- std::memcpy(AStrData,Str.data(),Length*sizeof(*AStrData));
- StrData.asUInt16 = AStrData;
- break;
- }
- case 4: {
- uint32_t *AStrData = new (C) uint32_t[Length];
- std::memcpy(AStrData,Str.data(),Length*sizeof(*AStrData));
- StrData.asUInt32 = AStrData;
- break;
- }
- default:
- llvm_unreachable("unsupported CharByteWidth");
- }
-}
-
/// getLocationOfByte - Return a source location that points to the specified
/// byte of this string literal.
///
@@ -1076,7 +1104,8 @@ StringLiteral::getLocationOfByte(unsigned ByteNo, const SourceManager &SM,
const LangOptions &Features,
const TargetInfo &Target, unsigned *StartToken,
unsigned *StartTokenByteOffset) const {
- assert((Kind == StringLiteral::Ascii || Kind == StringLiteral::UTF8) &&
+ assert((getKind() == StringLiteral::Ascii ||
+ getKind() == StringLiteral::UTF8) &&
"Only narrow string literals are currently supported");
// Loop over all of the tokens in this string until we find the one that
@@ -1144,8 +1173,6 @@ StringLiteral::getLocationOfByte(unsigned ByteNo, const SourceManager &SM,
}
}
-
-
/// getOpcodeStr - Turn an Opcode enum value into the punctuation char it
/// corresponds to, e.g. "sizeof" or "[pre]++".
StringRef UnaryOperator::getOpcodeStr(Opcode Op) {
@@ -1194,14 +1221,19 @@ OverloadedOperatorKind UnaryOperator::getOverloadedOperator(Opcode Opc) {
CallExpr::CallExpr(const ASTContext &C, StmtClass SC, Expr *fn,
ArrayRef<Expr *> preargs, ArrayRef<Expr *> args, QualType t,
- ExprValueKind VK, SourceLocation rparenloc)
+ ExprValueKind VK, SourceLocation rparenloc,
+ unsigned MinNumArgs, ADLCallKind UsesADL)
: Expr(SC, t, VK, OK_Ordinary, fn->isTypeDependent(),
fn->isValueDependent(), fn->isInstantiationDependent(),
fn->containsUnexpandedParameterPack()),
- NumArgs(args.size()) {
+ RParenLoc(rparenloc) {
+ CallExprBits.UsesADL = static_cast<bool>(UsesADL);
+ NumArgs = std::max<unsigned>(args.size(), MinNumArgs);
unsigned NumPreArgs = preargs.size();
- SubExprs = new (C) Stmt *[args.size()+PREARGS_START+NumPreArgs];
+ CallExprBits.NumPreArgs = NumPreArgs;
+
+ SubExprs = new (C) Stmt *[NumArgs + PREARGS_START + NumPreArgs];
SubExprs[FN] = fn;
for (unsigned i = 0; i != NumPreArgs; ++i) {
updateDependenciesFromArg(preargs[i]);
@@ -1211,32 +1243,34 @@ CallExpr::CallExpr(const ASTContext &C, StmtClass SC, Expr *fn,
updateDependenciesFromArg(args[i]);
SubExprs[i+PREARGS_START+NumPreArgs] = args[i];
}
-
- CallExprBits.NumPreArgs = NumPreArgs;
- RParenLoc = rparenloc;
+ for (unsigned i = args.size(); i != NumArgs; ++i) {
+ SubExprs[i + PREARGS_START + NumPreArgs] = nullptr;
+ }
}
CallExpr::CallExpr(const ASTContext &C, StmtClass SC, Expr *fn,
ArrayRef<Expr *> args, QualType t, ExprValueKind VK,
- SourceLocation rparenloc)
- : CallExpr(C, SC, fn, ArrayRef<Expr *>(), args, t, VK, rparenloc) {}
+ SourceLocation rparenloc, unsigned MinNumArgs,
+ ADLCallKind UsesADL)
+ : CallExpr(C, SC, fn, ArrayRef<Expr *>(), args, t, VK, rparenloc,
+ MinNumArgs, UsesADL) {}
CallExpr::CallExpr(const ASTContext &C, Expr *fn, ArrayRef<Expr *> args,
- QualType t, ExprValueKind VK, SourceLocation rparenloc)
- : CallExpr(C, CallExprClass, fn, ArrayRef<Expr *>(), args, t, VK, rparenloc) {
-}
-
-CallExpr::CallExpr(const ASTContext &C, StmtClass SC, EmptyShell Empty)
- : CallExpr(C, SC, /*NumPreArgs=*/0, Empty) {}
+ QualType t, ExprValueKind VK, SourceLocation rparenloc,
+ unsigned MinNumArgs, ADLCallKind UsesADL)
+ : CallExpr(C, CallExprClass, fn, ArrayRef<Expr *>(), args, t, VK, rparenloc,
+ MinNumArgs, UsesADL) {}
CallExpr::CallExpr(const ASTContext &C, StmtClass SC, unsigned NumPreArgs,
- EmptyShell Empty)
- : Expr(SC, Empty), SubExprs(nullptr), NumArgs(0) {
- // FIXME: Why do we allocate this?
- SubExprs = new (C) Stmt*[PREARGS_START+NumPreArgs]();
+ unsigned NumArgs, EmptyShell Empty)
+ : Expr(SC, Empty), NumArgs(NumArgs) {
CallExprBits.NumPreArgs = NumPreArgs;
+ SubExprs = new (C) Stmt *[NumArgs + PREARGS_START + NumPreArgs];
}
+CallExpr::CallExpr(const ASTContext &C, unsigned NumArgs, EmptyShell Empty)
+ : CallExpr(C, CallExprClass, /*NumPreArgs=*/0, NumArgs, Empty) {}
+
void CallExpr::updateDependenciesFromArg(Expr *Arg) {
if (Arg->isTypeDependent())
ExprBits.TypeDependent = true;
@@ -1280,35 +1314,6 @@ Decl *Expr::getReferencedDeclOfCallee() {
return nullptr;
}
-/// setNumArgs - This changes the number of arguments present in this call.
-/// Any orphaned expressions are deleted by this, and any new operands are set
-/// to null.
-void CallExpr::setNumArgs(const ASTContext& C, unsigned NumArgs) {
- // No change, just return.
- if (NumArgs == getNumArgs()) return;
-
- // If shrinking # arguments, just delete the extras and forgot them.
- if (NumArgs < getNumArgs()) {
- this->NumArgs = NumArgs;
- return;
- }
-
- // Otherwise, we are growing the # arguments. New an bigger argument array.
- unsigned NumPreArgs = getNumPreArgs();
- Stmt **NewSubExprs = new (C) Stmt*[NumArgs+PREARGS_START+NumPreArgs];
- // Copy over args.
- for (unsigned i = 0; i != getNumArgs()+PREARGS_START+NumPreArgs; ++i)
- NewSubExprs[i] = SubExprs[i];
- // Null out new args.
- for (unsigned i = getNumArgs()+PREARGS_START+NumPreArgs;
- i != NumArgs+PREARGS_START+NumPreArgs; ++i)
- NewSubExprs[i] = nullptr;
-
- if (SubExprs) C.Deallocate(SubExprs);
- SubExprs = NewSubExprs;
- this->NumArgs = NumArgs;
-}
-
/// getBuiltinCallee - If this is a call to a builtin, return the builtin ID. If
/// not, return 0.
unsigned CallExpr::getBuiltinCallee() const {
@@ -1446,7 +1451,7 @@ UnaryExprOrTypeTraitExpr::UnaryExprOrTypeTraitExpr(
// Check to see if we are in the situation where alignof(decl) should be
// dependent because decl's alignment is dependent.
- if (ExprKind == UETT_AlignOf) {
+ if (ExprKind == UETT_AlignOf || ExprKind == UETT_PreferredAlignOf) {
if (!isValueDependent() || !isInstantiationDependent()) {
E = E->IgnoreParens();
@@ -1502,7 +1507,7 @@ MemberExpr *MemberExpr::Create(
QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent())
E->setInstantiationDependent(true);
- E->HasQualifierOrFoundDecl = true;
+ E->MemberExprBits.HasQualifierOrFoundDecl = true;
MemberExprNameQualifier *NQ =
E->getTrailingObjects<MemberExprNameQualifier>();
@@ -1510,7 +1515,8 @@ MemberExpr *MemberExpr::Create(
NQ->FoundDecl = founddecl;
}
- E->HasTemplateKWAndArgsInfo = (targs || TemplateKWLoc.isValid());
+ E->MemberExprBits.HasTemplateKWAndArgsInfo =
+ (targs || TemplateKWLoc.isValid());
if (targs) {
bool Dependent = false;
@@ -1605,13 +1611,18 @@ bool CastExpr::CastConsistency() const {
assert(getSubExpr()->getType()->isFunctionType());
goto CheckNoBasePath;
- case CK_AddressSpaceConversion:
- assert(getType()->isPointerType() || getType()->isBlockPointerType());
- assert(getSubExpr()->getType()->isPointerType() ||
- getSubExpr()->getType()->isBlockPointerType());
- assert(getType()->getPointeeType().getAddressSpace() !=
- getSubExpr()->getType()->getPointeeType().getAddressSpace());
- LLVM_FALLTHROUGH;
+ case CK_AddressSpaceConversion: {
+ auto Ty = getType();
+ auto SETy = getSubExpr()->getType();
+ assert(getValueKindForType(Ty) == Expr::getValueKindForType(SETy));
+ if (!isGLValue())
+ Ty = Ty->getPointeeType();
+ if (!isGLValue())
+ SETy = SETy->getPointeeType();
+ assert(!Ty.isNull() && !SETy.isNull() &&
+ Ty.getAddressSpace() != SETy.getAddressSpace());
+ goto CheckNoBasePath;
+ }
// These should not have an inheritance path.
case CK_Dynamic:
case CK_ToUnion:
@@ -1641,9 +1652,9 @@ bool CastExpr::CastConsistency() const {
case CK_ARCConsumeObject:
case CK_ARCReclaimReturnedObject:
case CK_ARCExtendBlockObject:
- case CK_ZeroToOCLEvent:
- case CK_ZeroToOCLQueue:
+ case CK_ZeroToOCLOpaqueType:
case CK_IntToOCLSampler:
+ case CK_FixedPointCast:
assert(!getType()->isBooleanType() && "unheralded conversion to bool");
goto CheckNoBasePath;
@@ -1661,6 +1672,7 @@ bool CastExpr::CastConsistency() const {
case CK_LValueBitCast: // -> bool&
case CK_UserDefinedConversion: // operator bool()
case CK_BuiltinFnToFnPtr:
+ case CK_FixedPointToBoolean:
CheckNoBasePath:
assert(path_empty() && "Cast kind should not have a base path!");
break;
@@ -2559,6 +2571,10 @@ Expr *Expr::IgnoreParenCasts() {
E = NTTP->getReplacement();
continue;
}
+ if (FullExpr *FE = dyn_cast<FullExpr>(E)) {
+ E = FE->getSubExpr();
+ continue;
+ }
return E;
}
}
@@ -2580,6 +2596,10 @@ Expr *Expr::IgnoreCasts() {
E = NTTP->getReplacement();
continue;
}
+ if (FullExpr *FE = dyn_cast<FullExpr>(E)) {
+ E = FE->getSubExpr();
+ continue;
+ }
return E;
}
}
@@ -2605,6 +2625,9 @@ Expr *Expr::IgnoreParenLValueCasts() {
= dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
E = NTTP->getReplacement();
continue;
+ } else if (FullExpr *FE = dyn_cast<FullExpr>(E)) {
+ E = FE->getSubExpr();
+ continue;
}
break;
}
@@ -2646,6 +2669,10 @@ Expr *Expr::IgnoreParenImpCasts() {
E = NTTP->getReplacement();
continue;
}
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(E)) {
+ E = CE->getSubExpr();
+ continue;
+ }
return E;
}
}
@@ -2870,6 +2897,12 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef,
break;
}
+ case ConstantExprClass: {
+ // FIXME: We should be able to return "true" here, but it can lead to extra
+ // error messages. E.g. in Sema/array-init.c.
+ const Expr *Exp = cast<ConstantExpr>(this)->getSubExpr();
+ return Exp->isConstantInitializer(Ctx, false, Culprit);
+ }
case CompoundLiteralExprClass: {
// This handles gcc's extension that allows global initializers like
// "struct x {int x;} x = (struct x) {};".
@@ -2909,8 +2942,8 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef,
const Expr *Elt = ILE->getInit(ElementNo++);
if (Field->isBitField()) {
// Bitfields have to evaluate to an integer.
- llvm::APSInt ResultTmp;
- if (!Elt->EvaluateAsInt(ResultTmp, Ctx)) {
+ EvalResult Result;
+ if (!Elt->EvaluateAsInt(Result, Ctx)) {
if (Culprit)
*Culprit = Elt;
return false;
@@ -3095,6 +3128,11 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
// These never have a side-effect.
return false;
+ case ConstantExprClass:
+ // FIXME: Move this into the "return false;" block above.
+ return cast<ConstantExpr>(this)->getSubExpr()->HasSideEffects(
+ Ctx, IncludePossibleEffects);
+
case CallExprClass:
case CXXOperatorCallExprClass:
case CXXMemberCallExprClass:
@@ -3254,11 +3292,8 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
case LambdaExprClass: {
const LambdaExpr *LE = cast<LambdaExpr>(this);
- for (LambdaExpr::capture_iterator I = LE->capture_begin(),
- E = LE->capture_end(); I != E; ++I)
- if (I->getCaptureKind() == LCK_ByCopy)
- // FIXME: Only has a side-effect if the variable is volatile or if
- // the copy would invoke a non-trivial copy constructor.
+ for (Expr *E : LE->capture_inits())
+ if (E->HasSideEffects(Ctx, IncludePossibleEffects))
return true;
return false;
}
@@ -3389,20 +3424,20 @@ Expr::isNullPointerConstant(ASTContext &Ctx,
// Check that it is a cast to void*.
if (const PointerType *PT = CE->getType()->getAs<PointerType>()) {
QualType Pointee = PT->getPointeeType();
+ Qualifiers Qs = Pointee.getQualifiers();
// Only (void*)0 or equivalent are treated as nullptr. If pointee type
// has non-default address space it is not treated as nullptr.
// (__generic void*)0 in OpenCL 2.0 should not be treated as nullptr
// since it cannot be assigned to a pointer to constant address space.
- bool PointeeHasDefaultAS =
- Pointee.getAddressSpace() == LangAS::Default ||
- (Ctx.getLangOpts().OpenCLVersion >= 200 &&
+ if ((Ctx.getLangOpts().OpenCLVersion >= 200 &&
Pointee.getAddressSpace() == LangAS::opencl_generic) ||
(Ctx.getLangOpts().OpenCL &&
Ctx.getLangOpts().OpenCLVersion < 200 &&
- Pointee.getAddressSpace() == LangAS::opencl_private);
+ Pointee.getAddressSpace() == LangAS::opencl_private))
+ Qs.removeAddressSpace();
- if (PointeeHasDefaultAS && Pointee->isVoidType() && // to void*
- CE->getSubExpr()->getType()->isIntegerType()) // from int.
+ if (Pointee->isVoidType() && Qs.empty() && // to void*
+ CE->getSubExpr()->getType()->isIntegerType()) // from int
return CE->getSubExpr()->isNullPointerConstant(Ctx, NPC);
}
}
@@ -3952,27 +3987,48 @@ SourceLocation DesignatedInitUpdateExpr::getEndLoc() const {
return getBase()->getEndLoc();
}
-ParenListExpr::ParenListExpr(const ASTContext& C, SourceLocation lparenloc,
- ArrayRef<Expr*> exprs,
- SourceLocation rparenloc)
- : Expr(ParenListExprClass, QualType(), VK_RValue, OK_Ordinary,
- false, false, false, false),
- NumExprs(exprs.size()), LParenLoc(lparenloc), RParenLoc(rparenloc) {
- Exprs = new (C) Stmt*[exprs.size()];
- for (unsigned i = 0; i != exprs.size(); ++i) {
- if (exprs[i]->isTypeDependent())
+ParenListExpr::ParenListExpr(SourceLocation LParenLoc, ArrayRef<Expr *> Exprs,
+ SourceLocation RParenLoc)
+ : Expr(ParenListExprClass, QualType(), VK_RValue, OK_Ordinary, false, false,
+ false, false),
+ LParenLoc(LParenLoc), RParenLoc(RParenLoc) {
+ ParenListExprBits.NumExprs = Exprs.size();
+
+ for (unsigned I = 0, N = Exprs.size(); I != N; ++I) {
+ if (Exprs[I]->isTypeDependent())
ExprBits.TypeDependent = true;
- if (exprs[i]->isValueDependent())
+ if (Exprs[I]->isValueDependent())
ExprBits.ValueDependent = true;
- if (exprs[i]->isInstantiationDependent())
+ if (Exprs[I]->isInstantiationDependent())
ExprBits.InstantiationDependent = true;
- if (exprs[i]->containsUnexpandedParameterPack())
+ if (Exprs[I]->containsUnexpandedParameterPack())
ExprBits.ContainsUnexpandedParameterPack = true;
- Exprs[i] = exprs[i];
+ getTrailingObjects<Stmt *>()[I] = Exprs[I];
}
}
+ParenListExpr::ParenListExpr(EmptyShell Empty, unsigned NumExprs)
+ : Expr(ParenListExprClass, Empty) {
+ ParenListExprBits.NumExprs = NumExprs;
+}
+
+ParenListExpr *ParenListExpr::Create(const ASTContext &Ctx,
+ SourceLocation LParenLoc,
+ ArrayRef<Expr *> Exprs,
+ SourceLocation RParenLoc) {
+ void *Mem = Ctx.Allocate(totalSizeToAlloc<Stmt *>(Exprs.size()),
+ alignof(ParenListExpr));
+ return new (Mem) ParenListExpr(LParenLoc, Exprs, RParenLoc);
+}
+
+ParenListExpr *ParenListExpr::CreateEmpty(const ASTContext &Ctx,
+ unsigned NumExprs) {
+ void *Mem =
+ Ctx.Allocate(totalSizeToAlloc<Stmt *>(NumExprs), alignof(ParenListExpr));
+ return new (Mem) ParenListExpr(EmptyShell(), NumExprs);
+}
+
const OpaqueValueExpr *OpaqueValueExpr::findInCopyConstruct(const Expr *e) {
if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(e))
e = ewc->getSubExpr();
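A caller-side sketch of the EvaluateAsInt migration shown in the hunks above (Elt and consume() are illustrative placeholders, not from the patch): the old overload wrote the integer straight into an APSInt, the new one fills an Expr::EvalResult whose APValue carries it.

  // Before this change:
  //   llvm::APSInt Value;
  //   if (Elt->EvaluateAsInt(Value, Ctx))
  //     consume(Value);
  // After this change:
  Expr::EvalResult Result;
  if (Elt->EvaluateAsInt(Result, Ctx))
    consume(Result.Val.getInt()); // consume() is a placeholder for the old use of Value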
diff --git a/lib/AST/ExprCXX.cpp b/lib/AST/ExprCXX.cpp
index f14f5a361b..5ed4ea2ab7 100644
--- a/lib/AST/ExprCXX.cpp
+++ b/lib/AST/ExprCXX.cpp
@@ -749,14 +749,15 @@ const IdentifierInfo *UserDefinedLiteral::getUDSuffix() const {
return cast<FunctionDecl>(getCalleeDecl())->getLiteralIdentifier();
}
-CXXDefaultInitExpr::CXXDefaultInitExpr(const ASTContext &C, SourceLocation Loc,
- FieldDecl *Field, QualType T)
- : Expr(CXXDefaultInitExprClass, T.getNonLValueExprType(C),
- T->isLValueReferenceType() ? VK_LValue : T->isRValueReferenceType()
+CXXDefaultInitExpr::CXXDefaultInitExpr(const ASTContext &Ctx, SourceLocation Loc,
+ FieldDecl *Field, QualType Ty)
+ : Expr(CXXDefaultInitExprClass, Ty.getNonLValueExprType(Ctx),
+ Ty->isLValueReferenceType() ? VK_LValue : Ty->isRValueReferenceType()
? VK_XValue
: VK_RValue,
/*FIXME*/ OK_Ordinary, false, false, false, false),
- Field(Field), Loc(Loc) {
+ Field(Field) {
+ CXXDefaultInitExprBits.Loc = Loc;
assert(Field->hasInClassInitializer());
}
@@ -1044,12 +1045,7 @@ bool LambdaExpr::isMutable() const {
ExprWithCleanups::ExprWithCleanups(Expr *subexpr,
bool CleanupsHaveSideEffects,
ArrayRef<CleanupObject> objects)
- : Expr(ExprWithCleanupsClass, subexpr->getType(),
- subexpr->getValueKind(), subexpr->getObjectKind(),
- subexpr->isTypeDependent(), subexpr->isValueDependent(),
- subexpr->isInstantiationDependent(),
- subexpr->containsUnexpandedParameterPack()),
- SubExpr(subexpr) {
+ : FullExpr(ExprWithCleanupsClass, subexpr) {
ExprWithCleanupsBits.CleanupsHaveSideEffects = CleanupsHaveSideEffects;
ExprWithCleanupsBits.NumObjects = objects.size();
for (unsigned i = 0, e = objects.size(); i != e; ++i)
@@ -1066,7 +1062,7 @@ ExprWithCleanups *ExprWithCleanups::Create(const ASTContext &C, Expr *subexpr,
}
ExprWithCleanups::ExprWithCleanups(EmptyShell empty, unsigned numObjects)
- : Expr(ExprWithCleanupsClass, empty) {
+ : FullExpr(ExprWithCleanupsClass, empty) {
ExprWithCleanupsBits.NumObjects = numObjects;
}
@@ -1447,5 +1443,3 @@ TypeTraitExpr *TypeTraitExpr::CreateDeserialized(const ASTContext &C,
void *Mem = C.Allocate(totalSizeToAlloc<TypeSourceInfo *>(NumArgs));
return new (Mem) TypeTraitExpr(EmptyShell());
}
-
-void ArrayTypeTraitExpr::anchor() {}
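A minimal sketch, assuming the FullExpr base class these hunks rely on: with ExprWithCleanups re-parented onto FullExpr, it and ConstantExpr can be stripped with a single check, which is what the Ignore* helpers earlier in this patch now do.

  #include "clang/AST/Expr.h"
  using namespace clang;

  // Walk through any wrapping full-expressions to the underlying expression.
  static const Expr *skipFullExprs(const Expr *E) {
    while (const auto *FE = dyn_cast<FullExpr>(E))
      E = FE->getSubExpr();
    return E;
  }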
diff --git a/lib/AST/ExprClassification.cpp b/lib/AST/ExprClassification.cpp
index e50dd9c79d..e1d6a1c9ed 100644
--- a/lib/AST/ExprClassification.cpp
+++ b/lib/AST/ExprClassification.cpp
@@ -194,6 +194,9 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::DesignatedInitUpdateExprClass:
return Cl::CL_PRValue;
+ case Expr::ConstantExprClass:
+ return ClassifyInternal(Ctx, cast<ConstantExpr>(E)->getSubExpr());
+
// Next come the complicated cases.
case Expr::SubstNonTypeTemplateParmExprClass:
return ClassifyInternal(Ctx,
diff --git a/lib/AST/ExprConstant.cpp b/lib/AST/ExprConstant.cpp
index a34940e53a..837dc9c2a8 100644
--- a/lib/AST/ExprConstant.cpp
+++ b/lib/AST/ExprConstant.cpp
@@ -39,11 +39,13 @@
#include "clang/AST/ASTLambda.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/OSLog.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/TargetInfo.h"
+#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <cstring>
#include <functional>
@@ -143,8 +145,8 @@ namespace {
// If we're doing a variable assignment from e.g. malloc(N), there will
// probably be a cast of some kind. In exotic cases, we might also see a
// top-level ExprWithCleanups. Ignore them either way.
- if (const auto *EC = dyn_cast<ExprWithCleanups>(E))
- E = EC->getSubExpr()->IgnoreParens();
+ if (const auto *FE = dyn_cast<FullExpr>(E))
+ E = FE->getSubExpr()->IgnoreParens();
if (const auto *Cast = dyn_cast<CastExpr>(E))
E = Cast->getSubExpr()->IgnoreParens();
@@ -505,7 +507,7 @@ namespace {
}
// FIXME: Adding this to every 'CallStackFrame' may have a nontrivial impact
- // on the overall stack usage of deeply-recursing constexpr evaluataions.
+ // on the overall stack usage of deeply-recursing constexpr evaluations.
// (We should cache this map rather than recomputing it repeatedly.)
// But let's try this and see how it goes; we can look into caching the map
// as a later change.
@@ -720,6 +722,10 @@ namespace {
/// Whether or not we're currently speculatively evaluating.
bool IsSpeculativelyEvaluating;
+ /// Whether or not we're in a context where the front end requires a
+ /// constant value.
+ bool InConstantContext;
+
enum EvaluationMode {
/// Evaluate as a constant expression. Stop if we find that the expression
/// is not a constant expression.
@@ -759,18 +765,6 @@ namespace {
/// context we try to fold them immediately since the optimizer never
/// gets a chance to look at it.
EM_PotentialConstantExpressionUnevaluated,
-
- /// Evaluate as a constant expression. In certain scenarios, if:
- /// - we find a MemberExpr with a base that can't be evaluated, or
- /// - we find a variable initialized with a call to a function that has
- /// the alloc_size attribute on it
- /// then we may consider evaluation to have succeeded.
- ///
- /// In either case, the LValue returned shall have an invalid base; in the
- /// former, the base will be the invalid MemberExpr, in the latter, the
- /// base will be either the alloc_size CallExpr or a CastExpr wrapping
- /// said CallExpr.
- EM_OffsetFold,
} EvalMode;
/// Are we checking whether the expression is a potential constant
@@ -793,7 +787,7 @@ namespace {
EvaluatingDecl((const ValueDecl *)nullptr),
EvaluatingDeclValue(nullptr), HasActiveDiagnostic(false),
HasFoldFailureDiagnostic(false), IsSpeculativelyEvaluating(false),
- EvalMode(Mode) {}
+ InConstantContext(false), EvalMode(Mode) {}
void setEvaluatingDecl(APValue::LValueBase Base, APValue &Value) {
EvaluatingDecl = Base;
@@ -874,7 +868,6 @@ namespace {
case EM_PotentialConstantExpression:
case EM_ConstantExpressionUnevaluated:
case EM_PotentialConstantExpressionUnevaluated:
- case EM_OffsetFold:
HasActiveDiagnostic = false;
return OptionalDiagnostic();
}
@@ -966,7 +959,6 @@ namespace {
case EM_ConstantExpression:
case EM_ConstantExpressionUnevaluated:
case EM_ConstantFold:
- case EM_OffsetFold:
return false;
}
llvm_unreachable("Missed EvalMode case");
@@ -985,7 +977,6 @@ namespace {
case EM_EvaluateForOverflow:
case EM_IgnoreSideEffects:
case EM_ConstantFold:
- case EM_OffsetFold:
return true;
case EM_PotentialConstantExpression:
@@ -1021,7 +1012,6 @@ namespace {
case EM_ConstantExpressionUnevaluated:
case EM_ConstantFold:
case EM_IgnoreSideEffects:
- case EM_OffsetFold:
return false;
}
llvm_unreachable("Missed EvalMode case");
@@ -1093,18 +1083,18 @@ namespace {
}
};
- /// RAII object used to treat the current evaluation as the correct pointer
- /// offset fold for the current EvalMode
- struct FoldOffsetRAII {
+ /// RAII object used to set the current evaluation mode to ignore
+ /// side-effects.
+ struct IgnoreSideEffectsRAII {
EvalInfo &Info;
EvalInfo::EvaluationMode OldMode;
- explicit FoldOffsetRAII(EvalInfo &Info)
+ explicit IgnoreSideEffectsRAII(EvalInfo &Info)
: Info(Info), OldMode(Info.EvalMode) {
if (!Info.checkingPotentialConstantExpression())
- Info.EvalMode = EvalInfo::EM_OffsetFold;
+ Info.EvalMode = EvalInfo::EM_IgnoreSideEffects;
}
- ~FoldOffsetRAII() { Info.EvalMode = OldMode; }
+ ~IgnoreSideEffectsRAII() { Info.EvalMode = OldMode; }
};
/// RAII object used to optionally suppress diagnostics and side-effects from
@@ -1300,6 +1290,14 @@ void EvalInfo::addCallStack(unsigned Limit) {
}
}
+/// Kinds of access we can perform on an object, for diagnostics.
+enum AccessKinds {
+ AK_Read,
+ AK_Assign,
+ AK_Increment,
+ AK_Decrement
+};
+
namespace {
struct ComplexValue {
private:
@@ -1405,21 +1403,36 @@ namespace {
set(B, true);
}
+ private:
// Check that this LValue is not based on a null pointer. If it is, produce
// a diagnostic and mark the designator as invalid.
- bool checkNullPointer(EvalInfo &Info, const Expr *E,
- CheckSubobjectKind CSK) {
+ template <typename GenDiagType>
+ bool checkNullPointerDiagnosingWith(const GenDiagType &GenDiag) {
if (Designator.Invalid)
return false;
if (IsNullPtr) {
- Info.CCEDiag(E, diag::note_constexpr_null_subobject)
- << CSK;
+ GenDiag();
Designator.setInvalid();
return false;
}
return true;
}
+ public:
+ bool checkNullPointer(EvalInfo &Info, const Expr *E,
+ CheckSubobjectKind CSK) {
+ return checkNullPointerDiagnosingWith([&Info, E, CSK] {
+ Info.CCEDiag(E, diag::note_constexpr_null_subobject) << CSK;
+ });
+ }
+
+ bool checkNullPointerForFoldAccess(EvalInfo &Info, const Expr *E,
+ AccessKinds AK) {
+ return checkNullPointerDiagnosingWith([&Info, E, AK] {
+ Info.FFDiag(E, diag::note_constexpr_access_null) << AK;
+ });
+ }
+
// Check this LValue refers to an object. If not, set the designator to be
// invalid and emit a diagnostic.
bool checkSubobject(EvalInfo &Info, const Expr *E, CheckSubobjectKind CSK) {
@@ -2089,11 +2102,12 @@ static APSInt HandleIntToIntCast(EvalInfo &Info, const Expr *E,
QualType DestType, QualType SrcType,
const APSInt &Value) {
unsigned DestWidth = Info.Ctx.getIntWidth(DestType);
- APSInt Result = Value;
// Figure out if this is a truncate, extend or noop cast.
// If the input is signed, do a sign extend, noop, or truncate.
- Result = Result.extOrTrunc(DestWidth);
+ APSInt Result = Value.extOrTrunc(DestWidth);
Result.setIsUnsigned(DestType->isUnsignedIntegerOrEnumerationType());
+ if (DestType->isBooleanType())
+ Result = Value.getBoolValue();
return Result;
}
@@ -2755,14 +2769,6 @@ static bool diagnoseUnreadableFields(EvalInfo &Info, const Expr *E,
return false;
}
-/// Kinds of access we can perform on an object, for diagnostics.
-enum AccessKinds {
- AK_Read,
- AK_Assign,
- AK_Increment,
- AK_Decrement
-};
-
namespace {
/// A handle to a complete object (an object that is not a subobject of
/// another object).
@@ -4288,6 +4294,9 @@ static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
case Stmt::CaseStmtClass:
case Stmt::DefaultStmtClass:
return EvaluateStmt(Result, Info, cast<SwitchCase>(S)->getSubStmt(), Case);
+ case Stmt::CXXTryStmtClass:
+ // Evaluate try blocks by evaluating all sub statements.
+ return EvaluateStmt(Result, Info, cast<CXXTryStmt>(S)->getTryBlock(), Case);
}
}
@@ -4330,10 +4339,13 @@ static bool CheckConstexprFunction(EvalInfo &Info, SourceLocation CallLoc,
Declaration->isConstexpr())
return false;
- // Bail out with no diagnostic if the function declaration itself is invalid.
- // We will have produced a relevant diagnostic while parsing it.
- if (Declaration->isInvalidDecl())
+ // Bail out if the function declaration itself is invalid. We will
+ // have produced a relevant diagnostic while parsing it, so just
+ // note the problematic sub-expression.
+ if (Declaration->isInvalidDecl()) {
+ Info.FFDiag(CallLoc, diag::note_invalid_subexpr_in_const_expr);
return false;
+ }
// Can we evaluate this function call?
if (Definition && Definition->isConstexpr() &&
@@ -4731,6 +4743,8 @@ public:
return Error(E);
}
+ bool VisitConstantExpr(const ConstantExpr *E)
+ { return StmtVisitorTy::Visit(E->getSubExpr()); }
bool VisitParenExpr(const ParenExpr *E)
{ return StmtVisitorTy::Visit(E->getSubExpr()); }
bool VisitUnaryExtension(const UnaryOperator *E)
@@ -5634,8 +5648,10 @@ static bool getBytesReturnedByAllocSizeCall(const ASTContext &Ctx,
return false;
auto EvaluateAsSizeT = [&](const Expr *E, APSInt &Into) {
- if (!E->EvaluateAsInt(Into, Ctx, Expr::SE_AllowSideEffects))
+ Expr::EvalResult ExprResult;
+ if (!E->EvaluateAsInt(ExprResult, Ctx, Expr::SE_AllowSideEffects))
return false;
+ Into = ExprResult.Val.getInt();
if (Into.isNegative() || !Into.isIntN(BitsInSizeT))
return false;
Into = Into.zextOrSelf(BitsInSizeT);
@@ -5861,11 +5877,7 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr *E) {
// permitted in constant expressions in C++11. Bitcasts from cv void* are
// also static_casts, but we disallow them as a resolution to DR1312.
if (!E->getType()->isVoidPointerType()) {
- // If we changed anything other than cvr-qualifiers, we can't use this
- // value for constant folding. FIXME: Qualification conversions should
- // always be CK_NoOp, but we get this wrong in C.
- if (!Info.Ctx.hasCvrSimilarType(E->getType(), E->getSubExpr()->getType()))
- Result.Designator.setInvalid();
+ Result.Designator.setInvalid();
if (SubExpr->getType()->isVoidPointerType())
CCEDiag(E, diag::note_constexpr_invalid_cast)
<< 3 << SubExpr->getType();
@@ -5963,21 +5975,35 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr *E) {
return ExprEvaluatorBaseTy::VisitCastExpr(E);
}
-static CharUnits GetAlignOfType(EvalInfo &Info, QualType T) {
+static CharUnits GetAlignOfType(EvalInfo &Info, QualType T,
+ UnaryExprOrTypeTrait ExprKind) {
// C++ [expr.alignof]p3:
// When alignof is applied to a reference type, the result is the
// alignment of the referenced type.
if (const ReferenceType *Ref = T->getAs<ReferenceType>())
T = Ref->getPointeeType();
- // __alignof is defined to return the preferred alignment.
if (T.getQualifiers().hasUnaligned())
return CharUnits::One();
- return Info.Ctx.toCharUnitsFromBits(
- Info.Ctx.getPreferredTypeAlign(T.getTypePtr()));
+
+ const bool AlignOfReturnsPreferred =
+ Info.Ctx.getLangOpts().getClangABICompat() <= LangOptions::ClangABI::Ver7;
+
+ // __alignof is defined to return the preferred alignment.
+ // Before 8, clang returned the preferred alignment for alignof and _Alignof
+ // as well.
+ if (ExprKind == UETT_PreferredAlignOf || AlignOfReturnsPreferred)
+ return Info.Ctx.toCharUnitsFromBits(
+ Info.Ctx.getPreferredTypeAlign(T.getTypePtr()));
+ // alignof and _Alignof are defined to return the ABI alignment.
+ else if (ExprKind == UETT_AlignOf)
+ return Info.Ctx.getTypeAlignInChars(T.getTypePtr());
+ else
+ llvm_unreachable("GetAlignOfType on a non-alignment ExprKind");
}
-static CharUnits GetAlignOfExpr(EvalInfo &Info, const Expr *E) {
+static CharUnits GetAlignOfExpr(EvalInfo &Info, const Expr *E,
+ UnaryExprOrTypeTrait ExprKind) {
E = E->IgnoreParens();
// The kinds of expressions that we have special-case logic here for
@@ -5994,7 +6020,7 @@ static CharUnits GetAlignOfExpr(EvalInfo &Info, const Expr *E) {
return Info.Ctx.getDeclAlign(ME->getMemberDecl(),
/*RefAsPointee*/true);
- return GetAlignOfType(Info, E->getType());
+ return GetAlignOfType(Info, E->getType(), ExprKind);
}
// To be clear: this happily visits unsupported builtins. Better name welcomed.
@@ -6055,8 +6081,8 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
OffsetResult.Base.dyn_cast<const ValueDecl*>()) {
BaseAlignment = Info.Ctx.getDeclAlign(VD);
} else {
- BaseAlignment =
- GetAlignOfExpr(Info, OffsetResult.Base.get<const Expr*>());
+ BaseAlignment = GetAlignOfExpr(
+ Info, OffsetResult.Base.get<const Expr *>(), UETT_AlignOf);
}
if (BaseAlignment < Align) {
@@ -6086,7 +6112,8 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
return true;
}
-
+ case Builtin::BI__builtin_launder:
+ return evaluatePointer(E->getArg(0), Result);
case Builtin::BIstrchr:
case Builtin::BIwcschr:
case Builtin::BImemchr:
@@ -6118,9 +6145,27 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
return false;
MaxLength = N.getExtValue();
}
-
- QualType CharTy = E->getArg(0)->getType()->getPointeeType();
-
+ // We cannot find the value if there are no candidates to match against.
+ if (MaxLength == 0u)
+ return ZeroInitialization(E);
+ if (!Result.checkNullPointerForFoldAccess(Info, E, AK_Read) ||
+ Result.Designator.Invalid)
+ return false;
+ QualType CharTy = Result.Designator.getType(Info.Ctx);
+ bool IsRawByte = BuiltinOp == Builtin::BImemchr ||
+ BuiltinOp == Builtin::BI__builtin_memchr;
+ assert(IsRawByte ||
+ Info.Ctx.hasSameUnqualifiedType(
+ CharTy, E->getArg(0)->getType()->getPointeeType()));
+ // Pointers to const void may point to objects of incomplete type.
+ if (IsRawByte && CharTy->isIncompleteType()) {
+ Info.FFDiag(E, diag::note_constexpr_ltor_incomplete_type) << CharTy;
+ return false;
+ }
+ // Give up on byte-oriented matching against multibyte elements.
+ // FIXME: We can compare the bytes in the correct order.
+ if (IsRawByte && Info.Ctx.getTypeSizeInChars(CharTy) != CharUnits::One())
+ return false;
// Figure out what value we're actually looking for (after converting to
// the corresponding unsigned type if necessary).
uint64_t DesiredVal;
@@ -6239,6 +6284,10 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
Info.FFDiag(E, diag::note_constexpr_memcpy_type_pun) << Move << SrcT << T;
return false;
}
+ if (T->isIncompleteType()) {
+ Info.FFDiag(E, diag::note_constexpr_memcpy_incomplete_type) << Move << T;
+ return false;
+ }
if (!T.isTriviallyCopyableType(Info.Ctx)) {
Info.FFDiag(E, diag::note_constexpr_memcpy_nontrivial) << Move << T;
return false;
@@ -7343,6 +7392,8 @@ public:
// Visitor Methods
//===--------------------------------------------------------------------===//
+ bool VisitConstantExpr(const ConstantExpr *E);
+
bool VisitIntegerLiteral(const IntegerLiteral *E) {
return Success(E->getValue(), E);
}
@@ -7642,6 +7693,9 @@ EvaluateBuiltinClassifyType(QualType T, const LangOptions &LangOpts) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id:
+#include "clang/Basic/OpenCLExtensionTypes.def"
case BuiltinType::OCLSampler:
case BuiltinType::OCLEvent:
case BuiltinType::OCLClkEvent:
@@ -8046,7 +8100,7 @@ static bool tryEvaluateBuiltinObjectSize(const Expr *E, unsigned Type,
// If there are any, but we can determine the pointed-to object anyway, then
// ignore the side-effects.
SpeculativeEvaluationRAII SpeculativeEval(Info);
- FoldOffsetRAII Fold(Info);
+ IgnoreSideEffectsRAII Fold(Info);
if (E->isGLValue()) {
// It's possible for us to be given GLValues if we're called via
@@ -8080,6 +8134,11 @@ static bool tryEvaluateBuiltinObjectSize(const Expr *E, unsigned Type,
return true;
}
+bool IntExprEvaluator::VisitConstantExpr(const ConstantExpr *E) {
+ llvm::SaveAndRestore<bool> InConstantContext(Info.InConstantContext, true);
+ return ExprEvaluatorBaseTy::VisitConstantExpr(E);
+}
+
bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
if (unsigned BuiltinOp = E->getBuiltinCallee())
return VisitBuiltinCallExpr(E, BuiltinOp);
@@ -8114,7 +8173,6 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
case EvalInfo::EM_ConstantFold:
case EvalInfo::EM_EvaluateForOverflow:
case EvalInfo::EM_IgnoreSideEffects:
- case EvalInfo::EM_OffsetFold:
// Leave it to IR generation.
return Error(E);
case EvalInfo::EM_ConstantExpressionUnevaluated:
@@ -8126,6 +8184,12 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
llvm_unreachable("unexpected EvalMode");
}
+ case Builtin::BI__builtin_os_log_format_buffer_size: {
+ analyze_os_log::OSLogBufferLayout Layout;
+ analyze_os_log::computeOSLogBufferLayout(Info.Ctx, E, Layout);
+ return Success(Layout.size().getQuantity(), E);
+ }
+
case Builtin::BI__builtin_bswap16:
case Builtin::BI__builtin_bswap32:
case Builtin::BI__builtin_bswap64: {
@@ -8162,8 +8226,20 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
return Success(Val.countLeadingZeros(), E);
}
- case Builtin::BI__builtin_constant_p:
- return Success(EvaluateBuiltinConstantP(Info.Ctx, E->getArg(0)), E);
+ case Builtin::BI__builtin_constant_p: {
+ auto Arg = E->getArg(0);
+ if (EvaluateBuiltinConstantP(Info.Ctx, Arg))
+ return Success(true, E);
+ auto ArgTy = Arg->IgnoreImplicit()->getType();
+ if (!Info.InConstantContext && !Arg->HasSideEffects(Info.Ctx) &&
+ !ArgTy->isAggregateType() && !ArgTy->isPointerType()) {
+ // We can delay calculation of __builtin_constant_p until after
+ // inlining. Note: This diagnostic won't be shown to the user.
+ Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+ return Success(false, E);
+ }
case Builtin::BI__builtin_ctz:
case Builtin::BI__builtin_ctzl:
@@ -8343,8 +8419,6 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
!EvaluatePointer(E->getArg(1), String2, Info))
return false;
- QualType CharTy = E->getArg(0)->getType()->getPointeeType();
-
uint64_t MaxLength = uint64_t(-1);
if (BuiltinOp != Builtin::BIstrcmp &&
BuiltinOp != Builtin::BIwcscmp &&
@@ -8355,6 +8429,88 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
return false;
MaxLength = N.getExtValue();
}
+
+ // Empty substrings compare equal by definition.
+ if (MaxLength == 0u)
+ return Success(0, E);
+
+ if (!String1.checkNullPointerForFoldAccess(Info, E, AK_Read) ||
+ !String2.checkNullPointerForFoldAccess(Info, E, AK_Read) ||
+ String1.Designator.Invalid || String2.Designator.Invalid)
+ return false;
+
+ QualType CharTy1 = String1.Designator.getType(Info.Ctx);
+ QualType CharTy2 = String2.Designator.getType(Info.Ctx);
+
+ bool IsRawByte = BuiltinOp == Builtin::BImemcmp ||
+ BuiltinOp == Builtin::BI__builtin_memcmp;
+
+ assert(IsRawByte ||
+ (Info.Ctx.hasSameUnqualifiedType(
+ CharTy1, E->getArg(0)->getType()->getPointeeType()) &&
+ Info.Ctx.hasSameUnqualifiedType(CharTy1, CharTy2)));
+
+ const auto &ReadCurElems = [&](APValue &Char1, APValue &Char2) {
+ return handleLValueToRValueConversion(Info, E, CharTy1, String1, Char1) &&
+ handleLValueToRValueConversion(Info, E, CharTy2, String2, Char2) &&
+ Char1.isInt() && Char2.isInt();
+ };
+ const auto &AdvanceElems = [&] {
+ return HandleLValueArrayAdjustment(Info, E, String1, CharTy1, 1) &&
+ HandleLValueArrayAdjustment(Info, E, String2, CharTy2, 1);
+ };
+
+ if (IsRawByte) {
+ uint64_t BytesRemaining = MaxLength;
+ // Pointers to const void may point to objects of incomplete type.
+ if (CharTy1->isIncompleteType()) {
+ Info.FFDiag(E, diag::note_constexpr_ltor_incomplete_type) << CharTy1;
+ return false;
+ }
+ if (CharTy2->isIncompleteType()) {
+ Info.FFDiag(E, diag::note_constexpr_ltor_incomplete_type) << CharTy2;
+ return false;
+ }
+ uint64_t CharTy1Width{Info.Ctx.getTypeSize(CharTy1)};
+ CharUnits CharTy1Size = Info.Ctx.toCharUnitsFromBits(CharTy1Width);
+ // Give up on comparing between elements with disparate widths.
+ if (CharTy1Size != Info.Ctx.getTypeSizeInChars(CharTy2))
+ return false;
+ uint64_t BytesPerElement = CharTy1Size.getQuantity();
+ assert(BytesRemaining && "BytesRemaining should not be zero: the "
+ "following loop considers at least one element");
+ while (true) {
+ APValue Char1, Char2;
+ if (!ReadCurElems(Char1, Char2))
+ return false;
+ // We have compatible in-memory widths, but a possible type and
+ // (for `bool`) internal representation mismatch.
+ // Assuming two's complement representation, including 0 for `false` and
+ // 1 for `true`, we can check an appropriate number of elements for
+ // equality even if they are not byte-sized.
+ APSInt Char1InMem = Char1.getInt().extOrTrunc(CharTy1Width);
+ APSInt Char2InMem = Char2.getInt().extOrTrunc(CharTy1Width);
+ if (Char1InMem.ne(Char2InMem)) {
+ // If the elements are byte-sized, then we can produce a three-way
+ // comparison result in a straightforward manner.
+ if (BytesPerElement == 1u) {
+ // memcmp always compares unsigned chars.
+ return Success(Char1InMem.ult(Char2InMem) ? -1 : 1, E);
+ }
+ // The result is byte-order sensitive, and we have multibyte elements.
+ // FIXME: We can compare the remaining bytes in the correct order.
+ return false;
+ }
+ if (!AdvanceElems())
+ return false;
+ if (BytesRemaining <= BytesPerElement)
+ break;
+ BytesRemaining -= BytesPerElement;
+ }
+ // Enough elements are equal to account for the memcmp limit.
+ return Success(0, E);
+ }
+
bool StopAtNull = (BuiltinOp != Builtin::BImemcmp &&
BuiltinOp != Builtin::BIwmemcmp &&
BuiltinOp != Builtin::BI__builtin_memcmp &&
@@ -8365,11 +8521,10 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
BuiltinOp == Builtin::BI__builtin_wcscmp ||
BuiltinOp == Builtin::BI__builtin_wcsncmp ||
BuiltinOp == Builtin::BI__builtin_wmemcmp;
+
for (; MaxLength; --MaxLength) {
APValue Char1, Char2;
- if (!handleLValueToRValueConversion(Info, E, CharTy, String1, Char1) ||
- !handleLValueToRValueConversion(Info, E, CharTy, String2, Char2) ||
- !Char1.isInt() || !Char2.isInt())
+ if (!ReadCurElems(Char1, Char2))
return false;
if (Char1.getInt() != Char2.getInt()) {
if (IsWide) // wmemcmp compares with wchar_t signedness.
@@ -8380,8 +8535,7 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
if (StopAtNull && !Char1.getInt())
return Success(0, E);
assert(!(StopAtNull && !Char2.getInt()));
- if (!HandleLValueArrayAdjustment(Info, E, String1, CharTy, 1) ||
- !HandleLValueArrayAdjustment(Info, E, String2, CharTy, 1))
+ if (!AdvanceElems())
return false;
}
// We hit the strncmp / memcmp limit.
@@ -9372,11 +9526,14 @@ bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
bool IntExprEvaluator::VisitUnaryExprOrTypeTraitExpr(
const UnaryExprOrTypeTraitExpr *E) {
switch(E->getKind()) {
+ case UETT_PreferredAlignOf:
case UETT_AlignOf: {
if (E->isArgumentType())
- return Success(GetAlignOfType(Info, E->getArgumentType()), E);
+ return Success(GetAlignOfType(Info, E->getArgumentType(), E->getKind()),
+ E);
else
- return Success(GetAlignOfExpr(Info, E->getArgumentExpr()), E);
+ return Success(GetAlignOfExpr(Info, E->getArgumentExpr(), E->getKind()),
+ E);
}
case UETT_VecStep: {
@@ -9565,11 +9722,11 @@ bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
case CK_IntegralComplexCast:
case CK_IntegralComplexToFloatingComplex:
case CK_BuiltinFnToFnPtr:
- case CK_ZeroToOCLEvent:
- case CK_ZeroToOCLQueue:
+ case CK_ZeroToOCLOpaqueType:
case CK_NonAtomicToAtomic:
case CK_AddressSpaceConversion:
case CK_IntToOCLSampler:
+ case CK_FixedPointCast:
llvm_unreachable("invalid cast kind for integral value");
case CK_BitCast:
@@ -9604,6 +9761,14 @@ bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
return Success(IntResult, E);
}
+ case CK_FixedPointToBoolean: {
+ // Unsigned padding does not affect this.
+ APValue Val;
+ if (!Evaluate(Val, Info, SubExpr))
+ return false;
+ return Success(Val.getInt().getBoolValue(), E);
+ }
+
case CK_IntegralCast: {
if (!Visit(SubExpr))
return false;
@@ -10099,11 +10264,12 @@ bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) {
case CK_ARCExtendBlockObject:
case CK_CopyAndAutoreleaseBlockObject:
case CK_BuiltinFnToFnPtr:
- case CK_ZeroToOCLEvent:
- case CK_ZeroToOCLQueue:
+ case CK_ZeroToOCLOpaqueType:
case CK_NonAtomicToAtomic:
case CK_AddressSpaceConversion:
case CK_IntToOCLSampler:
+ case CK_FixedPointCast:
+ case CK_FixedPointToBoolean:
llvm_unreachable("invalid cast kind for complex value");
case CK_LValueToRValue:
@@ -10264,7 +10430,7 @@ bool ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
case BO_Mul:
if (Result.isComplexFloat()) {
// This is an implementation of complex multiplication according to the
- // constraints laid out in C11 Annex G. The implemention uses the
+ // constraints laid out in C11 Annex G. The implementation uses the
// following naming scheme:
// (a + ib) * (c + id)
ComplexValue LHS = Result;
@@ -10345,7 +10511,7 @@ bool ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
case BO_Div:
if (Result.isComplexFloat()) {
// This is an implementation of complex division according to the
- // constraints laid out in C11 Annex G. The implemention uses the
+ // constraints laid out in C11 Annex G. The implementation uses the
// following naming scheme:
// (a + ib) / (c + id)
ComplexValue LHS = Result;
@@ -10721,19 +10887,46 @@ static bool FastEvaluateAsRValue(const Expr *Exp, Expr::EvalResult &Result,
return false;
}
+static bool hasUnacceptableSideEffect(Expr::EvalStatus &Result,
+ Expr::SideEffectsKind SEK) {
+ return (SEK < Expr::SE_AllowSideEffects && Result.HasSideEffects) ||
+ (SEK < Expr::SE_AllowUndefinedBehavior && Result.HasUndefinedBehavior);
+}
+
+static bool EvaluateAsRValue(const Expr *E, Expr::EvalResult &Result,
+ const ASTContext &Ctx, EvalInfo &Info) {
+ bool IsConst;
+ if (FastEvaluateAsRValue(E, Result, Ctx, IsConst))
+ return IsConst;
+
+ return EvaluateAsRValue(Info, E, Result.Val);
+}
+
+static bool EvaluateAsInt(const Expr *E, Expr::EvalResult &ExprResult,
+ const ASTContext &Ctx,
+ Expr::SideEffectsKind AllowSideEffects,
+ EvalInfo &Info) {
+ if (!E->getType()->isIntegralOrEnumerationType())
+ return false;
+
+ if (!::EvaluateAsRValue(E, ExprResult, Ctx, Info) ||
+ !ExprResult.Val.isInt() ||
+ hasUnacceptableSideEffect(ExprResult, AllowSideEffects))
+ return false;
+
+ return true;
+}
/// EvaluateAsRValue - Return true if this is a constant which we can fold using
/// any crazy technique (that has nothing to do with language standards) that
/// we want to. If this function returns true, it returns the folded constant
/// in Result. If this expression is a glvalue, an lvalue-to-rvalue conversion
/// will be applied to the result.
-bool Expr::EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx) const {
- bool IsConst;
- if (FastEvaluateAsRValue(this, Result, Ctx, IsConst))
- return IsConst;
-
+bool Expr::EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx,
+ bool InConstantContext) const {
EvalInfo Info(Ctx, Result, EvalInfo::EM_IgnoreSideEffects);
- return ::EvaluateAsRValue(Info, this, Result.Val);
+ Info.InConstantContext = InConstantContext;
+ return ::EvaluateAsRValue(this, Result, Ctx, Info);
}
bool Expr::EvaluateAsBooleanCondition(bool &Result,
@@ -10743,24 +10936,10 @@ bool Expr::EvaluateAsBooleanCondition(bool &Result,
HandleConversionToBool(Scratch.Val, Result);
}
-static bool hasUnacceptableSideEffect(Expr::EvalStatus &Result,
- Expr::SideEffectsKind SEK) {
- return (SEK < Expr::SE_AllowSideEffects && Result.HasSideEffects) ||
- (SEK < Expr::SE_AllowUndefinedBehavior && Result.HasUndefinedBehavior);
-}
-
-bool Expr::EvaluateAsInt(APSInt &Result, const ASTContext &Ctx,
+bool Expr::EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx,
SideEffectsKind AllowSideEffects) const {
- if (!getType()->isIntegralOrEnumerationType())
- return false;
-
- EvalResult ExprResult;
- if (!EvaluateAsRValue(ExprResult, Ctx) || !ExprResult.Val.isInt() ||
- hasUnacceptableSideEffect(ExprResult, AllowSideEffects))
- return false;
-
- Result = ExprResult.Val.getInt();
- return true;
+ EvalInfo Info(Ctx, Result, EvalInfo::EM_IgnoreSideEffects);
+ return ::EvaluateAsInt(this, Result, Ctx, AllowSideEffects, Info);
}
bool Expr::EvaluateAsFloat(APFloat &Result, const ASTContext &Ctx,
@@ -10818,6 +10997,7 @@ bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx,
? EvalInfo::EM_ConstantExpression
: EvalInfo::EM_ConstantFold);
InitInfo.setEvaluatingDecl(VD, Value);
+ InitInfo.InConstantContext = true;
LValue LVal;
LVal.set(VD);
@@ -10847,28 +11027,46 @@ bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx,
/// constant folded, but discard the result.
bool Expr::isEvaluatable(const ASTContext &Ctx, SideEffectsKind SEK) const {
EvalResult Result;
- return EvaluateAsRValue(Result, Ctx) &&
+ return EvaluateAsRValue(Result, Ctx, /* in constant context */ true) &&
!hasUnacceptableSideEffect(Result, SEK);
}
APSInt Expr::EvaluateKnownConstInt(const ASTContext &Ctx,
SmallVectorImpl<PartialDiagnosticAt> *Diag) const {
- EvalResult EvalResult;
- EvalResult.Diag = Diag;
- bool Result = EvaluateAsRValue(EvalResult, Ctx);
+ EvalResult EVResult;
+ EVResult.Diag = Diag;
+ EvalInfo Info(Ctx, EVResult, EvalInfo::EM_IgnoreSideEffects);
+ Info.InConstantContext = true;
+
+ bool Result = ::EvaluateAsRValue(this, EVResult, Ctx, Info);
(void)Result;
assert(Result && "Could not evaluate expression");
- assert(EvalResult.Val.isInt() && "Expression did not evaluate to integer");
+ assert(EVResult.Val.isInt() && "Expression did not evaluate to integer");
- return EvalResult.Val.getInt();
+ return EVResult.Val.getInt();
+}
+
+APSInt Expr::EvaluateKnownConstIntCheckOverflow(
+ const ASTContext &Ctx, SmallVectorImpl<PartialDiagnosticAt> *Diag) const {
+ EvalResult EVResult;
+ EVResult.Diag = Diag;
+ EvalInfo Info(Ctx, EVResult, EvalInfo::EM_EvaluateForOverflow);
+ Info.InConstantContext = true;
+
+ bool Result = ::EvaluateAsRValue(Info, this, EVResult.Val);
+ (void)Result;
+ assert(Result && "Could not evaluate expression");
+ assert(EVResult.Val.isInt() && "Expression did not evaluate to integer");
+
+ return EVResult.Val.getInt();
}
void Expr::EvaluateForOverflow(const ASTContext &Ctx) const {
bool IsConst;
- EvalResult EvalResult;
- if (!FastEvaluateAsRValue(this, EvalResult, Ctx, IsConst)) {
- EvalInfo Info(Ctx, EvalResult, EvalInfo::EM_EvaluateForOverflow);
- (void)::EvaluateAsRValue(Info, this, EvalResult.Val);
+ EvalResult EVResult;
+ if (!FastEvaluateAsRValue(this, EVResult, Ctx, IsConst)) {
+ EvalInfo Info(Ctx, EVResult, EvalInfo::EM_EvaluateForOverflow);
+ (void)::EvaluateAsRValue(Info, this, EVResult.Val);
}
}
@@ -10921,7 +11119,11 @@ static ICEDiag Worst(ICEDiag A, ICEDiag B) { return A.Kind >= B.Kind ? A : B; }
static ICEDiag CheckEvalInICE(const Expr* E, const ASTContext &Ctx) {
Expr::EvalResult EVResult;
- if (!E->EvaluateAsRValue(EVResult, Ctx) || EVResult.HasSideEffects ||
+ Expr::EvalStatus Status;
+ EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantExpression);
+
+ Info.InConstantContext = true;
+ if (!::EvaluateAsRValue(E, EVResult, Ctx, Info) || EVResult.HasSideEffects ||
!EVResult.Val.isInt())
return ICEDiag(IK_NotICE, E->getBeginLoc());
@@ -11037,6 +11239,9 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
return
CheckICE(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(), Ctx);
+ case Expr::ConstantExprClass:
+ return CheckICE(cast<ConstantExpr>(E)->getSubExpr(), Ctx);
+
case Expr::ParenExprClass:
return CheckICE(cast<ParenExpr>(E)->getSubExpr(), Ctx);
case Expr::GenericSelectionExprClass:
@@ -11115,9 +11320,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
case UO_Imag:
return CheckICE(Exp->getSubExpr(), Ctx);
}
-
- // OffsetOf falls through here.
- LLVM_FALLTHROUGH;
+ llvm_unreachable("invalid unary operator class");
}
case Expr::OffsetOfExprClass: {
// Note that per C99, offsetof must be an ICE. And AFAIK, using
@@ -11221,7 +11424,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
return Worst(LHSResult, RHSResult);
}
}
- LLVM_FALLTHROUGH;
+ llvm_unreachable("invalid binary operator kind");
}
case Expr::ImplicitCastExprClass:
case Expr::CStyleCastExprClass:
@@ -11358,12 +11561,20 @@ bool Expr::isIntegerConstantExpr(llvm::APSInt &Value, const ASTContext &Ctx,
if (!isIntegerConstantExpr(Ctx, Loc))
return false;
+
// The only possible side-effects here are due to UB discovered in the
// evaluation (for instance, INT_MAX + 1). In such a case, we are still
// required to treat the expression as an ICE, so we produce the folded
// value.
- if (!EvaluateAsInt(Value, Ctx, SE_AllowSideEffects))
+ EvalResult ExprResult;
+ Expr::EvalStatus Status;
+ EvalInfo Info(Ctx, Status, EvalInfo::EM_IgnoreSideEffects);
+ Info.InConstantContext = true;
+
+ if (!::EvaluateAsInt(this, ExprResult, Ctx, SE_AllowSideEffects, Info))
llvm_unreachable("ICE cannot be evaluated!");
+
+ Value = ExprResult.Val.getInt();
return true;
}
@@ -11449,6 +11660,7 @@ bool Expr::isPotentialConstantExpr(const FunctionDecl *FD,
EvalInfo Info(FD->getASTContext(), Status,
EvalInfo::EM_PotentialConstantExpression);
+ Info.InConstantContext = true;
const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
const CXXRecordDecl *RD = MD ? MD->getParent()->getCanonicalDecl() : nullptr;
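A hedged usage sketch for the new InConstantContext plumbing (the header-side declaration and any default argument are not part of this hunk, so passing the third argument explicitly is an assumption; E and consume() are placeholders): callers that already know they are in a constant context can now say so, which affects how __builtin_constant_p folds.

  Expr::EvalResult Result;
  if (E->EvaluateAsRValue(Result, Ctx, /*InConstantContext=*/true))
    consume(Result.Val); // use the folded APValue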
diff --git a/lib/AST/ExternalASTMerger.cpp b/lib/AST/ExternalASTMerger.cpp
index ae28c588ca..12e6bfc041 100644
--- a/lib/AST/ExternalASTMerger.cpp
+++ b/lib/AST/ExternalASTMerger.cpp
@@ -144,14 +144,14 @@ public:
}
if (auto *ToTag = dyn_cast<TagDecl>(To)) {
ToTag->setHasExternalLexicalStorage();
- ToTag->setMustBuildLookupTable();
+ ToTag->getPrimaryContext()->setMustBuildLookupTable();
assert(Parent.CanComplete(ToTag));
} else if (auto *ToNamespace = dyn_cast<NamespaceDecl>(To)) {
ToNamespace->setHasExternalVisibleStorage();
assert(Parent.CanComplete(ToNamespace));
} else if (auto *ToContainer = dyn_cast<ObjCContainerDecl>(To)) {
ToContainer->setHasExternalLexicalStorage();
- ToContainer->setMustBuildLookupTable();
+ ToContainer->getPrimaryContext()->setMustBuildLookupTable();
assert(Parent.CanComplete(ToContainer));
}
return To;
@@ -230,7 +230,8 @@ void ExternalASTMerger::CompleteType(TagDecl *Tag) {
if (!SourceTag->getDefinition())
return false;
Forward.MapImported(SourceTag, Tag);
- Forward.ImportDefinition(SourceTag);
+ if (llvm::Error Err = Forward.ImportDefinition_New(SourceTag))
+ llvm::consumeError(std::move(Err));
Tag->setCompleteDefinition(SourceTag->isCompleteDefinition());
return true;
});
@@ -249,7 +250,8 @@ void ExternalASTMerger::CompleteType(ObjCInterfaceDecl *Interface) {
if (!SourceInterface->getDefinition())
return false;
Forward.MapImported(SourceInterface, Interface);
- Forward.ImportDefinition(SourceInterface);
+ if (llvm::Error Err = Forward.ImportDefinition_New(SourceInterface))
+ llvm::consumeError(std::move(Err));
return true;
});
}
diff --git a/lib/Analysis/FormatString.cpp b/lib/AST/FormatString.cpp
index 0bab50c569..04bd48f14a 100644
--- a/lib/Analysis/FormatString.cpp
+++ b/lib/AST/FormatString.cpp
@@ -179,6 +179,36 @@ clang::analyze_format_string::ParseArgPosition(FormatStringHandler &H,
}
bool
+clang::analyze_format_string::ParseVectorModifier(FormatStringHandler &H,
+ FormatSpecifier &FS,
+ const char *&I,
+ const char *E,
+ const LangOptions &LO) {
+ if (!LO.OpenCL)
+ return false;
+
+ const char *Start = I;
+ if (*I == 'v') {
+ ++I;
+
+ if (I == E) {
+ H.HandleIncompleteSpecifier(Start, E - Start);
+ return true;
+ }
+
+ OptionalAmount NumElts = ParseAmount(I, E);
+ if (NumElts.getHowSpecified() != OptionalAmount::Constant) {
+ H.HandleIncompleteSpecifier(Start, E - Start);
+ return true;
+ }
+
+ FS.setVectorNumElts(NumElts);
+ }
+
+ return false;
+}
+
+bool
clang::analyze_format_string::ParseLengthModifier(FormatSpecifier &FS,
const char *&I,
const char *E,
@@ -457,6 +487,14 @@ ArgType::matchesType(ASTContext &C, QualType argTy) const {
llvm_unreachable("Invalid ArgType Kind!");
}
+ArgType ArgType::makeVectorType(ASTContext &C, unsigned NumElts) const {
+ if (K != SpecificTy) // Won't be a valid vector element type.
+ return ArgType::Invalid();
+
+ QualType Vec = C.getExtVectorType(T, NumElts);
+ return ArgType(Vec, Name);
+}
+
QualType ArgType::getRepresentativeType(ASTContext &C) const {
QualType Res;
switch (K) {
@@ -689,7 +727,7 @@ bool FormatSpecifier::hasValidLengthModifier(const TargetInfo &Target) const {
break;
}
}
- // Fall through.
+ LLVM_FALLTHROUGH;
case LengthModifier::AsChar:
case LengthModifier::AsLongLong:
case LengthModifier::AsQuad:
@@ -874,7 +912,7 @@ bool FormatSpecifier::hasStandardConversionSpecifier(
return true;
case ConversionSpecifier::CArg:
case ConversionSpecifier::SArg:
- return LangOpt.ObjC1 || LangOpt.ObjC2;
+ return LangOpt.ObjC;
case ConversionSpecifier::InvalidSpecifier:
case ConversionSpecifier::FreeBSDbArg:
case ConversionSpecifier::FreeBSDDArg:
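A small sketch of the new ArgType::makeVectorType helper under assumed names (Ctx is an ASTContext): only SpecificTy argument types can be widened to the vector form requested by an OpenCL vector modifier.

  using clang::analyze_format_string::ArgType;
  ArgType Scalar(Ctx.FloatTy, "float");         // e.g. the scalar %f argument type
  ArgType Vec4 = Scalar.makeVectorType(Ctx, 4); // for a v4 vector modifier
  if (!Vec4.isValid()) {
    // Non-specific kinds cannot be turned into vector types.
  }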
diff --git a/lib/Analysis/FormatStringParsing.h b/lib/AST/FormatStringParsing.h
index a63140b366..9da829adcb 100644
--- a/lib/Analysis/FormatStringParsing.h
+++ b/lib/AST/FormatStringParsing.h
@@ -3,7 +3,7 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/Type.h"
-#include "clang/Analysis/Analyses/FormatString.h"
+#include "clang/AST/FormatString.h"
namespace clang {
@@ -41,6 +41,10 @@ bool ParseArgPosition(FormatStringHandler &H,
FormatSpecifier &CS, const char *Start,
const char *&Beg, const char *E);
+bool ParseVectorModifier(FormatStringHandler &H,
+ FormatSpecifier &FS, const char *&Beg, const char *E,
+ const LangOptions &LO);
+
/// Returns true if a LengthModifier was parsed and installed in the
/// FormatSpecifier& argument, and false otherwise.
bool ParseLengthModifier(FormatSpecifier &FS, const char *&Beg, const char *E,
diff --git a/lib/AST/ItaniumMangle.cpp b/lib/AST/ItaniumMangle.cpp
index e99549850a..d8d56216b1 100644
--- a/lib/AST/ItaniumMangle.cpp
+++ b/lib/AST/ItaniumMangle.cpp
@@ -33,12 +33,6 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
-#define MANGLE_CHECKER 0
-
-#if MANGLE_CHECKER
-#include <cxxabi.h>
-#endif
-
using namespace clang;
namespace {
@@ -415,17 +409,6 @@ public:
SeqID(Outer.SeqID), FunctionTypeDepth(Outer.FunctionTypeDepth),
AbiTagsRoot(AbiTags), Substitutions(Outer.Substitutions) {}
-#if MANGLE_CHECKER
- ~CXXNameMangler() {
- if (Out.str()[0] == '\01')
- return;
-
- int status = 0;
- char *result = abi::__cxa_demangle(Out.str().str().c_str(), 0, 0, &status);
- assert(status == 0 && "Could not demangle mangled name!");
- free(result);
- }
-#endif
raw_ostream &getStream() { return Out; }
void disableDerivedAbiTags() { DisableDerivedAbiTags = true; }
@@ -1520,8 +1503,7 @@ void CXXNameMangler::mangleNestedName(const NamedDecl *ND,
Out << 'N';
if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(ND)) {
- Qualifiers MethodQuals =
- Qualifiers::fromCVRUMask(Method->getTypeQualifiers());
+ Qualifiers MethodQuals = Method->getTypeQualifiers();
// We do not consider restrict a distinguishing attribute for overloading
// purposes so we must not mangle it.
MethodQuals.removeRestrict();
@@ -2652,6 +2634,12 @@ void CXXNameMangler::mangleType(const BuiltinType *T) {
case BuiltinType::OCLReserveID:
Out << "13ocl_reserveid";
break;
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id: \
+ type_name = "ocl_" #ExtType; \
+ Out << type_name.size() << type_name; \
+ break;
+#include "clang/Basic/OpenCLExtensionTypes.def"
}
}
@@ -2670,6 +2658,7 @@ StringRef CXXNameMangler::getCallingConvQualifierName(CallingConv CC) {
case CC_X86RegCall:
case CC_AAPCS:
case CC_AAPCS_VFP:
+ case CC_AArch64VectorCall:
case CC_IntelOclBicc:
case CC_SpirFunction:
case CC_OpenCLKernel:
@@ -2735,7 +2724,7 @@ void CXXNameMangler::mangleType(const FunctionProtoType *T) {
// Mangle CV-qualifiers, if present. These are 'this' qualifiers,
// e.g. "const" in "int (A::*)() const".
- mangleQualifiers(Qualifiers::fromCVRUMask(T->getTypeQuals()));
+ mangleQualifiers(T->getTypeQuals());
// Mangle instantiation-dependent exception-specification, if present,
// per cxx-abi-dev proposal on 2016-10-11.
@@ -3524,6 +3513,10 @@ recurse:
case Expr::CXXInheritedCtorInitExprClass:
llvm_unreachable("unexpected statement kind");
+ case Expr::ConstantExprClass:
+ E = cast<ConstantExpr>(E)->getSubExpr();
+ goto recurse;
+
// FIXME: invent manglings for all these.
case Expr::BlockExprClass:
case Expr::ChooseExprClass:
@@ -3881,6 +3874,7 @@ recurse:
case UETT_SizeOf:
Out << 's';
break;
+ case UETT_PreferredAlignOf:
case UETT_AlignOf:
Out << 'a';
break;
diff --git a/lib/AST/Mangle.cpp b/lib/AST/Mangle.cpp
index b0e5146e81..bb29bffc1b 100644
--- a/lib/AST/Mangle.cpp
+++ b/lib/AST/Mangle.cpp
@@ -25,12 +25,6 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
-#define MANGLE_CHECKER 0
-
-#if MANGLE_CHECKER
-#include <cxxabi.h>
-#endif
-
using namespace clang;
// FIXME: For blocks we currently mimic GCC's mangling scheme, which leaves
diff --git a/lib/AST/MicrosoftMangle.cpp b/lib/AST/MicrosoftMangle.cpp
index 1d4bdaa760..821112e7a9 100644
--- a/lib/AST/MicrosoftMangle.cpp
+++ b/lib/AST/MicrosoftMangle.cpp
@@ -309,7 +309,7 @@ public:
const MethodVFTableLocation &ML);
void mangleNumber(int64_t Number);
void mangleTagTypeKind(TagTypeKind TK);
- void mangleArtificalTagType(TagTypeKind TK, StringRef UnqualifiedName,
+ void mangleArtificialTagType(TagTypeKind TK, StringRef UnqualifiedName,
ArrayRef<StringRef> NestedNames = None);
void mangleType(QualType T, SourceRange Range,
QualifierMangleMode QMM = QMM_Mangle);
@@ -482,7 +482,7 @@ void MicrosoftCXXNameMangler::mangle(const NamedDecl *D, StringRef Prefix) {
mangleFunctionEncoding(FD, Context.shouldMangleDeclName(FD));
else if (const VarDecl *VD = dyn_cast<VarDecl>(D))
mangleVariableEncoding(VD);
- else if (!isa<ObjCInterfaceDecl>(D))
+ else
llvm_unreachable("Tried to mangle unexpected NamedDecl!");
}
@@ -1070,7 +1070,7 @@ void MicrosoftCXXNameMangler::mangleNestedName(const NamedDecl *ND) {
if (PointersAre64Bit)
Out << 'E';
Out << 'A';
- mangleArtificalTagType(TTK_Struct,
+ mangleArtificialTagType(TTK_Struct,
Discriminate("__block_literal", Discriminator,
ParameterDiscriminator));
Out << "@Z";
@@ -1365,7 +1365,7 @@ void MicrosoftCXXNameMangler::mangleExpression(const Expr *E) {
// It's a global variable.
Out << '3';
// It's a struct called __s_GUID.
- mangleArtificalTagType(TTK_Struct, "__s_GUID");
+ mangleArtificialTagType(TTK_Struct, "__s_GUID");
// It's const.
Out << 'B';
return;
@@ -1521,9 +1521,9 @@ void MicrosoftCXXNameMangler::mangleObjCProtocol(const ObjCProtocolDecl *PD) {
Stream << "?$";
Extra.mangleSourceName("Protocol");
- Extra.mangleArtificalTagType(TTK_Struct, PD->getName());
+ Extra.mangleArtificialTagType(TTK_Struct, PD->getName());
- mangleArtificalTagType(TTK_Struct, TemplateMangling, {"__ObjC"});
+ mangleArtificialTagType(TTK_Struct, TemplateMangling, {"__ObjC"});
}
void MicrosoftCXXNameMangler::mangleObjCLifetime(const QualType Type,
@@ -1552,7 +1552,7 @@ void MicrosoftCXXNameMangler::mangleObjCLifetime(const QualType Type,
Extra.manglePointerExtQualifiers(Quals, Type);
Extra.mangleType(Type, Range);
- mangleArtificalTagType(TTK_Struct, TemplateMangling, {"__ObjC"});
+ mangleArtificialTagType(TTK_Struct, TemplateMangling, {"__ObjC"});
}
void MicrosoftCXXNameMangler::mangleObjCKindOfType(const ObjCObjectType *T,
@@ -1569,7 +1569,7 @@ void MicrosoftCXXNameMangler::mangleObjCKindOfType(const ObjCObjectType *T,
->getAs<ObjCObjectType>(),
Quals, Range);
- mangleArtificalTagType(TTK_Struct, TemplateMangling, {"__ObjC"});
+ mangleArtificialTagType(TTK_Struct, TemplateMangling, {"__ObjC"});
}
void MicrosoftCXXNameMangler::mangleQualifiers(Qualifiers Quals,
@@ -1765,7 +1765,7 @@ void MicrosoftCXXNameMangler::manglePassObjectSizeArg(
ArgBackRefMap::iterator Found = TypeBackReferences.find(TypePtr);
if (Found == TypeBackReferences.end()) {
- mangleArtificalTagType(TTK_Enum, "__pass_object_size" + llvm::utostr(Type),
+ mangleArtificialTagType(TTK_Enum, "__pass_object_size" + llvm::utostr(Type),
{"__clang"});
if (TypeBackReferences.size() < 10) {
@@ -1951,13 +1951,13 @@ void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T, Qualifiers,
llvm_unreachable("placeholder types shouldn't get to name mangling");
case BuiltinType::ObjCId:
- mangleArtificalTagType(TTK_Struct, ".objc_object");
+ mangleArtificialTagType(TTK_Struct, "objc_object");
break;
case BuiltinType::ObjCClass:
- mangleArtificalTagType(TTK_Struct, ".objc_class");
+ mangleArtificialTagType(TTK_Struct, "objc_class");
break;
case BuiltinType::ObjCSel:
- mangleArtificalTagType(TTK_Struct, ".objc_selector");
+ mangleArtificialTagType(TTK_Struct, "objc_selector");
break;
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
@@ -1967,35 +1967,40 @@ void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T, Qualifiers,
#include "clang/Basic/OpenCLImageTypes.def"
case BuiltinType::OCLSampler:
Out << "PA";
- mangleArtificalTagType(TTK_Struct, "ocl_sampler");
+ mangleArtificialTagType(TTK_Struct, "ocl_sampler");
break;
case BuiltinType::OCLEvent:
Out << "PA";
- mangleArtificalTagType(TTK_Struct, "ocl_event");
+ mangleArtificialTagType(TTK_Struct, "ocl_event");
break;
case BuiltinType::OCLClkEvent:
Out << "PA";
- mangleArtificalTagType(TTK_Struct, "ocl_clkevent");
+ mangleArtificialTagType(TTK_Struct, "ocl_clkevent");
break;
case BuiltinType::OCLQueue:
Out << "PA";
- mangleArtificalTagType(TTK_Struct, "ocl_queue");
+ mangleArtificialTagType(TTK_Struct, "ocl_queue");
break;
case BuiltinType::OCLReserveID:
Out << "PA";
- mangleArtificalTagType(TTK_Struct, "ocl_reserveid");
+ mangleArtificialTagType(TTK_Struct, "ocl_reserveid");
+ break;
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id: \
+ mangleArtificialTagType(TTK_Struct, "ocl_" #ExtType); \
break;
+#include "clang/Basic/OpenCLExtensionTypes.def"
case BuiltinType::NullPtr:
Out << "$$T";
break;
case BuiltinType::Float16:
- mangleArtificalTagType(TTK_Struct, "_Float16", {"__clang"});
+ mangleArtificialTagType(TTK_Struct, "_Float16", {"__clang"});
break;
case BuiltinType::Half:
- mangleArtificalTagType(TTK_Struct, "_Half", {"__clang"});
+ mangleArtificialTagType(TTK_Struct, "_Half", {"__clang"});
break;
case BuiltinType::ShortAccum:
@@ -2088,7 +2093,7 @@ void MicrosoftCXXNameMangler::mangleFunctionType(const FunctionType *T,
// If this is a C++ instance method, mangle the CVR qualifiers for the
// this pointer.
if (HasThisQuals) {
- Qualifiers Quals = Qualifiers::fromCVRUMask(Proto->getTypeQuals());
+ Qualifiers Quals = Proto->getTypeQuals();
manglePointerExtQualifiers(Quals, /*PointeeType=*/QualType());
mangleRefQualifier(Proto->getRefQualifier());
mangleQualifiers(Quals, /*IsMember=*/false);
@@ -2352,7 +2357,7 @@ void MicrosoftCXXNameMangler::mangleType(const TagDecl *TD) {
}
// If you add a call to this, consider updating isArtificialTagType() too.
-void MicrosoftCXXNameMangler::mangleArtificalTagType(
+void MicrosoftCXXNameMangler::mangleArtificialTagType(
TagTypeKind TK, StringRef UnqualifiedName,
ArrayRef<StringRef> NestedNames) {
// <name> ::= <unscoped-name> {[<named-scope>]+ | [<nested-name>]}? @
@@ -2538,10 +2543,10 @@ void MicrosoftCXXNameMangler::mangleType(const ComplexType *T, Qualifiers,
Extra.mangleSourceName("_Complex");
Extra.mangleType(ElementType, Range, QMM_Escape);
- mangleArtificalTagType(TTK_Struct, TemplateMangling, {"__clang"});
+ mangleArtificialTagType(TTK_Struct, TemplateMangling, {"__clang"});
}
-// Returns true for types that mangleArtificalTagType() gets called for with
+// Returns true for types that mangleArtificialTagType() gets called for with
// TTK_Union, TTK_Struct, TTK_Class and where compatibility with MSVC's
// mangling matters.
// (It doesn't matter for Objective-C types and the like that cl.exe doesn't
@@ -2554,7 +2559,7 @@ bool MicrosoftCXXNameMangler::isArtificialTagType(QualType T) const {
case Type::Vector: {
// For ABI compatibility only __m64, __m128(id), and __m256(id) matter,
- // but since mangleType(VectorType*) always calls mangleArtificalTagType()
+ // but since mangleType(VectorType*) always calls mangleArtificialTagType()
// just always return true (the other vector types are clang-only).
return true;
}
@@ -2569,18 +2574,20 @@ void MicrosoftCXXNameMangler::mangleType(const VectorType *T, Qualifiers Quals,
// Pattern match exactly the typedefs in our intrinsic headers. Anything that
// doesn't match the Intel types uses a custom mangling below.
size_t OutSizeBefore = Out.tell();
- llvm::Triple::ArchType AT =
- getASTContext().getTargetInfo().getTriple().getArch();
- if (AT == llvm::Triple::x86 || AT == llvm::Triple::x86_64) {
- if (Width == 64 && ET->getKind() == BuiltinType::LongLong) {
- mangleArtificalTagType(TTK_Union, "__m64");
- } else if (Width >= 128) {
- if (ET->getKind() == BuiltinType::Float)
- mangleArtificalTagType(TTK_Union, "__m" + llvm::utostr(Width));
- else if (ET->getKind() == BuiltinType::LongLong)
- mangleArtificalTagType(TTK_Union, "__m" + llvm::utostr(Width) + 'i');
- else if (ET->getKind() == BuiltinType::Double)
- mangleArtificalTagType(TTK_Struct, "__m" + llvm::utostr(Width) + 'd');
+ if (!isa<ExtVectorType>(T)) {
+ llvm::Triple::ArchType AT =
+ getASTContext().getTargetInfo().getTriple().getArch();
+ if (AT == llvm::Triple::x86 || AT == llvm::Triple::x86_64) {
+ if (Width == 64 && ET->getKind() == BuiltinType::LongLong) {
+ mangleArtificialTagType(TTK_Union, "__m64");
+ } else if (Width >= 128) {
+ if (ET->getKind() == BuiltinType::Float)
+ mangleArtificialTagType(TTK_Union, "__m" + llvm::utostr(Width));
+ else if (ET->getKind() == BuiltinType::LongLong)
+ mangleArtificialTagType(TTK_Union, "__m" + llvm::utostr(Width) + 'i');
+ else if (ET->getKind() == BuiltinType::Double)
+ mangleArtificialTagType(TTK_Struct, "__m" + llvm::utostr(Width) + 'd');
+ }
}
}
@@ -2599,7 +2606,7 @@ void MicrosoftCXXNameMangler::mangleType(const VectorType *T, Qualifiers Quals,
Extra.mangleIntegerLiteral(llvm::APSInt::getUnsigned(T->getNumElements()),
/*IsBoolean=*/false);
- mangleArtificalTagType(TTK_Union, TemplateMangling, {"__clang"});
+ mangleArtificialTagType(TTK_Union, TemplateMangling, {"__clang"});
}
}
@@ -2637,10 +2644,9 @@ void MicrosoftCXXNameMangler::mangleType(const DependentAddressSpaceType *T,
void MicrosoftCXXNameMangler::mangleType(const ObjCInterfaceType *T, Qualifiers,
SourceRange) {
- // ObjC interfaces are mangled as if they were structs with a name that is
- // not a valid C/C++ identifier
+ // ObjC interfaces have structs underlying them.
mangleTagTypeKind(TTK_Struct);
- mangle(T->getDecl(), ".objc_cls_");
+ mangleName(T->getDecl());
}
void MicrosoftCXXNameMangler::mangleType(const ObjCObjectType *T,
@@ -2661,11 +2667,11 @@ void MicrosoftCXXNameMangler::mangleType(const ObjCObjectType *T,
Out << "?$";
if (T->isObjCId())
- mangleSourceName(".objc_object");
+ mangleSourceName("objc_object");
else if (T->isObjCClass())
- mangleSourceName(".objc_class");
+ mangleSourceName("objc_class");
else
- mangleSourceName((".objc_cls_" + T->getInterface()->getName()).str());
+ mangleSourceName(T->getInterface()->getName());
for (const auto &Q : T->quals())
mangleObjCProtocol(Q);
@@ -2804,7 +2810,7 @@ void MicrosoftCXXNameMangler::mangleType(const AtomicType *T, Qualifiers,
Extra.mangleSourceName("_Atomic");
Extra.mangleType(ValueType, Range, QMM_Escape);
- mangleArtificalTagType(TTK_Struct, TemplateMangling, {"__clang"});
+ mangleArtificialTagType(TTK_Struct, TemplateMangling, {"__clang"});
}
void MicrosoftCXXNameMangler::mangleType(const PipeType *T, Qualifiers,
diff --git a/lib/AST/NSAPI.cpp b/lib/AST/NSAPI.cpp
index 9d591313f2..5b8300893e 100644
--- a/lib/AST/NSAPI.cpp
+++ b/lib/AST/NSAPI.cpp
@@ -475,6 +475,9 @@ NSAPI::getNSNumberFactoryMethodKind(QualType T) const {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id:
+#include "clang/Basic/OpenCLExtensionTypes.def"
case BuiltinType::OCLSampler:
case BuiltinType::OCLEvent:
case BuiltinType::OCLClkEvent:
@@ -509,7 +512,7 @@ bool NSAPI::isObjCNSUIntegerType(QualType T) const {
}
StringRef NSAPI::GetNSIntegralKind(QualType T) const {
- if (!Ctx.getLangOpts().ObjC1 || T.isNull())
+ if (!Ctx.getLangOpts().ObjC || T.isNull())
return StringRef();
while (const TypedefType *TDT = T->getAs<TypedefType>()) {
@@ -561,7 +564,7 @@ bool NSAPI::isSubclassOfNSClass(ObjCInterfaceDecl *InterfaceDecl,
bool NSAPI::isObjCTypedef(QualType T,
StringRef name, IdentifierInfo *&II) const {
- if (!Ctx.getLangOpts().ObjC1)
+ if (!Ctx.getLangOpts().ObjC)
return false;
if (T.isNull())
return false;
@@ -580,7 +583,7 @@ bool NSAPI::isObjCTypedef(QualType T,
bool NSAPI::isObjCEnumerator(const Expr *E,
StringRef name, IdentifierInfo *&II) const {
- if (!Ctx.getLangOpts().ObjC1)
+ if (!Ctx.getLangOpts().ObjC)
return false;
if (!E)
return false;
diff --git a/lib/AST/NestedNameSpecifier.cpp b/lib/AST/NestedNameSpecifier.cpp
index df23972af2..548f2f8e3b 100644
--- a/lib/AST/NestedNameSpecifier.cpp
+++ b/lib/AST/NestedNameSpecifier.cpp
@@ -16,6 +16,7 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
@@ -270,9 +271,8 @@ bool NestedNameSpecifier::containsUnexpandedParameterPack() const {
/// Print this nested name specifier to the given output
/// stream.
-void
-NestedNameSpecifier::print(raw_ostream &OS,
- const PrintingPolicy &Policy) const {
+void NestedNameSpecifier::print(raw_ostream &OS, const PrintingPolicy &Policy,
+ bool ResolveTemplateArguments) const {
if (getPrefix())
getPrefix()->print(OS, Policy);
@@ -305,6 +305,15 @@ NestedNameSpecifier::print(raw_ostream &OS,
LLVM_FALLTHROUGH;
case TypeSpec: {
+ const auto *Record =
+ dyn_cast_or_null<ClassTemplateSpecializationDecl>(getAsRecordDecl());
+ if (ResolveTemplateArguments && Record) {
+ // Print the type trait with resolved template parameters.
+ Record->printName(OS);
+ printTemplateArgumentList(OS, Record->getTemplateArgs().asArray(),
+ Policy);
+ break;
+ }
const Type *T = getAsType();
PrintingPolicy InnerPolicy(Policy);
@@ -339,13 +348,20 @@ NestedNameSpecifier::print(raw_ostream &OS,
OS << "::";
}
-void NestedNameSpecifier::dump(const LangOptions &LO) const {
- print(llvm::errs(), PrintingPolicy(LO));
+LLVM_DUMP_METHOD void NestedNameSpecifier::dump(const LangOptions &LO) const {
+ dump(llvm::errs(), LO);
}
-LLVM_DUMP_METHOD void NestedNameSpecifier::dump() const {
+LLVM_DUMP_METHOD void NestedNameSpecifier::dump() const { dump(llvm::errs()); }
+
+LLVM_DUMP_METHOD void NestedNameSpecifier::dump(llvm::raw_ostream &OS) const {
LangOptions LO;
- print(llvm::errs(), PrintingPolicy(LO));
+ dump(OS, LO);
+}
+
+LLVM_DUMP_METHOD void NestedNameSpecifier::dump(llvm::raw_ostream &OS,
+ const LangOptions &LO) const {
+ print(OS, PrintingPolicy(LO));
}
unsigned
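
The NestedNameSpecifier change above adds raw_ostream overloads of dump() and funnels every overload into a single print() call, so the stderr path and the explicit-stream path cannot drift apart. A minimal sketch of that delegation shape, with Options and Node as invented stand-ins for LangOptions and the real class:

    #include <iostream>
    #include <ostream>

    // Stand-in for clang::LangOptions; only the delegation shape matters here.
    struct Options {};

    class Node {
    public:
      // All four overloads funnel into one print() so behavior cannot diverge.
      void dump() const { dump(std::cerr); }
      void dump(const Options &Opts) const { dump(std::cerr, Opts); }
      void dump(std::ostream &OS) const { dump(OS, Options{}); }
      void dump(std::ostream &OS, const Options &Opts) const { print(OS, Opts); }

    private:
      void print(std::ostream &OS, const Options &) const { OS << "Node\n"; }
    };

    int main() {
      Node N;
      N.dump();          // stderr, default options
      N.dump(std::cout); // explicit stream, default options
    }
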
diff --git a/lib/AST/ODRHash.cpp b/lib/AST/ODRHash.cpp
index 3aeb7e6fbb..a4c344ce0a 100644
--- a/lib/AST/ODRHash.cpp
+++ b/lib/AST/ODRHash.cpp
@@ -49,7 +49,7 @@ void ODRHash::AddDeclarationNameImpl(DeclarationName Name) {
auto Result = DeclNameMap.insert(std::make_pair(Name, DeclNameMap.size()));
ID.AddInteger(Result.first->second);
if (!Result.second) {
- // If found in map, the the DeclarationName has previously been processed.
+ // If found in map, the DeclarationName has previously been processed.
return;
}
diff --git a/lib/Analysis/OSLog.cpp b/lib/AST/OSLog.cpp
index b2983932ea..df2f808728 100644
--- a/lib/Analysis/OSLog.cpp
+++ b/lib/AST/OSLog.cpp
@@ -1,11 +1,11 @@
// TODO: header template
-#include "clang/Analysis/Analyses/OSLog.h"
+#include "clang/AST/OSLog.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ExprObjC.h"
-#include "clang/Analysis/Analyses/FormatString.h"
+#include "clang/AST/FormatString.h"
#include "clang/Basic/Builtins.h"
#include "llvm/ADT/SmallBitVector.h"
@@ -26,6 +26,7 @@ private:
Optional<const Expr *> Precision;
Optional<const Expr *> FieldWidth;
unsigned char Flags = 0;
+ StringRef MaskType;
};
SmallVector<ArgData, 4> ArgsData;
ArrayRef<const Expr *> Args;
@@ -120,18 +121,26 @@ public:
ArgsData.back().FieldWidth = Args[FS.getFieldWidth().getArgIndex()];
}
- if (FS.isPrivate()) {
+ if (FS.isSensitive())
+ ArgsData.back().Flags |= OSLogBufferItem::IsSensitive;
+ else if (FS.isPrivate())
ArgsData.back().Flags |= OSLogBufferItem::IsPrivate;
- }
- if (FS.isPublic()) {
+ else if (FS.isPublic())
ArgsData.back().Flags |= OSLogBufferItem::IsPublic;
- }
+
+ ArgsData.back().MaskType = FS.getMaskType();
return true;
}
void computeLayout(ASTContext &Ctx, OSLogBufferLayout &Layout) const {
Layout.Items.clear();
for (auto &Data : ArgsData) {
+ if (!Data.MaskType.empty()) {
+ CharUnits Size = CharUnits::fromQuantity(8);
+ Layout.Items.emplace_back(OSLogBufferItem::MaskKind, nullptr,
+ Size, 0, Data.MaskType);
+ }
+
if (Data.FieldWidth) {
CharUnits Size = Ctx.getTypeSizeInChars((*Data.FieldWidth)->getType());
Layout.Items.emplace_back(OSLogBufferItem::ScalarKind, *Data.FieldWidth,
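
In the OSLog hunk above, an argument annotated with a mask type now contributes a fixed 8-byte MaskKind item ahead of its other buffer items (CharUnits::fromQuantity(8) in the diff). A simplified, Clang-free sketch of that layout rule; ArgData, Item and computeLayout here are invented stand-ins for the real classes:

    #include <iostream>
    #include <string>
    #include <vector>

    // Simplified stand-ins for the buffer item kinds used above.
    enum class ItemKind { Scalar, Mask };

    struct Item {
      ItemKind Kind;
      unsigned SizeInBytes;
      std::string MaskType; // only meaningful for Mask items
    };

    struct ArgData {
      std::string MaskType;   // e.g. "hash" from %{mask.hash}, empty if absent
      unsigned ValueSize = 4; // pretend every value is a 4-byte scalar
    };

    // Mirror of the layout rule in the hunk above: a mask annotation adds a
    // fixed 8-byte item in front of the items for the argument itself.
    std::vector<Item> computeLayout(const std::vector<ArgData> &Args) {
      std::vector<Item> Items;
      for (const ArgData &A : Args) {
        if (!A.MaskType.empty())
          Items.push_back({ItemKind::Mask, 8, A.MaskType});
        Items.push_back({ItemKind::Scalar, A.ValueSize, {}});
      }
      return Items;
    }

    int main() {
      std::vector<ArgData> Args = {{"hash", 4}, {"", 4}};
      for (const Item &I : computeLayout(Args))
        std::cout << (I.Kind == ItemKind::Mask ? "mask(" + I.MaskType + ")"
                                               : std::string("scalar"))
                  << " size=" << I.SizeInBytes << '\n';
    }
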
diff --git a/lib/AST/OpenMPClause.cpp b/lib/AST/OpenMPClause.cpp
index 1114ae3e92..3124b0ff6f 100644
--- a/lib/AST/OpenMPClause.cpp
+++ b/lib/AST/OpenMPClause.cpp
@@ -14,6 +14,7 @@
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
+#include "clang/AST/DeclOpenMP.h"
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/Casting.h"
@@ -107,6 +108,10 @@ const OMPClauseWithPreInit *OMPClauseWithPreInit::get(const OMPClause *C) {
case OMPC_use_device_ptr:
case OMPC_is_device_ptr:
case OMPC_unified_address:
+ case OMPC_unified_shared_memory:
+ case OMPC_reverse_offload:
+ case OMPC_dynamic_allocators:
+ case OMPC_atomic_default_mem_order:
break;
}
@@ -177,6 +182,10 @@ const OMPClauseWithPostUpdate *OMPClauseWithPostUpdate::get(const OMPClause *C)
case OMPC_use_device_ptr:
case OMPC_is_device_ptr:
case OMPC_unified_address:
+ case OMPC_unified_shared_memory:
+ case OMPC_reverse_offload:
+ case OMPC_dynamic_allocators:
+ case OMPC_atomic_default_mem_order:
break;
}
@@ -1043,3 +1052,442 @@ OMPIsDevicePtrClause *OMPIsDevicePtrClause::CreateEmpty(
return new (Mem) OMPIsDevicePtrClause(NumVars, NumUniqueDeclarations,
NumComponentLists, NumComponents);
}
+
+//===----------------------------------------------------------------------===//
+// OpenMP clauses printing methods
+//===----------------------------------------------------------------------===//
+
+void OMPClausePrinter::VisitOMPIfClause(OMPIfClause *Node) {
+ OS << "if(";
+ if (Node->getNameModifier() != OMPD_unknown)
+ OS << getOpenMPDirectiveName(Node->getNameModifier()) << ": ";
+ Node->getCondition()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPFinalClause(OMPFinalClause *Node) {
+ OS << "final(";
+ Node->getCondition()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPNumThreadsClause(OMPNumThreadsClause *Node) {
+ OS << "num_threads(";
+ Node->getNumThreads()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPSafelenClause(OMPSafelenClause *Node) {
+ OS << "safelen(";
+ Node->getSafelen()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPSimdlenClause(OMPSimdlenClause *Node) {
+ OS << "simdlen(";
+ Node->getSimdlen()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPCollapseClause(OMPCollapseClause *Node) {
+ OS << "collapse(";
+ Node->getNumForLoops()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPDefaultClause(OMPDefaultClause *Node) {
+ OS << "default("
+ << getOpenMPSimpleClauseTypeName(OMPC_default, Node->getDefaultKind())
+ << ")";
+}
+
+void OMPClausePrinter::VisitOMPProcBindClause(OMPProcBindClause *Node) {
+ OS << "proc_bind("
+ << getOpenMPSimpleClauseTypeName(OMPC_proc_bind, Node->getProcBindKind())
+ << ")";
+}
+
+void OMPClausePrinter::VisitOMPUnifiedAddressClause(OMPUnifiedAddressClause *) {
+ OS << "unified_address";
+}
+
+void OMPClausePrinter::VisitOMPUnifiedSharedMemoryClause(
+ OMPUnifiedSharedMemoryClause *) {
+ OS << "unified_shared_memory";
+}
+
+void OMPClausePrinter::VisitOMPReverseOffloadClause(OMPReverseOffloadClause *) {
+ OS << "reverse_offload";
+}
+
+void OMPClausePrinter::VisitOMPDynamicAllocatorsClause(
+ OMPDynamicAllocatorsClause *) {
+ OS << "dynamic_allocators";
+}
+
+void OMPClausePrinter::VisitOMPAtomicDefaultMemOrderClause(
+ OMPAtomicDefaultMemOrderClause *Node) {
+ OS << "atomic_default_mem_order("
+ << getOpenMPSimpleClauseTypeName(OMPC_atomic_default_mem_order,
+ Node->getAtomicDefaultMemOrderKind())
+ << ")";
+}
+
+void OMPClausePrinter::VisitOMPScheduleClause(OMPScheduleClause *Node) {
+ OS << "schedule(";
+ if (Node->getFirstScheduleModifier() != OMPC_SCHEDULE_MODIFIER_unknown) {
+ OS << getOpenMPSimpleClauseTypeName(OMPC_schedule,
+ Node->getFirstScheduleModifier());
+ if (Node->getSecondScheduleModifier() != OMPC_SCHEDULE_MODIFIER_unknown) {
+ OS << ", ";
+ OS << getOpenMPSimpleClauseTypeName(OMPC_schedule,
+ Node->getSecondScheduleModifier());
+ }
+ OS << ": ";
+ }
+ OS << getOpenMPSimpleClauseTypeName(OMPC_schedule, Node->getScheduleKind());
+ if (auto *E = Node->getChunkSize()) {
+ OS << ", ";
+ E->printPretty(OS, nullptr, Policy);
+ }
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPOrderedClause(OMPOrderedClause *Node) {
+ OS << "ordered";
+ if (auto *Num = Node->getNumForLoops()) {
+ OS << "(";
+ Num->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPNowaitClause(OMPNowaitClause *) {
+ OS << "nowait";
+}
+
+void OMPClausePrinter::VisitOMPUntiedClause(OMPUntiedClause *) {
+ OS << "untied";
+}
+
+void OMPClausePrinter::VisitOMPNogroupClause(OMPNogroupClause *) {
+ OS << "nogroup";
+}
+
+void OMPClausePrinter::VisitOMPMergeableClause(OMPMergeableClause *) {
+ OS << "mergeable";
+}
+
+void OMPClausePrinter::VisitOMPReadClause(OMPReadClause *) { OS << "read"; }
+
+void OMPClausePrinter::VisitOMPWriteClause(OMPWriteClause *) { OS << "write"; }
+
+void OMPClausePrinter::VisitOMPUpdateClause(OMPUpdateClause *) {
+ OS << "update";
+}
+
+void OMPClausePrinter::VisitOMPCaptureClause(OMPCaptureClause *) {
+ OS << "capture";
+}
+
+void OMPClausePrinter::VisitOMPSeqCstClause(OMPSeqCstClause *) {
+ OS << "seq_cst";
+}
+
+void OMPClausePrinter::VisitOMPThreadsClause(OMPThreadsClause *) {
+ OS << "threads";
+}
+
+void OMPClausePrinter::VisitOMPSIMDClause(OMPSIMDClause *) { OS << "simd"; }
+
+void OMPClausePrinter::VisitOMPDeviceClause(OMPDeviceClause *Node) {
+ OS << "device(";
+ Node->getDevice()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPNumTeamsClause(OMPNumTeamsClause *Node) {
+ OS << "num_teams(";
+ Node->getNumTeams()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPThreadLimitClause(OMPThreadLimitClause *Node) {
+ OS << "thread_limit(";
+ Node->getThreadLimit()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPPriorityClause(OMPPriorityClause *Node) {
+ OS << "priority(";
+ Node->getPriority()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPGrainsizeClause(OMPGrainsizeClause *Node) {
+ OS << "grainsize(";
+ Node->getGrainsize()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPNumTasksClause(OMPNumTasksClause *Node) {
+ OS << "num_tasks(";
+ Node->getNumTasks()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPHintClause(OMPHintClause *Node) {
+ OS << "hint(";
+ Node->getHint()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+template<typename T>
+void OMPClausePrinter::VisitOMPClauseList(T *Node, char StartSym) {
+ for (typename T::varlist_iterator I = Node->varlist_begin(),
+ E = Node->varlist_end();
+ I != E; ++I) {
+ assert(*I && "Expected non-null Stmt");
+ OS << (I == Node->varlist_begin() ? StartSym : ',');
+ if (auto *DRE = dyn_cast<DeclRefExpr>(*I)) {
+ if (isa<OMPCapturedExprDecl>(DRE->getDecl()))
+ DRE->printPretty(OS, nullptr, Policy, 0);
+ else
+ DRE->getDecl()->printQualifiedName(OS);
+ } else
+ (*I)->printPretty(OS, nullptr, Policy, 0);
+ }
+}
+
+void OMPClausePrinter::VisitOMPPrivateClause(OMPPrivateClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "private";
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPFirstprivateClause(OMPFirstprivateClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "firstprivate";
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPLastprivateClause(OMPLastprivateClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "lastprivate";
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPSharedClause(OMPSharedClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "shared";
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPReductionClause(OMPReductionClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "reduction(";
+ NestedNameSpecifier *QualifierLoc =
+ Node->getQualifierLoc().getNestedNameSpecifier();
+ OverloadedOperatorKind OOK =
+ Node->getNameInfo().getName().getCXXOverloadedOperator();
+ if (QualifierLoc == nullptr && OOK != OO_None) {
+ // Print reduction identifier in C format
+ OS << getOperatorSpelling(OOK);
+ } else {
+ // Use C++ format
+ if (QualifierLoc != nullptr)
+ QualifierLoc->print(OS, Policy);
+ OS << Node->getNameInfo();
+ }
+ OS << ":";
+ VisitOMPClauseList(Node, ' ');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPTaskReductionClause(
+ OMPTaskReductionClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "task_reduction(";
+ NestedNameSpecifier *QualifierLoc =
+ Node->getQualifierLoc().getNestedNameSpecifier();
+ OverloadedOperatorKind OOK =
+ Node->getNameInfo().getName().getCXXOverloadedOperator();
+ if (QualifierLoc == nullptr && OOK != OO_None) {
+ // Print reduction identifier in C format
+ OS << getOperatorSpelling(OOK);
+ } else {
+ // Use C++ format
+ if (QualifierLoc != nullptr)
+ QualifierLoc->print(OS, Policy);
+ OS << Node->getNameInfo();
+ }
+ OS << ":";
+ VisitOMPClauseList(Node, ' ');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPInReductionClause(OMPInReductionClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "in_reduction(";
+ NestedNameSpecifier *QualifierLoc =
+ Node->getQualifierLoc().getNestedNameSpecifier();
+ OverloadedOperatorKind OOK =
+ Node->getNameInfo().getName().getCXXOverloadedOperator();
+ if (QualifierLoc == nullptr && OOK != OO_None) {
+ // Print reduction identifier in C format
+ OS << getOperatorSpelling(OOK);
+ } else {
+ // Use C++ format
+ if (QualifierLoc != nullptr)
+ QualifierLoc->print(OS, Policy);
+ OS << Node->getNameInfo();
+ }
+ OS << ":";
+ VisitOMPClauseList(Node, ' ');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPLinearClause(OMPLinearClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "linear";
+ if (Node->getModifierLoc().isValid()) {
+ OS << '('
+ << getOpenMPSimpleClauseTypeName(OMPC_linear, Node->getModifier());
+ }
+ VisitOMPClauseList(Node, '(');
+ if (Node->getModifierLoc().isValid())
+ OS << ')';
+ if (Node->getStep() != nullptr) {
+ OS << ": ";
+ Node->getStep()->printPretty(OS, nullptr, Policy, 0);
+ }
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPAlignedClause(OMPAlignedClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "aligned";
+ VisitOMPClauseList(Node, '(');
+ if (Node->getAlignment() != nullptr) {
+ OS << ": ";
+ Node->getAlignment()->printPretty(OS, nullptr, Policy, 0);
+ }
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPCopyinClause(OMPCopyinClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "copyin";
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPCopyprivateClause(OMPCopyprivateClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "copyprivate";
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPFlushClause(OMPFlushClause *Node) {
+ if (!Node->varlist_empty()) {
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPDependClause(OMPDependClause *Node) {
+ OS << "depend(";
+ OS << getOpenMPSimpleClauseTypeName(Node->getClauseKind(),
+ Node->getDependencyKind());
+ if (!Node->varlist_empty()) {
+ OS << " :";
+ VisitOMPClauseList(Node, ' ');
+ }
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPMapClause(OMPMapClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "map(";
+ if (Node->getMapType() != OMPC_MAP_unknown) {
+ if (Node->getMapTypeModifier() != OMPC_MAP_unknown) {
+ OS << getOpenMPSimpleClauseTypeName(OMPC_map,
+ Node->getMapTypeModifier());
+ OS << ',';
+ }
+ OS << getOpenMPSimpleClauseTypeName(OMPC_map, Node->getMapType());
+ OS << ':';
+ }
+ VisitOMPClauseList(Node, ' ');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPToClause(OMPToClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "to";
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPFromClause(OMPFromClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "from";
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPDistScheduleClause(OMPDistScheduleClause *Node) {
+ OS << "dist_schedule(" << getOpenMPSimpleClauseTypeName(
+ OMPC_dist_schedule, Node->getDistScheduleKind());
+ if (auto *E = Node->getChunkSize()) {
+ OS << ", ";
+ E->printPretty(OS, nullptr, Policy);
+ }
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPDefaultmapClause(OMPDefaultmapClause *Node) {
+ OS << "defaultmap(";
+ OS << getOpenMPSimpleClauseTypeName(OMPC_defaultmap,
+ Node->getDefaultmapModifier());
+ OS << ": ";
+ OS << getOpenMPSimpleClauseTypeName(OMPC_defaultmap,
+ Node->getDefaultmapKind());
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPUseDevicePtrClause(OMPUseDevicePtrClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "use_device_ptr";
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPIsDevicePtrClause(OMPIsDevicePtrClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "is_device_ptr";
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
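
The clause printers moved into OpenMPClause.cpp above all share the VisitOMPClauseList convention: the first variable in the list is preceded by an opening symbol (usually '('), every later one by a comma, clauses with an empty variable list print nothing, and a closing ')' ends the clause. A small standalone sketch of that convention; printClause and its parameters are invented for illustration:

    #include <iostream>
    #include <string>
    #include <vector>

    // Sketch of the VisitOMPClauseList convention above: the first variable is
    // preceded by the opening symbol, every later one by ','; the clause
    // keyword comes first and ')' closes the list.
    void printClause(std::ostream &OS, const std::string &Keyword,
                     const std::vector<std::string> &Vars, char StartSym = '(') {
      if (Vars.empty())
        return; // clauses with an empty variable list are not printed at all
      OS << Keyword;
      for (std::vector<std::string>::size_type I = 0; I != Vars.size(); ++I)
        OS << (I == 0 ? StartSym : ',') << Vars[I];
      OS << ')';
    }

    int main() {
      printClause(std::cout, "firstprivate", {"a", "b", "c"});
      std::cout << ' ';
      printClause(std::cout, "shared", {"x"});
      std::cout << '\n'; // prints: firstprivate(a,b,c) shared(x)
    }
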
diff --git a/lib/AST/ParentMap.cpp b/lib/AST/ParentMap.cpp
index af2a480dc2..88c178aa37 100644
--- a/lib/AST/ParentMap.cpp
+++ b/lib/AST/ParentMap.cpp
@@ -163,7 +163,7 @@ bool ParentMap::isConsumedExpr(Expr* E) const {
// Ignore parents that don't guarantee consumption.
while (P && (isa<ParenExpr>(P) || isa<CastExpr>(P) ||
- isa<ExprWithCleanups>(P))) {
+ isa<FullExpr>(P))) {
DirectChild = P;
P = getParent(P);
}
diff --git a/lib/Analysis/PrintfFormatString.cpp b/lib/AST/PrintfFormatString.cpp
index dcb15c5e37..e0a0c5b758 100644
--- a/lib/Analysis/PrintfFormatString.cpp
+++ b/lib/AST/PrintfFormatString.cpp
@@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Analysis/Analyses/FormatString.h"
-#include "clang/Analysis/Analyses/OSLog.h"
+#include "clang/AST/FormatString.h"
+#include "clang/AST/OSLog.h"
#include "FormatStringParsing.h"
#include "clang/Basic/TargetInfo.h"
@@ -127,7 +127,9 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
do {
StringRef Str(I, E - I);
- std::string Match = "^[\t\n\v\f\r ]*(private|public)[\t\n\v\f\r ]*(,|})";
+ std::string Match = "^[[:space:]]*"
+ "(private|public|sensitive|mask\\.[^[:space:],}]*)"
+ "[[:space:]]*(,|})";
llvm::Regex R(Match);
SmallVector<StringRef, 2> Matches;
@@ -138,7 +140,17 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
// Set the privacy flag if the privacy annotation in the
// comma-delimited segment is at least as strict as the privacy
// annotations in previous comma-delimited segments.
- if (MatchedStr.equals("private"))
+ if (MatchedStr.startswith("mask")) {
+ StringRef MaskType = MatchedStr.substr(sizeof("mask.") - 1);
+ unsigned Size = MaskType.size();
+ if (Warn && (Size == 0 || Size > 8))
+ H.handleInvalidMaskType(MaskType);
+ FS.setMaskType(MaskType);
+ } else if (MatchedStr.equals("sensitive"))
+ PrivacyFlags = clang::analyze_os_log::OSLogBufferItem::IsSensitive;
+ else if (PrivacyFlags !=
+ clang::analyze_os_log::OSLogBufferItem::IsSensitive &&
+ MatchedStr.equals("private"))
PrivacyFlags = clang::analyze_os_log::OSLogBufferItem::IsPrivate;
else if (PrivacyFlags == 0 && MatchedStr.equals("public"))
PrivacyFlags = clang::analyze_os_log::OSLogBufferItem::IsPublic;
@@ -168,6 +180,9 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
case clang::analyze_os_log::OSLogBufferItem::IsPublic:
FS.setIsPublic(MatchedStr.data());
break;
+ case clang::analyze_os_log::OSLogBufferItem::IsSensitive:
+ FS.setIsSensitive(MatchedStr.data());
+ break;
default:
llvm_unreachable("Unexpected privacy flag value");
}
@@ -232,6 +247,9 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
}
}
+ if (ParseVectorModifier(H, FS, I, E, LO))
+ return true;
+
// Look for the length modifier.
if (ParseLengthModifier(FS, I, E, LO) && I == E) {
// No more characters left?
@@ -347,6 +365,7 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
case 'Z':
if (Target.getTriple().isOSMSVCRT())
k = ConversionSpecifier::ZArg;
+ break;
}
// Check to see if we used the Objective-C modifier flags with
@@ -445,13 +464,8 @@ bool clang::analyze_format_string::ParseFormatStringHasSArg(const char *I,
// Methods on PrintfSpecifier.
//===----------------------------------------------------------------------===//
-ArgType PrintfSpecifier::getArgType(ASTContext &Ctx,
- bool IsObjCLiteral) const {
- const PrintfConversionSpecifier &CS = getConversionSpecifier();
-
- if (!CS.consumesDataArgument())
- return ArgType::Invalid();
-
+ArgType PrintfSpecifier::getScalarArgType(ASTContext &Ctx,
+ bool IsObjCLiteral) const {
if (CS.getKind() == ConversionSpecifier::cArg)
switch (LM.getKind()) {
case LengthModifier::None:
@@ -611,6 +625,21 @@ ArgType PrintfSpecifier::getArgType(ASTContext &Ctx,
return ArgType();
}
+
+ArgType PrintfSpecifier::getArgType(ASTContext &Ctx,
+ bool IsObjCLiteral) const {
+ const PrintfConversionSpecifier &CS = getConversionSpecifier();
+
+ if (!CS.consumesDataArgument())
+ return ArgType::Invalid();
+
+ ArgType ScalarTy = getScalarArgType(Ctx, IsObjCLiteral);
+ if (!ScalarTy.isValid() || VectorNumElts.isInvalid())
+ return ScalarTy;
+
+ return ScalarTy.makeVectorType(Ctx, VectorNumElts.getConstantAmount());
+}
+
bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
ASTContext &Ctx, bool IsObjCLiteral) {
// %n is different from other conversion specifiers; don't try to fix it.
@@ -660,8 +689,17 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
if (const EnumType *ETy = QT->getAs<EnumType>())
QT = ETy->getDecl()->getIntegerType();
- // We can only work with builtin types.
const BuiltinType *BT = QT->getAs<BuiltinType>();
+ if (!BT) {
+ const VectorType *VT = QT->getAs<VectorType>();
+ if (VT) {
+ QT = VT->getElementType();
+ BT = QT->getAs<BuiltinType>();
+ VectorNumElts = OptionalAmount(VT->getNumElements());
+ }
+ }
+
+ // We can only work with builtin types.
if (!BT)
return false;
@@ -708,6 +746,9 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id:
+#include "clang/Basic/OpenCLExtensionTypes.def"
#define SIGNED_TYPE(Id, SingletonId)
#define UNSIGNED_TYPE(Id, SingletonId)
#define FLOATING_TYPE(Id, SingletonId)
@@ -830,6 +871,11 @@ void PrintfSpecifier::toString(raw_ostream &os) const {
FieldWidth.toString(os);
// Precision
Precision.toString(os);
+
+ // Vector modifier
+ if (!VectorNumElts.isInvalid())
+ os << 'v' << VectorNumElts.getConstantAmount();
+
// Length modifier
os << LM.toString();
// Conversion specifier
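
The os_log format parsing above accepts a comma-separated annotation such as %{private, mask.hash}: sensitive overrides private, private overrides public, and a mask.<type> token is recorded separately (with a warning when the type is empty or longer than eight characters). Below is a plain std::regex restatement of that matching loop, offered purely as a sketch and not as the clang::analyze_format_string implementation.

    #include <iostream>
    #include <regex>
    #include <string>

    // Privacy levels in increasing strictness, mirroring the flag priority in
    // the hunk above: public < private < sensitive.
    enum class Privacy { Unspecified, Public, Private, Sensitive };

    struct Annotation {
      Privacy P = Privacy::Unspecified;
      std::string MaskType; // from "mask.<type>", empty if absent
    };

    // Parse a comma-separated os_log annotation such as "private, mask.hash".
    Annotation parseAnnotation(std::string S) {
      Annotation A;
      std::regex R(R"(^\s*(private|public|sensitive|mask\.[^\s,}]*)\s*(,|\}))");
      S += '}'; // the real parser stops at the closing brace of %{...}
      std::smatch M;
      while (std::regex_search(S, M, R)) {
        std::string Tok = M[1];
        if (Tok.rfind("mask.", 0) == 0)
          A.MaskType = Tok.substr(5);     // warned about if empty or > 8 chars
        else if (Tok == "sensitive")
          A.P = Privacy::Sensitive;       // strictest annotation always wins
        else if (Tok == "private" && A.P != Privacy::Sensitive)
          A.P = Privacy::Private;
        else if (Tok == "public" && A.P == Privacy::Unspecified)
          A.P = Privacy::Public;
        S = M.suffix();
      }
      return A;
    }

    int main() {
      Annotation A = parseAnnotation("private, mask.hash");
      std::cout << static_cast<int>(A.P) << " mask=" << A.MaskType << '\n';
      // prints: 2 mask=hash
    }
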
diff --git a/lib/AST/RawCommentList.cpp b/lib/AST/RawCommentList.cpp
index 1f504fb7dd..ab873a3964 100644
--- a/lib/AST/RawCommentList.cpp
+++ b/lib/AST/RawCommentList.cpp
@@ -415,7 +415,7 @@ std::string RawComment::getFormattedText(const SourceManager &SourceMgr,
Str.pop_back();
};
- // Proces first line separately to remember indent for the following lines.
+ // Process first line separately to remember indent for the following lines.
if (!LexLine(/*IsFirstLine=*/true)) {
DropTrailingNewLines(Result);
return Result;
diff --git a/lib/AST/RecordLayoutBuilder.cpp b/lib/AST/RecordLayoutBuilder.cpp
index 6f71d5b83e..62dc22c814 100644
--- a/lib/AST/RecordLayoutBuilder.cpp
+++ b/lib/AST/RecordLayoutBuilder.cpp
@@ -9,6 +9,7 @@
#include "clang/AST/RecordLayout.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/Decl.h"
@@ -16,7 +17,6 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/Basic/TargetInfo.h"
-#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/MathExtras.h"
@@ -2829,15 +2829,14 @@ void MicrosoftRecordLayoutBuilder::layoutVirtualBases(const CXXRecordDecl *RD) {
CharUnits BaseOffset;
// Respect the external AST source base offset, if present.
- bool FoundBase = false;
if (UseExternalLayout) {
- FoundBase = External.getExternalVBaseOffset(BaseDecl, BaseOffset);
- if (FoundBase)
- assert(BaseOffset >= Size && "base offset already allocated");
- }
- if (!FoundBase)
+ if (!External.getExternalVBaseOffset(BaseDecl, BaseOffset))
+ BaseOffset = Size;
+ } else
BaseOffset = Size.alignTo(Info.Alignment);
+ assert(BaseOffset >= Size && "base offset already allocated");
+
VBases.insert(std::make_pair(BaseDecl,
ASTRecordLayout::VBaseInfo(BaseOffset, HasVtordisp)));
Size = BaseOffset + BaseLayout.getNonVirtualSize();
diff --git a/lib/Analysis/ScanfFormatString.cpp b/lib/AST/ScanfFormatString.cpp
index a9af0cdfda..bda97c57c8 100644
--- a/lib/Analysis/ScanfFormatString.cpp
+++ b/lib/AST/ScanfFormatString.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Analysis/Analyses/FormatString.h"
+#include "clang/AST/FormatString.h"
#include "FormatStringParsing.h"
#include "clang/Basic/TargetInfo.h"
diff --git a/lib/AST/Stmt.cpp b/lib/AST/Stmt.cpp
index b03b3a6837..116291bfa1 100644
--- a/lib/AST/Stmt.cpp
+++ b/lib/AST/Stmt.cpp
@@ -76,6 +76,14 @@ const char *Stmt::getStmtClassName() const {
return getStmtInfoTableEntry((StmtClass) StmtBits.sClass).Name;
}
+// Check that no statement / expression class is polymorphic. LLVM style RTTI
+// should be used instead. If absolutely needed an exception can still be added
+// here by defining the appropriate macro (but please don't do this).
+#define STMT(CLASS, PARENT) \
+ static_assert(!std::is_polymorphic<CLASS>::value, \
+ #CLASS " should not be polymorphic!");
+#include "clang/AST/StmtNodes.inc"
+
void Stmt::PrintStats() {
// Ensure the table is primed.
getStmtInfoTableEntry(Stmt::NullStmtClass);
@@ -118,8 +126,8 @@ Stmt *Stmt::IgnoreImplicit() {
while (s != lasts) {
lasts = s;
- if (auto *ewc = dyn_cast<ExprWithCleanups>(s))
- s = ewc->getSubExpr();
+ if (auto *fe = dyn_cast<FullExpr>(s))
+ s = fe->getSubExpr();
if (auto *mte = dyn_cast<MaterializeTemporaryExpr>(s))
s = mte->GetTemporaryExpr();
@@ -303,17 +311,15 @@ SourceLocation Stmt::getEndLoc() const {
}
int64_t Stmt::getID(const ASTContext &Context) const {
- Optional<int64_t> Out = Context.getAllocator().identifyObject(this);
- assert(Out && "Wrong allocator used");
- assert(*Out % alignof(Stmt) == 0 && "Wrong alignment information");
- return *Out / alignof(Stmt);
+ return Context.getAllocator().identifyKnownAlignedObject<Stmt>(this);
}
CompoundStmt::CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB,
SourceLocation RB)
- : Stmt(CompoundStmtClass), LBraceLoc(LB), RBraceLoc(RB) {
+ : Stmt(CompoundStmtClass), RBraceLoc(RB) {
CompoundStmtBits.NumStmts = Stmts.size();
setStmts(Stmts);
+ CompoundStmtBits.LBraceLoc = LB;
}
void CompoundStmt::setStmts(ArrayRef<Stmt *> Stmts) {
@@ -798,51 +804,99 @@ void MSAsmStmt::initialize(const ASTContext &C, StringRef asmstr,
});
}
-IfStmt::IfStmt(const ASTContext &C, SourceLocation IL, bool IsConstexpr,
- Stmt *init, VarDecl *var, Expr *cond, Stmt *then,
- SourceLocation EL, Stmt *elsev)
- : Stmt(IfStmtClass), IfLoc(IL), ElseLoc(EL) {
+IfStmt::IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr,
+ Stmt *Init, VarDecl *Var, Expr *Cond, Stmt *Then,
+ SourceLocation EL, Stmt *Else)
+ : Stmt(IfStmtClass) {
+ bool HasElse = Else != nullptr;
+ bool HasVar = Var != nullptr;
+ bool HasInit = Init != nullptr;
+ IfStmtBits.HasElse = HasElse;
+ IfStmtBits.HasVar = HasVar;
+ IfStmtBits.HasInit = HasInit;
+
setConstexpr(IsConstexpr);
- setConditionVariable(C, var);
- SubExprs[INIT] = init;
- SubExprs[COND] = cond;
- SubExprs[THEN] = then;
- SubExprs[ELSE] = elsev;
-}
-VarDecl *IfStmt::getConditionVariable() const {
- if (!SubExprs[VAR])
+ setCond(Cond);
+ setThen(Then);
+ if (HasElse)
+ setElse(Else);
+ if (HasVar)
+ setConditionVariable(Ctx, Var);
+ if (HasInit)
+ setInit(Init);
+
+ setIfLoc(IL);
+ if (HasElse)
+ setElseLoc(EL);
+}
+
+IfStmt::IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit)
+ : Stmt(IfStmtClass, Empty) {
+ IfStmtBits.HasElse = HasElse;
+ IfStmtBits.HasVar = HasVar;
+ IfStmtBits.HasInit = HasInit;
+}
+
+IfStmt *IfStmt::Create(const ASTContext &Ctx, SourceLocation IL,
+ bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond,
+ Stmt *Then, SourceLocation EL, Stmt *Else) {
+ bool HasElse = Else != nullptr;
+ bool HasVar = Var != nullptr;
+ bool HasInit = Init != nullptr;
+ void *Mem = Ctx.Allocate(
+ totalSizeToAlloc<Stmt *, SourceLocation>(
+ NumMandatoryStmtPtr + HasElse + HasVar + HasInit, HasElse),
+ alignof(IfStmt));
+ return new (Mem)
+ IfStmt(Ctx, IL, IsConstexpr, Init, Var, Cond, Then, EL, Else);
+}
+
+IfStmt *IfStmt::CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar,
+ bool HasInit) {
+ void *Mem = Ctx.Allocate(
+ totalSizeToAlloc<Stmt *, SourceLocation>(
+ NumMandatoryStmtPtr + HasElse + HasVar + HasInit, HasElse),
+ alignof(IfStmt));
+ return new (Mem) IfStmt(EmptyShell(), HasElse, HasVar, HasInit);
+}
+
+VarDecl *IfStmt::getConditionVariable() {
+ auto *DS = getConditionVariableDeclStmt();
+ if (!DS)
return nullptr;
-
- auto *DS = cast<DeclStmt>(SubExprs[VAR]);
return cast<VarDecl>(DS->getSingleDecl());
}
-void IfStmt::setConditionVariable(const ASTContext &C, VarDecl *V) {
+void IfStmt::setConditionVariable(const ASTContext &Ctx, VarDecl *V) {
+ assert(hasVarStorage() &&
+ "This if statement has no storage for a condition variable!");
+
if (!V) {
- SubExprs[VAR] = nullptr;
+ getTrailingObjects<Stmt *>()[varOffset()] = nullptr;
return;
}
SourceRange VarRange = V->getSourceRange();
- SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V), VarRange.getBegin(),
- VarRange.getEnd());
+ getTrailingObjects<Stmt *>()[varOffset()] = new (Ctx)
+ DeclStmt(DeclGroupRef(V), VarRange.getBegin(), VarRange.getEnd());
}
bool IfStmt::isObjCAvailabilityCheck() const {
- return isa<ObjCAvailabilityCheckExpr>(SubExprs[COND]);
+ return isa<ObjCAvailabilityCheckExpr>(getCond());
}
ForStmt::ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
SourceLocation RP)
- : Stmt(ForStmtClass), ForLoc(FL), LParenLoc(LP), RParenLoc(RP)
+ : Stmt(ForStmtClass), LParenLoc(LP), RParenLoc(RP)
{
SubExprs[INIT] = Init;
setConditionVariable(C, condVar);
SubExprs[COND] = Cond;
SubExprs[INC] = Inc;
SubExprs[BODY] = Body;
+ ForStmtBits.ForLoc = FL;
}
VarDecl *ForStmt::getConditionVariable() const {
@@ -864,66 +918,125 @@ void ForStmt::setConditionVariable(const ASTContext &C, VarDecl *V) {
VarRange.getEnd());
}
-SwitchStmt::SwitchStmt(const ASTContext &C, Stmt *init, VarDecl *Var,
- Expr *cond)
- : Stmt(SwitchStmtClass), FirstCase(nullptr, false) {
- setConditionVariable(C, Var);
- SubExprs[INIT] = init;
- SubExprs[COND] = cond;
- SubExprs[BODY] = nullptr;
+SwitchStmt::SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var,
+ Expr *Cond)
+ : Stmt(SwitchStmtClass), FirstCase(nullptr) {
+ bool HasInit = Init != nullptr;
+ bool HasVar = Var != nullptr;
+ SwitchStmtBits.HasInit = HasInit;
+ SwitchStmtBits.HasVar = HasVar;
+ SwitchStmtBits.AllEnumCasesCovered = false;
+
+ setCond(Cond);
+ setBody(nullptr);
+ if (HasInit)
+ setInit(Init);
+ if (HasVar)
+ setConditionVariable(Ctx, Var);
+
+ setSwitchLoc(SourceLocation{});
}
-VarDecl *SwitchStmt::getConditionVariable() const {
- if (!SubExprs[VAR])
- return nullptr;
+SwitchStmt::SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar)
+ : Stmt(SwitchStmtClass, Empty) {
+ SwitchStmtBits.HasInit = HasInit;
+ SwitchStmtBits.HasVar = HasVar;
+ SwitchStmtBits.AllEnumCasesCovered = false;
+}
+
+SwitchStmt *SwitchStmt::Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var,
+ Expr *Cond) {
+ bool HasInit = Init != nullptr;
+ bool HasVar = Var != nullptr;
+ void *Mem = Ctx.Allocate(
+ totalSizeToAlloc<Stmt *>(NumMandatoryStmtPtr + HasInit + HasVar),
+ alignof(SwitchStmt));
+ return new (Mem) SwitchStmt(Ctx, Init, Var, Cond);
+}
+
+SwitchStmt *SwitchStmt::CreateEmpty(const ASTContext &Ctx, bool HasInit,
+ bool HasVar) {
+ void *Mem = Ctx.Allocate(
+ totalSizeToAlloc<Stmt *>(NumMandatoryStmtPtr + HasInit + HasVar),
+ alignof(SwitchStmt));
+ return new (Mem) SwitchStmt(EmptyShell(), HasInit, HasVar);
+}
- auto *DS = cast<DeclStmt>(SubExprs[VAR]);
+VarDecl *SwitchStmt::getConditionVariable() {
+ auto *DS = getConditionVariableDeclStmt();
+ if (!DS)
+ return nullptr;
return cast<VarDecl>(DS->getSingleDecl());
}
-void SwitchStmt::setConditionVariable(const ASTContext &C, VarDecl *V) {
+void SwitchStmt::setConditionVariable(const ASTContext &Ctx, VarDecl *V) {
+ assert(hasVarStorage() &&
+ "This switch statement has no storage for a condition variable!");
+
if (!V) {
- SubExprs[VAR] = nullptr;
+ getTrailingObjects<Stmt *>()[varOffset()] = nullptr;
return;
}
SourceRange VarRange = V->getSourceRange();
- SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V), VarRange.getBegin(),
- VarRange.getEnd());
+ getTrailingObjects<Stmt *>()[varOffset()] = new (Ctx)
+ DeclStmt(DeclGroupRef(V), VarRange.getBegin(), VarRange.getEnd());
}
-Stmt *SwitchCase::getSubStmt() {
- if (isa<CaseStmt>(this))
- return cast<CaseStmt>(this)->getSubStmt();
- return cast<DefaultStmt>(this)->getSubStmt();
+WhileStmt::WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
+ Stmt *Body, SourceLocation WL)
+ : Stmt(WhileStmtClass) {
+ bool HasVar = Var != nullptr;
+ WhileStmtBits.HasVar = HasVar;
+
+ setCond(Cond);
+ setBody(Body);
+ if (HasVar)
+ setConditionVariable(Ctx, Var);
+
+ setWhileLoc(WL);
}
-WhileStmt::WhileStmt(const ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body,
- SourceLocation WL)
- : Stmt(WhileStmtClass) {
- setConditionVariable(C, Var);
- SubExprs[COND] = cond;
- SubExprs[BODY] = body;
- WhileLoc = WL;
+WhileStmt::WhileStmt(EmptyShell Empty, bool HasVar)
+ : Stmt(WhileStmtClass, Empty) {
+ WhileStmtBits.HasVar = HasVar;
}
-VarDecl *WhileStmt::getConditionVariable() const {
- if (!SubExprs[VAR])
- return nullptr;
+WhileStmt *WhileStmt::Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
+ Stmt *Body, SourceLocation WL) {
+ bool HasVar = Var != nullptr;
+ void *Mem =
+ Ctx.Allocate(totalSizeToAlloc<Stmt *>(NumMandatoryStmtPtr + HasVar),
+ alignof(WhileStmt));
+ return new (Mem) WhileStmt(Ctx, Var, Cond, Body, WL);
+}
+
+WhileStmt *WhileStmt::CreateEmpty(const ASTContext &Ctx, bool HasVar) {
+ void *Mem =
+ Ctx.Allocate(totalSizeToAlloc<Stmt *>(NumMandatoryStmtPtr + HasVar),
+ alignof(WhileStmt));
+ return new (Mem) WhileStmt(EmptyShell(), HasVar);
+}
- auto *DS = cast<DeclStmt>(SubExprs[VAR]);
+VarDecl *WhileStmt::getConditionVariable() {
+ auto *DS = getConditionVariableDeclStmt();
+ if (!DS)
+ return nullptr;
return cast<VarDecl>(DS->getSingleDecl());
}
-void WhileStmt::setConditionVariable(const ASTContext &C, VarDecl *V) {
+void WhileStmt::setConditionVariable(const ASTContext &Ctx, VarDecl *V) {
+ assert(hasVarStorage() &&
+ "This while statement has no storage for a condition variable!");
+
if (!V) {
- SubExprs[VAR] = nullptr;
+ getTrailingObjects<Stmt *>()[varOffset()] = nullptr;
return;
}
SourceRange VarRange = V->getSourceRange();
- SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V), VarRange.getBegin(),
- VarRange.getEnd());
+ getTrailingObjects<Stmt *>()[varOffset()] = new (Ctx)
+ DeclStmt(DeclGroupRef(V), VarRange.getBegin(), VarRange.getEnd());
}
// IndirectGotoStmt
@@ -934,11 +1047,54 @@ LabelDecl *IndirectGotoStmt::getConstantTarget() {
}
// ReturnStmt
-const Expr* ReturnStmt::getRetValue() const {
- return cast_or_null<Expr>(RetExpr);
-}
-Expr* ReturnStmt::getRetValue() {
- return cast_or_null<Expr>(RetExpr);
+ReturnStmt::ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate)
+ : Stmt(ReturnStmtClass), RetExpr(E) {
+ bool HasNRVOCandidate = NRVOCandidate != nullptr;
+ ReturnStmtBits.HasNRVOCandidate = HasNRVOCandidate;
+ if (HasNRVOCandidate)
+ setNRVOCandidate(NRVOCandidate);
+ setReturnLoc(RL);
+}
+
+ReturnStmt::ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate)
+ : Stmt(ReturnStmtClass, Empty) {
+ ReturnStmtBits.HasNRVOCandidate = HasNRVOCandidate;
+}
+
+ReturnStmt *ReturnStmt::Create(const ASTContext &Ctx, SourceLocation RL,
+ Expr *E, const VarDecl *NRVOCandidate) {
+ bool HasNRVOCandidate = NRVOCandidate != nullptr;
+ void *Mem = Ctx.Allocate(totalSizeToAlloc<const VarDecl *>(HasNRVOCandidate),
+ alignof(ReturnStmt));
+ return new (Mem) ReturnStmt(RL, E, NRVOCandidate);
+}
+
+ReturnStmt *ReturnStmt::CreateEmpty(const ASTContext &Ctx,
+ bool HasNRVOCandidate) {
+ void *Mem = Ctx.Allocate(totalSizeToAlloc<const VarDecl *>(HasNRVOCandidate),
+ alignof(ReturnStmt));
+ return new (Mem) ReturnStmt(EmptyShell(), HasNRVOCandidate);
+}
+
+// CaseStmt
+CaseStmt *CaseStmt::Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
+ SourceLocation caseLoc, SourceLocation ellipsisLoc,
+ SourceLocation colonLoc) {
+ bool CaseStmtIsGNURange = rhs != nullptr;
+ void *Mem = Ctx.Allocate(
+ totalSizeToAlloc<Stmt *, SourceLocation>(
+ NumMandatoryStmtPtr + CaseStmtIsGNURange, CaseStmtIsGNURange),
+ alignof(CaseStmt));
+ return new (Mem) CaseStmt(lhs, rhs, caseLoc, ellipsisLoc, colonLoc);
+}
+
+CaseStmt *CaseStmt::CreateEmpty(const ASTContext &Ctx,
+ bool CaseStmtIsGNURange) {
+ void *Mem = Ctx.Allocate(
+ totalSizeToAlloc<Stmt *, SourceLocation>(
+ NumMandatoryStmtPtr + CaseStmtIsGNURange, CaseStmtIsGNURange),
+ alignof(CaseStmt));
+ return new (Mem) CaseStmt(EmptyShell(), CaseStmtIsGNURange);
}
SEHTryStmt::SEHTryStmt(bool IsCXXTry, SourceLocation TryLoc, Stmt *TryBlock,
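
The IfStmt/SwitchStmt/WhileStmt/ReturnStmt rewrites above size each allocation from presence flags and store the optional children (else branch, init statement, condition variable, NRVO candidate) in storage that trails the object, so statements without those children pay nothing for them. Here is a minimal, LLVM-free sketch of the same tail-allocation idiom; Node, create and destroy are invented, while the real code uses llvm::TrailingObjects and ASTContext allocation.

    #include <cstddef>
    #include <iostream>
    #include <new>

    // Optional children live in a pointer array placed immediately after the
    // object; presence bits record which slots exist.
    struct alignas(void *) Node {
      unsigned HasElse : 1;
      unsigned HasInit : 1;

      // Mandatory children (cond, then) come first, then the optional ones.
      void **trailing() { return reinterpret_cast<void **>(this + 1); }
      unsigned numChildren() const { return 2 + HasElse + HasInit; }

      static Node *create(void *Cond, void *Then, void *Else, void *Init) {
        std::size_t NumPtrs = 2 + (Else != nullptr) + (Init != nullptr);
        void *Mem = ::operator new(sizeof(Node) + NumPtrs * sizeof(void *));
        Node *N = new (Mem) Node();
        N->HasElse = Else != nullptr;
        N->HasInit = Init != nullptr;
        void **P = N->trailing();
        *P++ = Cond;
        *P++ = Then;
        if (Else)
          *P++ = Else;
        if (Init)
          *P++ = Init;
        return N;
      }

      static void destroy(Node *N) {
        N->~Node();
        ::operator delete(N);
      }
    };

    int main() {
      int C = 0, T = 1;
      Node *Small = Node::create(&C, &T, nullptr, nullptr);
      Node *Big = Node::create(&C, &T, &T, &C);
      std::cout << Small->numChildren() << ' ' << Big->numChildren() << '\n';
      // prints: 2 4
      Node::destroy(Small);
      Node::destroy(Big);
    }
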
diff --git a/lib/AST/StmtOpenMP.cpp b/lib/AST/StmtOpenMP.cpp
index 1258af7a2d..85a2daa080 100644
--- a/lib/AST/StmtOpenMP.cpp
+++ b/lib/AST/StmtOpenMP.cpp
@@ -1079,6 +1079,8 @@ OMPDistributeParallelForDirective *OMPDistributeParallelForDirective::Create(
Dir->setCombinedCond(Exprs.DistCombinedFields.Cond);
Dir->setCombinedNextLowerBound(Exprs.DistCombinedFields.NLB);
Dir->setCombinedNextUpperBound(Exprs.DistCombinedFields.NUB);
+ Dir->setCombinedDistCond(Exprs.DistCombinedFields.DistCond);
+ Dir->setCombinedParForInDistCond(Exprs.DistCombinedFields.ParForInDistCond);
Dir->HasCancel = HasCancel;
return Dir;
}
@@ -1145,6 +1147,8 @@ OMPDistributeParallelForSimdDirective::Create(
Dir->setCombinedCond(Exprs.DistCombinedFields.Cond);
Dir->setCombinedNextLowerBound(Exprs.DistCombinedFields.NLB);
Dir->setCombinedNextUpperBound(Exprs.DistCombinedFields.NUB);
+ Dir->setCombinedDistCond(Exprs.DistCombinedFields.DistCond);
+ Dir->setCombinedParForInDistCond(Exprs.DistCombinedFields.ParForInDistCond);
return Dir;
}
@@ -1457,6 +1461,8 @@ OMPTeamsDistributeParallelForSimdDirective::Create(
Dir->setCombinedCond(Exprs.DistCombinedFields.Cond);
Dir->setCombinedNextLowerBound(Exprs.DistCombinedFields.NLB);
Dir->setCombinedNextUpperBound(Exprs.DistCombinedFields.NUB);
+ Dir->setCombinedDistCond(Exprs.DistCombinedFields.DistCond);
+ Dir->setCombinedParForInDistCond(Exprs.DistCombinedFields.ParForInDistCond);
return Dir;
}
@@ -1524,6 +1530,8 @@ OMPTeamsDistributeParallelForDirective::Create(
Dir->setCombinedCond(Exprs.DistCombinedFields.Cond);
Dir->setCombinedNextLowerBound(Exprs.DistCombinedFields.NLB);
Dir->setCombinedNextUpperBound(Exprs.DistCombinedFields.NUB);
+ Dir->setCombinedDistCond(Exprs.DistCombinedFields.DistCond);
+ Dir->setCombinedParForInDistCond(Exprs.DistCombinedFields.ParForInDistCond);
Dir->HasCancel = HasCancel;
return Dir;
}
@@ -1670,6 +1678,8 @@ OMPTargetTeamsDistributeParallelForDirective::Create(
Dir->setCombinedCond(Exprs.DistCombinedFields.Cond);
Dir->setCombinedNextLowerBound(Exprs.DistCombinedFields.NLB);
Dir->setCombinedNextUpperBound(Exprs.DistCombinedFields.NUB);
+ Dir->setCombinedDistCond(Exprs.DistCombinedFields.DistCond);
+ Dir->setCombinedParForInDistCond(Exprs.DistCombinedFields.ParForInDistCond);
Dir->HasCancel = HasCancel;
return Dir;
}
@@ -1741,6 +1751,8 @@ OMPTargetTeamsDistributeParallelForSimdDirective::Create(
Dir->setCombinedCond(Exprs.DistCombinedFields.Cond);
Dir->setCombinedNextLowerBound(Exprs.DistCombinedFields.NLB);
Dir->setCombinedNextUpperBound(Exprs.DistCombinedFields.NUB);
+ Dir->setCombinedDistCond(Exprs.DistCombinedFields.DistCond);
+ Dir->setCombinedParForInDistCond(Exprs.DistCombinedFields.ParForInDistCond);
return Dir;
}
diff --git a/lib/AST/StmtPrinter.cpp b/lib/AST/StmtPrinter.cpp
index 51591953e7..ae726e3871 100644
--- a/lib/AST/StmtPrinter.cpp
+++ b/lib/AST/StmtPrinter.cpp
@@ -622,443 +622,6 @@ void StmtPrinter::VisitSEHLeaveStmt(SEHLeaveStmt *Node) {
}
//===----------------------------------------------------------------------===//
-// OpenMP clauses printing methods
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-class OMPClausePrinter : public OMPClauseVisitor<OMPClausePrinter> {
- raw_ostream &OS;
- const PrintingPolicy &Policy;
-
- /// Process clauses with list of variables.
- template <typename T>
- void VisitOMPClauseList(T *Node, char StartSym);
-
-public:
- OMPClausePrinter(raw_ostream &OS, const PrintingPolicy &Policy)
- : OS(OS), Policy(Policy) {}
-
-#define OPENMP_CLAUSE(Name, Class) \
- void Visit##Class(Class *S);
-#include "clang/Basic/OpenMPKinds.def"
-};
-
-} // namespace
-
-void OMPClausePrinter::VisitOMPIfClause(OMPIfClause *Node) {
- OS << "if(";
- if (Node->getNameModifier() != OMPD_unknown)
- OS << getOpenMPDirectiveName(Node->getNameModifier()) << ": ";
- Node->getCondition()->printPretty(OS, nullptr, Policy, 0);
- OS << ")";
-}
-
-void OMPClausePrinter::VisitOMPFinalClause(OMPFinalClause *Node) {
- OS << "final(";
- Node->getCondition()->printPretty(OS, nullptr, Policy, 0);
- OS << ")";
-}
-
-void OMPClausePrinter::VisitOMPNumThreadsClause(OMPNumThreadsClause *Node) {
- OS << "num_threads(";
- Node->getNumThreads()->printPretty(OS, nullptr, Policy, 0);
- OS << ")";
-}
-
-void OMPClausePrinter::VisitOMPSafelenClause(OMPSafelenClause *Node) {
- OS << "safelen(";
- Node->getSafelen()->printPretty(OS, nullptr, Policy, 0);
- OS << ")";
-}
-
-void OMPClausePrinter::VisitOMPSimdlenClause(OMPSimdlenClause *Node) {
- OS << "simdlen(";
- Node->getSimdlen()->printPretty(OS, nullptr, Policy, 0);
- OS << ")";
-}
-
-void OMPClausePrinter::VisitOMPCollapseClause(OMPCollapseClause *Node) {
- OS << "collapse(";
- Node->getNumForLoops()->printPretty(OS, nullptr, Policy, 0);
- OS << ")";
-}
-
-void OMPClausePrinter::VisitOMPDefaultClause(OMPDefaultClause *Node) {
- OS << "default("
- << getOpenMPSimpleClauseTypeName(OMPC_default, Node->getDefaultKind())
- << ")";
-}
-
-void OMPClausePrinter::VisitOMPProcBindClause(OMPProcBindClause *Node) {
- OS << "proc_bind("
- << getOpenMPSimpleClauseTypeName(OMPC_proc_bind, Node->getProcBindKind())
- << ")";
-}
-
-void OMPClausePrinter::VisitOMPUnifiedAddressClause(OMPUnifiedAddressClause *) {
- OS << "unified_address";
-}
-
-void OMPClausePrinter::VisitOMPScheduleClause(OMPScheduleClause *Node) {
- OS << "schedule(";
- if (Node->getFirstScheduleModifier() != OMPC_SCHEDULE_MODIFIER_unknown) {
- OS << getOpenMPSimpleClauseTypeName(OMPC_schedule,
- Node->getFirstScheduleModifier());
- if (Node->getSecondScheduleModifier() != OMPC_SCHEDULE_MODIFIER_unknown) {
- OS << ", ";
- OS << getOpenMPSimpleClauseTypeName(OMPC_schedule,
- Node->getSecondScheduleModifier());
- }
- OS << ": ";
- }
- OS << getOpenMPSimpleClauseTypeName(OMPC_schedule, Node->getScheduleKind());
- if (auto *E = Node->getChunkSize()) {
- OS << ", ";
- E->printPretty(OS, nullptr, Policy);
- }
- OS << ")";
-}
-
-void OMPClausePrinter::VisitOMPOrderedClause(OMPOrderedClause *Node) {
- OS << "ordered";
- if (auto *Num = Node->getNumForLoops()) {
- OS << "(";
- Num->printPretty(OS, nullptr, Policy, 0);
- OS << ")";
- }
-}
-
-void OMPClausePrinter::VisitOMPNowaitClause(OMPNowaitClause *) {
- OS << "nowait";
-}
-
-void OMPClausePrinter::VisitOMPUntiedClause(OMPUntiedClause *) {
- OS << "untied";
-}
-
-void OMPClausePrinter::VisitOMPNogroupClause(OMPNogroupClause *) {
- OS << "nogroup";
-}
-
-void OMPClausePrinter::VisitOMPMergeableClause(OMPMergeableClause *) {
- OS << "mergeable";
-}
-
-void OMPClausePrinter::VisitOMPReadClause(OMPReadClause *) { OS << "read"; }
-
-void OMPClausePrinter::VisitOMPWriteClause(OMPWriteClause *) { OS << "write"; }
-
-void OMPClausePrinter::VisitOMPUpdateClause(OMPUpdateClause *) {
- OS << "update";
-}
-
-void OMPClausePrinter::VisitOMPCaptureClause(OMPCaptureClause *) {
- OS << "capture";
-}
-
-void OMPClausePrinter::VisitOMPSeqCstClause(OMPSeqCstClause *) {
- OS << "seq_cst";
-}
-
-void OMPClausePrinter::VisitOMPThreadsClause(OMPThreadsClause *) {
- OS << "threads";
-}
-
-void OMPClausePrinter::VisitOMPSIMDClause(OMPSIMDClause *) { OS << "simd"; }
-
-void OMPClausePrinter::VisitOMPDeviceClause(OMPDeviceClause *Node) {
- OS << "device(";
- Node->getDevice()->printPretty(OS, nullptr, Policy, 0);
- OS << ")";
-}
-
-void OMPClausePrinter::VisitOMPNumTeamsClause(OMPNumTeamsClause *Node) {
- OS << "num_teams(";
- Node->getNumTeams()->printPretty(OS, nullptr, Policy, 0);
- OS << ")";
-}
-
-void OMPClausePrinter::VisitOMPThreadLimitClause(OMPThreadLimitClause *Node) {
- OS << "thread_limit(";
- Node->getThreadLimit()->printPretty(OS, nullptr, Policy, 0);
- OS << ")";
-}
-
-void OMPClausePrinter::VisitOMPPriorityClause(OMPPriorityClause *Node) {
- OS << "priority(";
- Node->getPriority()->printPretty(OS, nullptr, Policy, 0);
- OS << ")";
-}
-
-void OMPClausePrinter::VisitOMPGrainsizeClause(OMPGrainsizeClause *Node) {
- OS << "grainsize(";
- Node->getGrainsize()->printPretty(OS, nullptr, Policy, 0);
- OS << ")";
-}
-
-void OMPClausePrinter::VisitOMPNumTasksClause(OMPNumTasksClause *Node) {
- OS << "num_tasks(";
- Node->getNumTasks()->printPretty(OS, nullptr, Policy, 0);
- OS << ")";
-}
-
-void OMPClausePrinter::VisitOMPHintClause(OMPHintClause *Node) {
- OS << "hint(";
- Node->getHint()->printPretty(OS, nullptr, Policy, 0);
- OS << ")";
-}
-
-template<typename T>
-void OMPClausePrinter::VisitOMPClauseList(T *Node, char StartSym) {
- for (typename T::varlist_iterator I = Node->varlist_begin(),
- E = Node->varlist_end();
- I != E; ++I) {
- assert(*I && "Expected non-null Stmt");
- OS << (I == Node->varlist_begin() ? StartSym : ',');
- if (auto *DRE = dyn_cast<DeclRefExpr>(*I)) {
- if (isa<OMPCapturedExprDecl>(DRE->getDecl()))
- DRE->printPretty(OS, nullptr, Policy, 0);
- else
- DRE->getDecl()->printQualifiedName(OS);
- } else
- (*I)->printPretty(OS, nullptr, Policy, 0);
- }
-}
-
-void OMPClausePrinter::VisitOMPPrivateClause(OMPPrivateClause *Node) {
- if (!Node->varlist_empty()) {
- OS << "private";
- VisitOMPClauseList(Node, '(');
- OS << ")";
- }
-}
-
-void OMPClausePrinter::VisitOMPFirstprivateClause(OMPFirstprivateClause *Node) {
- if (!Node->varlist_empty()) {
- OS << "firstprivate";
- VisitOMPClauseList(Node, '(');
- OS << ")";
- }
-}
-
-void OMPClausePrinter::VisitOMPLastprivateClause(OMPLastprivateClause *Node) {
- if (!Node->varlist_empty()) {
- OS << "lastprivate";
- VisitOMPClauseList(Node, '(');
- OS << ")";
- }
-}
-
-void OMPClausePrinter::VisitOMPSharedClause(OMPSharedClause *Node) {
- if (!Node->varlist_empty()) {
- OS << "shared";
- VisitOMPClauseList(Node, '(');
- OS << ")";
- }
-}
-
-void OMPClausePrinter::VisitOMPReductionClause(OMPReductionClause *Node) {
- if (!Node->varlist_empty()) {
- OS << "reduction(";
- NestedNameSpecifier *QualifierLoc =
- Node->getQualifierLoc().getNestedNameSpecifier();
- OverloadedOperatorKind OOK =
- Node->getNameInfo().getName().getCXXOverloadedOperator();
- if (QualifierLoc == nullptr && OOK != OO_None) {
- // Print reduction identifier in C format
- OS << getOperatorSpelling(OOK);
- } else {
- // Use C++ format
- if (QualifierLoc != nullptr)
- QualifierLoc->print(OS, Policy);
- OS << Node->getNameInfo();
- }
- OS << ":";
- VisitOMPClauseList(Node, ' ');
- OS << ")";
- }
-}
-
-void OMPClausePrinter::VisitOMPTaskReductionClause(
- OMPTaskReductionClause *Node) {
- if (!Node->varlist_empty()) {
- OS << "task_reduction(";
- NestedNameSpecifier *QualifierLoc =
- Node->getQualifierLoc().getNestedNameSpecifier();
- OverloadedOperatorKind OOK =
- Node->getNameInfo().getName().getCXXOverloadedOperator();
- if (QualifierLoc == nullptr && OOK != OO_None) {
- // Print reduction identifier in C format
- OS << getOperatorSpelling(OOK);
- } else {
- // Use C++ format
- if (QualifierLoc != nullptr)
- QualifierLoc->print(OS, Policy);
- OS << Node->getNameInfo();
- }
- OS << ":";
- VisitOMPClauseList(Node, ' ');
- OS << ")";
- }
-}
-
-void OMPClausePrinter::VisitOMPInReductionClause(OMPInReductionClause *Node) {
- if (!Node->varlist_empty()) {
- OS << "in_reduction(";
- NestedNameSpecifier *QualifierLoc =
- Node->getQualifierLoc().getNestedNameSpecifier();
- OverloadedOperatorKind OOK =
- Node->getNameInfo().getName().getCXXOverloadedOperator();
- if (QualifierLoc == nullptr && OOK != OO_None) {
- // Print reduction identifier in C format
- OS << getOperatorSpelling(OOK);
- } else {
- // Use C++ format
- if (QualifierLoc != nullptr)
- QualifierLoc->print(OS, Policy);
- OS << Node->getNameInfo();
- }
- OS << ":";
- VisitOMPClauseList(Node, ' ');
- OS << ")";
- }
-}
-
-void OMPClausePrinter::VisitOMPLinearClause(OMPLinearClause *Node) {
- if (!Node->varlist_empty()) {
- OS << "linear";
- if (Node->getModifierLoc().isValid()) {
- OS << '('
- << getOpenMPSimpleClauseTypeName(OMPC_linear, Node->getModifier());
- }
- VisitOMPClauseList(Node, '(');
- if (Node->getModifierLoc().isValid())
- OS << ')';
- if (Node->getStep() != nullptr) {
- OS << ": ";
- Node->getStep()->printPretty(OS, nullptr, Policy, 0);
- }
- OS << ")";
- }
-}
-
-void OMPClausePrinter::VisitOMPAlignedClause(OMPAlignedClause *Node) {
- if (!Node->varlist_empty()) {
- OS << "aligned";
- VisitOMPClauseList(Node, '(');
- if (Node->getAlignment() != nullptr) {
- OS << ": ";
- Node->getAlignment()->printPretty(OS, nullptr, Policy, 0);
- }
- OS << ")";
- }
-}
-
-void OMPClausePrinter::VisitOMPCopyinClause(OMPCopyinClause *Node) {
- if (!Node->varlist_empty()) {
- OS << "copyin";
- VisitOMPClauseList(Node, '(');
- OS << ")";
- }
-}
-
-void OMPClausePrinter::VisitOMPCopyprivateClause(OMPCopyprivateClause *Node) {
- if (!Node->varlist_empty()) {
- OS << "copyprivate";
- VisitOMPClauseList(Node, '(');
- OS << ")";
- }
-}
-
-void OMPClausePrinter::VisitOMPFlushClause(OMPFlushClause *Node) {
- if (!Node->varlist_empty()) {
- VisitOMPClauseList(Node, '(');
- OS << ")";
- }
-}
-
-void OMPClausePrinter::VisitOMPDependClause(OMPDependClause *Node) {
- OS << "depend(";
- OS << getOpenMPSimpleClauseTypeName(Node->getClauseKind(),
- Node->getDependencyKind());
- if (!Node->varlist_empty()) {
- OS << " :";
- VisitOMPClauseList(Node, ' ');
- }
- OS << ")";
-}
-
-void OMPClausePrinter::VisitOMPMapClause(OMPMapClause *Node) {
- if (!Node->varlist_empty()) {
- OS << "map(";
- if (Node->getMapType() != OMPC_MAP_unknown) {
- if (Node->getMapTypeModifier() != OMPC_MAP_unknown) {
- OS << getOpenMPSimpleClauseTypeName(OMPC_map,
- Node->getMapTypeModifier());
- OS << ',';
- }
- OS << getOpenMPSimpleClauseTypeName(OMPC_map, Node->getMapType());
- OS << ':';
- }
- VisitOMPClauseList(Node, ' ');
- OS << ")";
- }
-}
-
-void OMPClausePrinter::VisitOMPToClause(OMPToClause *Node) {
- if (!Node->varlist_empty()) {
- OS << "to";
- VisitOMPClauseList(Node, '(');
- OS << ")";
- }
-}
-
-void OMPClausePrinter::VisitOMPFromClause(OMPFromClause *Node) {
- if (!Node->varlist_empty()) {
- OS << "from";
- VisitOMPClauseList(Node, '(');
- OS << ")";
- }
-}
-
-void OMPClausePrinter::VisitOMPDistScheduleClause(OMPDistScheduleClause *Node) {
- OS << "dist_schedule(" << getOpenMPSimpleClauseTypeName(
- OMPC_dist_schedule, Node->getDistScheduleKind());
- if (auto *E = Node->getChunkSize()) {
- OS << ", ";
- E->printPretty(OS, nullptr, Policy);
- }
- OS << ")";
-}
-
-void OMPClausePrinter::VisitOMPDefaultmapClause(OMPDefaultmapClause *Node) {
- OS << "defaultmap(";
- OS << getOpenMPSimpleClauseTypeName(OMPC_defaultmap,
- Node->getDefaultmapModifier());
- OS << ": ";
- OS << getOpenMPSimpleClauseTypeName(OMPC_defaultmap,
- Node->getDefaultmapKind());
- OS << ")";
-}
-
-void OMPClausePrinter::VisitOMPUseDevicePtrClause(OMPUseDevicePtrClause *Node) {
- if (!Node->varlist_empty()) {
- OS << "use_device_ptr";
- VisitOMPClauseList(Node, '(');
- OS << ")";
- }
-}
-
-void OMPClausePrinter::VisitOMPIsDevicePtrClause(OMPIsDevicePtrClause *Node) {
- if (!Node->varlist_empty()) {
- OS << "is_device_ptr";
- VisitOMPClauseList(Node, '(');
- OS << ")";
- }
-}
-
-//===----------------------------------------------------------------------===//
// OpenMP directives printing methods
//===----------------------------------------------------------------------===//
@@ -1343,6 +906,10 @@ void StmtPrinter::VisitOMPTargetTeamsDistributeSimdDirective(
// Expr printing methods.
//===----------------------------------------------------------------------===//
+void StmtPrinter::VisitConstantExpr(ConstantExpr *Node) {
+ PrintExpr(Node->getSubExpr());
+}
+
void StmtPrinter::VisitDeclRefExpr(DeclRefExpr *Node) {
if (const auto *OCED = dyn_cast<OMPCapturedExprDecl>(Node->getDecl())) {
OCED->getInit()->IgnoreImpCasts()->printPretty(OS, nullptr, Policy);
@@ -1428,7 +995,7 @@ void StmtPrinter::VisitObjCSubscriptRefExpr(ObjCSubscriptRefExpr *Node) {
}
void StmtPrinter::VisitPredefinedExpr(PredefinedExpr *Node) {
- OS << PredefinedExpr::getIdentTypeName(Node->getIdentType());
+ OS << PredefinedExpr::getIdentKindName(Node->getIdentKind());
}
void StmtPrinter::VisitCharacterLiteral(CharacterLiteral *Node) {
@@ -1672,6 +1239,9 @@ void StmtPrinter::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *Node){
else
OS << "__alignof";
break;
+ case UETT_PreferredAlignOf:
+ OS << "__alignof";
+ break;
case UETT_VecStep:
OS << "vec_step";
break;
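
The new UETT_PreferredAlignOf case above prints the GNU __alignof spelling, which Clang models as a query for the preferred rather than the ABI alignment. A short, compiler-dependent illustration (requires GCC or Clang for __alignof__; the two numbers coincide on many 64-bit targets):

    #include <iostream>

    int main() {
      // alignof reports the ABI-required alignment; the GNU __alignof__
      // extension reports the preferred alignment, which can be larger on
      // some targets (a classic case is 64-bit integers on 32-bit x86).
      std::cout << "alignof(long long)     = " << alignof(long long) << '\n';
      std::cout << "__alignof__(long long) = " << __alignof__(long long) << '\n';
    }
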
diff --git a/lib/AST/StmtProfile.cpp b/lib/AST/StmtProfile.cpp
index b97812345b..ec4dac03d4 100644
--- a/lib/AST/StmtProfile.cpp
+++ b/lib/AST/StmtProfile.cpp
@@ -470,6 +470,18 @@ void OMPClauseProfiler::VisitOMPProcBindClause(const OMPProcBindClause *C) { }
void OMPClauseProfiler::VisitOMPUnifiedAddressClause(
const OMPUnifiedAddressClause *C) {}
+void OMPClauseProfiler::VisitOMPUnifiedSharedMemoryClause(
+ const OMPUnifiedSharedMemoryClause *C) {}
+
+void OMPClauseProfiler::VisitOMPReverseOffloadClause(
+ const OMPReverseOffloadClause *C) {}
+
+void OMPClauseProfiler::VisitOMPDynamicAllocatorsClause(
+ const OMPDynamicAllocatorsClause *C) {}
+
+void OMPClauseProfiler::VisitOMPAtomicDefaultMemOrderClause(
+ const OMPAtomicDefaultMemOrderClause *C) {}
+
void OMPClauseProfiler::VisitOMPScheduleClause(const OMPScheduleClause *C) {
VistOMPClauseWithPreInit(C);
if (auto *S = C->getChunkSize())
@@ -987,6 +999,10 @@ void StmtProfiler::VisitExpr(const Expr *S) {
VisitStmt(S);
}
+void StmtProfiler::VisitConstantExpr(const ConstantExpr *S) {
+ VisitExpr(S);
+}
+
void StmtProfiler::VisitDeclRefExpr(const DeclRefExpr *S) {
VisitExpr(S);
if (!Canonical)
@@ -1001,7 +1017,7 @@ void StmtProfiler::VisitDeclRefExpr(const DeclRefExpr *S) {
void StmtProfiler::VisitPredefinedExpr(const PredefinedExpr *S) {
VisitExpr(S);
- ID.AddInteger(S->getIdentType());
+ ID.AddInteger(S->getIdentKind());
}
void StmtProfiler::VisitIntegerLiteral(const IntegerLiteral *S) {
diff --git a/lib/AST/TextNodeDumper.cpp b/lib/AST/TextNodeDumper.cpp
new file mode 100644
index 0000000000..27d8f25393
--- /dev/null
+++ b/lib/AST/TextNodeDumper.cpp
@@ -0,0 +1,285 @@
+//===--- TextNodeDumper.cpp - Printing of AST nodes -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements AST dumping of components of individual AST nodes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/TextNodeDumper.h"
+
+using namespace clang;
+
+TextNodeDumper::TextNodeDumper(raw_ostream &OS, bool ShowColors,
+ const SourceManager *SM,
+ const PrintingPolicy &PrintPolicy,
+ const comments::CommandTraits *Traits)
+ : OS(OS), ShowColors(ShowColors), SM(SM), PrintPolicy(PrintPolicy),
+ Traits(Traits) {}
+
+void TextNodeDumper::Visit(const comments::Comment *C,
+ const comments::FullComment *FC) {
+ if (!C) {
+ ColorScope Color(OS, ShowColors, NullColor);
+ OS << "<<<NULL>>>";
+ return;
+ }
+
+ {
+ ColorScope Color(OS, ShowColors, CommentColor);
+ OS << C->getCommentKindName();
+ }
+ dumpPointer(C);
+ dumpSourceRange(C->getSourceRange());
+
+ ConstCommentVisitor<TextNodeDumper, void,
+ const comments::FullComment *>::visit(C, FC);
+}
+
+void TextNodeDumper::dumpPointer(const void *Ptr) {
+ ColorScope Color(OS, ShowColors, AddressColor);
+ OS << ' ' << Ptr;
+}
+
+void TextNodeDumper::dumpLocation(SourceLocation Loc) {
+ if (!SM)
+ return;
+
+ ColorScope Color(OS, ShowColors, LocationColor);
+ SourceLocation SpellingLoc = SM->getSpellingLoc(Loc);
+
+ // The general format we print out is filename:line:col, but we drop pieces
+ // that haven't changed since the last loc printed.
+ PresumedLoc PLoc = SM->getPresumedLoc(SpellingLoc);
+
+ if (PLoc.isInvalid()) {
+ OS << "<invalid sloc>";
+ return;
+ }
+
+ if (strcmp(PLoc.getFilename(), LastLocFilename) != 0) {
+ OS << PLoc.getFilename() << ':' << PLoc.getLine() << ':'
+ << PLoc.getColumn();
+ LastLocFilename = PLoc.getFilename();
+ LastLocLine = PLoc.getLine();
+ } else if (PLoc.getLine() != LastLocLine) {
+ OS << "line" << ':' << PLoc.getLine() << ':' << PLoc.getColumn();
+ LastLocLine = PLoc.getLine();
+ } else {
+ OS << "col" << ':' << PLoc.getColumn();
+ }
+}
+
+void TextNodeDumper::dumpSourceRange(SourceRange R) {
+ // Can't translate locations if a SourceManager isn't available.
+ if (!SM)
+ return;
+
+ OS << " <";
+ dumpLocation(R.getBegin());
+ if (R.getBegin() != R.getEnd()) {
+ OS << ", ";
+ dumpLocation(R.getEnd());
+ }
+ OS << ">";
+
+ // <t2.c:123:421[blah], t2.c:412:321>
+}
+
+void TextNodeDumper::dumpBareType(QualType T, bool Desugar) {
+ ColorScope Color(OS, ShowColors, TypeColor);
+
+ SplitQualType T_split = T.split();
+ OS << "'" << QualType::getAsString(T_split, PrintPolicy) << "'";
+
+ if (Desugar && !T.isNull()) {
+ // If the type is sugared, also dump a (shallow) desugared type.
+ SplitQualType D_split = T.getSplitDesugaredType();
+ if (T_split != D_split)
+ OS << ":'" << QualType::getAsString(D_split, PrintPolicy) << "'";
+ }
+}
+
+void TextNodeDumper::dumpType(QualType T) {
+ OS << ' ';
+ dumpBareType(T);
+}
+
+void TextNodeDumper::dumpBareDeclRef(const Decl *D) {
+ if (!D) {
+ ColorScope Color(OS, ShowColors, NullColor);
+ OS << "<<<NULL>>>";
+ return;
+ }
+
+ {
+ ColorScope Color(OS, ShowColors, DeclKindNameColor);
+ OS << D->getDeclKindName();
+ }
+ dumpPointer(D);
+
+ if (const NamedDecl *ND = dyn_cast<NamedDecl>(D)) {
+ ColorScope Color(OS, ShowColors, DeclNameColor);
+ OS << " '" << ND->getDeclName() << '\'';
+ }
+
+ if (const ValueDecl *VD = dyn_cast<ValueDecl>(D))
+ dumpType(VD->getType());
+}
+
+void TextNodeDumper::dumpName(const NamedDecl *ND) {
+ if (ND->getDeclName()) {
+ ColorScope Color(OS, ShowColors, DeclNameColor);
+ OS << ' ' << ND->getNameAsString();
+ }
+}
+
+void TextNodeDumper::dumpAccessSpecifier(AccessSpecifier AS) {
+ switch (AS) {
+ case AS_none:
+ break;
+ case AS_public:
+ OS << "public";
+ break;
+ case AS_protected:
+ OS << "protected";
+ break;
+ case AS_private:
+ OS << "private";
+ break;
+ }
+}
+
+void TextNodeDumper::dumpCXXTemporary(const CXXTemporary *Temporary) {
+ OS << "(CXXTemporary";
+ dumpPointer(Temporary);
+ OS << ")";
+}
+
+const char *TextNodeDumper::getCommandName(unsigned CommandID) {
+ if (Traits)
+ return Traits->getCommandInfo(CommandID)->Name;
+ const comments::CommandInfo *Info =
+ comments::CommandTraits::getBuiltinCommandInfo(CommandID);
+ if (Info)
+ return Info->Name;
+ return "<not a builtin command>";
+}
+
+void TextNodeDumper::visitTextComment(const comments::TextComment *C,
+ const comments::FullComment *) {
+ OS << " Text=\"" << C->getText() << "\"";
+}
+
+void TextNodeDumper::visitInlineCommandComment(
+ const comments::InlineCommandComment *C, const comments::FullComment *) {
+ OS << " Name=\"" << getCommandName(C->getCommandID()) << "\"";
+ switch (C->getRenderKind()) {
+ case comments::InlineCommandComment::RenderNormal:
+ OS << " RenderNormal";
+ break;
+ case comments::InlineCommandComment::RenderBold:
+ OS << " RenderBold";
+ break;
+ case comments::InlineCommandComment::RenderMonospaced:
+ OS << " RenderMonospaced";
+ break;
+ case comments::InlineCommandComment::RenderEmphasized:
+ OS << " RenderEmphasized";
+ break;
+ }
+
+ for (unsigned i = 0, e = C->getNumArgs(); i != e; ++i)
+ OS << " Arg[" << i << "]=\"" << C->getArgText(i) << "\"";
+}
+
+void TextNodeDumper::visitHTMLStartTagComment(
+ const comments::HTMLStartTagComment *C, const comments::FullComment *) {
+ OS << " Name=\"" << C->getTagName() << "\"";
+ if (C->getNumAttrs() != 0) {
+ OS << " Attrs: ";
+ for (unsigned i = 0, e = C->getNumAttrs(); i != e; ++i) {
+ const comments::HTMLStartTagComment::Attribute &Attr = C->getAttr(i);
+ OS << " \"" << Attr.Name << "=\"" << Attr.Value << "\"";
+ }
+ }
+ if (C->isSelfClosing())
+ OS << " SelfClosing";
+}
+
+void TextNodeDumper::visitHTMLEndTagComment(
+ const comments::HTMLEndTagComment *C, const comments::FullComment *) {
+ OS << " Name=\"" << C->getTagName() << "\"";
+}
+
+void TextNodeDumper::visitBlockCommandComment(
+ const comments::BlockCommandComment *C, const comments::FullComment *) {
+ OS << " Name=\"" << getCommandName(C->getCommandID()) << "\"";
+ for (unsigned i = 0, e = C->getNumArgs(); i != e; ++i)
+ OS << " Arg[" << i << "]=\"" << C->getArgText(i) << "\"";
+}
+
+void TextNodeDumper::visitParamCommandComment(
+ const comments::ParamCommandComment *C, const comments::FullComment *FC) {
+ OS << " "
+ << comments::ParamCommandComment::getDirectionAsString(C->getDirection());
+
+ if (C->isDirectionExplicit())
+ OS << " explicitly";
+ else
+ OS << " implicitly";
+
+ if (C->hasParamName()) {
+ if (C->isParamIndexValid())
+ OS << " Param=\"" << C->getParamName(FC) << "\"";
+ else
+ OS << " Param=\"" << C->getParamNameAsWritten() << "\"";
+ }
+
+ if (C->isParamIndexValid() && !C->isVarArgParam())
+ OS << " ParamIndex=" << C->getParamIndex();
+}
+
+void TextNodeDumper::visitTParamCommandComment(
+ const comments::TParamCommandComment *C, const comments::FullComment *FC) {
+ if (C->hasParamName()) {
+ if (C->isPositionValid())
+ OS << " Param=\"" << C->getParamName(FC) << "\"";
+ else
+ OS << " Param=\"" << C->getParamNameAsWritten() << "\"";
+ }
+
+ if (C->isPositionValid()) {
+ OS << " Position=<";
+ for (unsigned i = 0, e = C->getDepth(); i != e; ++i) {
+ OS << C->getIndex(i);
+ if (i != e - 1)
+ OS << ", ";
+ }
+ OS << ">";
+ }
+}
+
+void TextNodeDumper::visitVerbatimBlockComment(
+ const comments::VerbatimBlockComment *C, const comments::FullComment *) {
+ OS << " Name=\"" << getCommandName(C->getCommandID())
+ << "\""
+ " CloseName=\""
+ << C->getCloseName() << "\"";
+}
+
+void TextNodeDumper::visitVerbatimBlockLineComment(
+ const comments::VerbatimBlockLineComment *C,
+ const comments::FullComment *) {
+ OS << " Text=\"" << C->getText() << "\"";
+}
+
+void TextNodeDumper::visitVerbatimLineComment(
+ const comments::VerbatimLineComment *C, const comments::FullComment *) {
+ OS << " Text=\"" << C->getText() << "\"";
+}
diff --git a/lib/AST/Type.cpp b/lib/AST/Type.cpp
index 2e1aed26fd..0dbc88c045 100644
--- a/lib/AST/Type.cpp
+++ b/lib/AST/Type.cpp
@@ -291,6 +291,14 @@ QualType QualType::getSingleStepDesugaredTypeImpl(QualType type,
return Context.getQualifiedType(desugar, split.Quals);
}
+// Check that no type class is polymorphic. LLVM style RTTI should be used
+// instead. If absolutely needed an exception can still be added here by
+// defining the appropriate macro (but please don't do this).
+#define TYPE(CLASS, BASE) \
+ static_assert(!std::is_polymorphic<CLASS##Type>::value, \
+ #CLASS "Type should not be polymorphic!");
+#include "clang/AST/TypeNodes.def"
+
QualType Type::getLocallyUnqualifiedSingleStepDesugaredType() const {
switch (getTypeClass()) {
#define ABSTRACT_TYPE(Class, Parent)
@@ -1965,6 +1973,7 @@ Type::ScalarTypeKind Type::getScalarTypeKind() const {
if (BT->getKind() == BuiltinType::NullPtr) return STK_CPointer;
if (BT->isInteger()) return STK_Integral;
if (BT->isFloatingPoint()) return STK_Floating;
+ if (BT->isFixedPointType()) return STK_FixedPoint;
llvm_unreachable("unknown scalar builtin type");
} else if (isa<PointerType>(T)) {
return STK_CPointer;
@@ -2785,6 +2794,10 @@ StringRef BuiltinType::getName(const PrintingPolicy &Policy) const {
return "reserve_id_t";
case OMPArraySection:
return "<OpenMP array section type>";
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case Id: \
+ return #ExtType;
+#include "clang/Basic/OpenCLExtensionTypes.def"
}
llvm_unreachable("Invalid builtin type.");
@@ -2819,6 +2832,7 @@ StringRef FunctionType::getNameForCallConv(CallingConv CC) {
case CC_X86RegCall : return "regcall";
case CC_AAPCS: return "aapcs";
case CC_AAPCS_VFP: return "aapcs-vfp";
+ case CC_AArch64VectorCall: return "aarch64_vector_pcs";
case CC_IntelOclBicc: return "intel_ocl_bicc";
case CC_SpirFunction: return "spir_function";
case CC_OpenCLKernel: return "opencl_kernel";
@@ -2833,24 +2847,28 @@ StringRef FunctionType::getNameForCallConv(CallingConv CC) {
FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
QualType canonical,
const ExtProtoInfo &epi)
- : FunctionType(FunctionProto, result, canonical,
- result->isDependentType(),
+ : FunctionType(FunctionProto, result, canonical, result->isDependentType(),
result->isInstantiationDependentType(),
result->isVariablyModifiedType(),
- result->containsUnexpandedParameterPack(), epi.ExtInfo),
- NumParams(params.size()),
- NumExceptions(epi.ExceptionSpec.Exceptions.size()),
- ExceptionSpecType(epi.ExceptionSpec.Type),
- HasExtParameterInfos(epi.ExtParameterInfos != nullptr),
- Variadic(epi.Variadic), HasTrailingReturn(epi.HasTrailingReturn) {
- assert(NumParams == params.size() && "function has too many parameters");
-
- FunctionTypeBits.TypeQuals = epi.TypeQuals;
+ result->containsUnexpandedParameterPack(), epi.ExtInfo) {
+ FunctionTypeBits.FastTypeQuals = epi.TypeQuals.getFastQualifiers();
FunctionTypeBits.RefQualifier = epi.RefQualifier;
+ FunctionTypeBits.NumParams = params.size();
+ assert(getNumParams() == params.size() && "NumParams overflow!");
+ FunctionTypeBits.ExceptionSpecType = epi.ExceptionSpec.Type;
+ FunctionTypeBits.HasExtParameterInfos = !!epi.ExtParameterInfos;
+ FunctionTypeBits.Variadic = epi.Variadic;
+ FunctionTypeBits.HasTrailingReturn = epi.HasTrailingReturn;
+
+ // Fill in the extra trailing bitfields if present.
+ if (hasExtraBitfields(epi.ExceptionSpec.Type)) {
+ auto &ExtraBits = *getTrailingObjects<FunctionTypeExtraBitfields>();
+ ExtraBits.NumExceptionType = epi.ExceptionSpec.Exceptions.size();
+ }
// Fill in the trailing argument array.
- auto *argSlot = reinterpret_cast<QualType *>(this+1);
- for (unsigned i = 0; i != NumParams; ++i) {
+ auto *argSlot = getTrailingObjects<QualType>();
+ for (unsigned i = 0; i != getNumParams(); ++i) {
if (params[i]->isDependentType())
setDependent();
else if (params[i]->isInstantiationDependentType())
@@ -2862,9 +2880,11 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
argSlot[i] = params[i];
}
+ // Fill in the exception type array if present.
if (getExceptionSpecType() == EST_Dynamic) {
- // Fill in the exception array.
- QualType *exnSlot = argSlot + NumParams;
+ assert(hasExtraBitfields() && "missing trailing extra bitfields!");
+ auto *exnSlot =
+ reinterpret_cast<QualType *>(getTrailingObjects<ExceptionType>());
unsigned I = 0;
for (QualType ExceptionType : epi.ExceptionSpec.Exceptions) {
// Note that, before C++17, a dependent exception specification does
@@ -2878,14 +2898,15 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
exnSlot[I++] = ExceptionType;
}
- } else if (isComputedNoexcept(getExceptionSpecType())) {
+ }
+ // Fill in the Expr * in the exception specification if present.
+ else if (isComputedNoexcept(getExceptionSpecType())) {
assert(epi.ExceptionSpec.NoexceptExpr && "computed noexcept with no expr");
assert((getExceptionSpecType() == EST_DependentNoexcept) ==
epi.ExceptionSpec.NoexceptExpr->isValueDependent());
// Store the noexcept expression and context.
- auto **noexSlot = reinterpret_cast<Expr **>(argSlot + NumParams);
- *noexSlot = epi.ExceptionSpec.NoexceptExpr;
+ *getTrailingObjects<Expr *>() = epi.ExceptionSpec.NoexceptExpr;
if (epi.ExceptionSpec.NoexceptExpr->isValueDependent() ||
epi.ExceptionSpec.NoexceptExpr->isInstantiationDependent())
@@ -2893,10 +2914,12 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
if (epi.ExceptionSpec.NoexceptExpr->containsUnexpandedParameterPack())
setContainsUnexpandedParameterPack();
- } else if (getExceptionSpecType() == EST_Uninstantiated) {
+ }
+ // Fill in the FunctionDecl * in the exception specification if present.
+ else if (getExceptionSpecType() == EST_Uninstantiated) {
// Store the function decl from which we will resolve our
// exception specification.
- auto **slot = reinterpret_cast<FunctionDecl **>(argSlot + NumParams);
+ auto **slot = getTrailingObjects<FunctionDecl *>();
slot[0] = epi.ExceptionSpec.SourceDecl;
slot[1] = epi.ExceptionSpec.SourceTemplate;
// This exception specification doesn't make the type dependent, because
@@ -2904,7 +2927,7 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
} else if (getExceptionSpecType() == EST_Unevaluated) {
// Store the function decl from which we will resolve our
// exception specification.
- auto **slot = reinterpret_cast<FunctionDecl **>(argSlot + NumParams);
+ auto **slot = getTrailingObjects<FunctionDecl *>();
slot[0] = epi.ExceptionSpec.SourceDecl;
}
@@ -2921,12 +2944,19 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
setDependent();
}
+ // Fill in the extra parameter info if present.
if (epi.ExtParameterInfos) {
- auto *extParamInfos =
- const_cast<ExtParameterInfo *>(getExtParameterInfosBuffer());
- for (unsigned i = 0; i != NumParams; ++i)
+ auto *extParamInfos = getTrailingObjects<ExtParameterInfo>();
+ for (unsigned i = 0; i != getNumParams(); ++i)
extParamInfos[i] = epi.ExtParameterInfos[i];
}
+
+ if (epi.TypeQuals.hasNonFastQualifiers()) {
+ FunctionTypeBits.HasExtQuals = 1;
+ *getTrailingObjects<Qualifiers>() = epi.TypeQuals;
+ } else {
+ FunctionTypeBits.HasExtQuals = 0;
+ }
}
bool FunctionProtoType::hasDependentExceptionSpec() const {
@@ -2970,7 +3000,7 @@ CanThrowResult FunctionProtoType::canThrow() const {
case EST_Dynamic:
// A dynamic exception specification is throwing unless every exception
// type is an (unexpanded) pack expansion type.
- for (unsigned I = 0, N = NumExceptions; I != N; ++I)
+ for (unsigned I = 0; I != getNumExceptions(); ++I)
if (!getExceptionType(I)->getAs<PackExpansionType>())
return CT_Can;
return CT_Dependent;
@@ -3018,14 +3048,13 @@ void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, QualType Result,
// shortcut, use one AddInteger call instead of four for the next four
// fields.
assert(!(unsigned(epi.Variadic) & ~1) &&
- !(unsigned(epi.TypeQuals) & ~255) &&
!(unsigned(epi.RefQualifier) & ~3) &&
!(unsigned(epi.ExceptionSpec.Type) & ~15) &&
"Values larger than expected.");
ID.AddInteger(unsigned(epi.Variadic) +
- (epi.TypeQuals << 1) +
- (epi.RefQualifier << 9) +
- (epi.ExceptionSpec.Type << 11));
+ (epi.RefQualifier << 1) +
+ (epi.ExceptionSpec.Type << 3));
+ ID.Add(epi.TypeQuals);
if (epi.ExceptionSpec.Type == EST_Dynamic) {
for (QualType Ex : epi.ExceptionSpec.Exceptions)
ID.AddPointer(Ex.getAsOpaquePtr());
@@ -3045,8 +3074,8 @@ void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, QualType Result,
void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID,
const ASTContext &Ctx) {
- Profile(ID, getReturnType(), param_type_begin(), NumParams, getExtProtoInfo(),
- Ctx, isCanonicalUnqualified());
+ Profile(ID, getReturnType(), param_type_begin(), getNumParams(),
+ getExtProtoInfo(), Ctx, isCanonicalUnqualified());
}
QualType TypedefType::desugar() const {
@@ -3143,14 +3172,23 @@ bool TagType::isBeingDefined() const {
}
bool RecordType::hasConstFields() const {
- for (FieldDecl *FD : getDecl()->fields()) {
- QualType FieldTy = FD->getType();
- if (FieldTy.isConstQualified())
- return true;
- FieldTy = FieldTy.getCanonicalType();
- if (const auto *FieldRecTy = FieldTy->getAs<RecordType>())
- if (FieldRecTy->hasConstFields())
+ std::vector<const RecordType*> RecordTypeList;
+ RecordTypeList.push_back(this);
+ unsigned NextToCheckIndex = 0;
+
+ while (RecordTypeList.size() > NextToCheckIndex) {
+ for (FieldDecl *FD :
+ RecordTypeList[NextToCheckIndex]->getDecl()->fields()) {
+ QualType FieldTy = FD->getType();
+ if (FieldTy.isConstQualified())
return true;
+ FieldTy = FieldTy.getCanonicalType();
+ if (const auto *FieldRecTy = FieldTy->getAs<RecordType>()) {
+ if (llvm::find(RecordTypeList, FieldRecTy) == RecordTypeList.end())
+ RecordTypeList.push_back(FieldRecTy);
+ }
+ }
+ ++NextToCheckIndex;
}
return false;
}
@@ -3202,6 +3240,7 @@ bool AttributedType::isCallingConv() const {
case attr::RegCall:
case attr::SwiftCall:
case attr::VectorCall:
+ case attr::AArch64VectorPcs:
case attr::Pascal:
case attr::MSABI:
case attr::SysVABI:
@@ -3736,6 +3775,9 @@ bool Type::canHaveNullability(bool ResultIfUnknown) const {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id:
+#include "clang/Basic/OpenCLExtensionTypes.def"
case BuiltinType::OCLSampler:
case BuiltinType::OCLEvent:
case BuiltinType::OCLClkEvent:
diff --git a/lib/AST/TypeLoc.cpp b/lib/AST/TypeLoc.cpp
index b6dc679bbf..b7b2f188d7 100644
--- a/lib/AST/TypeLoc.cpp
+++ b/lib/AST/TypeLoc.cpp
@@ -384,6 +384,9 @@ TypeSpecifierType BuiltinTypeLoc::getWrittenTypeSpec() const {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id:
+#include "clang/Basic/OpenCLExtensionTypes.def"
case BuiltinType::OCLSampler:
case BuiltinType::OCLEvent:
case BuiltinType::OCLClkEvent:
diff --git a/lib/AST/TypePrinter.cpp b/lib/AST/TypePrinter.cpp
index fe05127c7f..031b44f11e 100644
--- a/lib/AST/TypePrinter.cpp
+++ b/lib/AST/TypePrinter.cpp
@@ -801,10 +801,8 @@ void TypePrinter::printFunctionProtoAfter(const FunctionProtoType *T,
printFunctionAfter(Info, OS);
- if (unsigned quals = T->getTypeQuals()) {
- OS << ' ';
- AppendTypeQualList(OS, quals, Policy.Restrict);
- }
+ if (!T->getTypeQuals().empty())
+ OS << " " << T->getTypeQuals().getAsString();
switch (T->getRefQualifier()) {
case RQ_None:
@@ -861,6 +859,9 @@ void TypePrinter::printFunctionAfter(const FunctionType::ExtInfo &Info,
case CC_AAPCS_VFP:
OS << " __attribute__((pcs(\"aapcs-vfp\")))";
break;
+ case CC_AArch64VectorCall:
+ OS << "__attribute__((aarch64_vector_pcs))";
+ break;
case CC_IntelOclBicc:
OS << " __attribute__((intel_ocl_bicc))";
break;
@@ -1154,9 +1155,13 @@ void TypePrinter::printTag(TagDecl *D, raw_ostream &OS) {
PresumedLoc PLoc = D->getASTContext().getSourceManager().getPresumedLoc(
D->getLocation());
if (PLoc.isValid()) {
- OS << " at " << PLoc.getFilename()
- << ':' << PLoc.getLine()
- << ':' << PLoc.getColumn();
+ OS << " at ";
+ StringRef File = PLoc.getFilename();
+ if (Policy.RemapFilePaths)
+ OS << Policy.remapPath(File);
+ else
+ OS << File;
+ OS << ':' << PLoc.getLine() << ':' << PLoc.getColumn();
}
}
@@ -1492,7 +1497,7 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
OS << ')';
break;
}
-
+ case attr::AArch64VectorPcs: OS << "aarch64_vector_pcs"; break;
case attr::IntelOclBicc: OS << "inteloclbicc"; break;
case attr::PreserveMost:
OS << "preserve_most";
@@ -1501,6 +1506,9 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
case attr::PreserveAll:
OS << "preserve_all";
break;
+ case attr::NoDeref:
+ OS << "noderef";
+ break;
}
OS << "))";
}
diff --git a/lib/AST/VTableBuilder.cpp b/lib/AST/VTableBuilder.cpp
index 46844daf3b..846a608574 100644
--- a/lib/AST/VTableBuilder.cpp
+++ b/lib/AST/VTableBuilder.cpp
@@ -2205,13 +2205,12 @@ VTableLayout::VTableLayout(ArrayRef<size_t> VTableIndices,
else
this->VTableIndices = OwningArrayRef<size_t>(VTableIndices);
- llvm::sort(this->VTableThunks.begin(), this->VTableThunks.end(),
- [](const VTableLayout::VTableThunkTy &LHS,
- const VTableLayout::VTableThunkTy &RHS) {
- assert((LHS.first != RHS.first || LHS.second == RHS.second) &&
- "Different thunks should have unique indices!");
- return LHS.first < RHS.first;
- });
+ llvm::sort(this->VTableThunks, [](const VTableLayout::VTableThunkTy &LHS,
+ const VTableLayout::VTableThunkTy &RHS) {
+ assert((LHS.first != RHS.first || LHS.second == RHS.second) &&
+ "Different thunks should have unique indices!");
+ return LHS.first < RHS.first;
+ });
}
VTableLayout::~VTableLayout() { }
@@ -3407,10 +3406,9 @@ static void removeRedundantPaths(std::list<FullPathTy> &FullPaths) {
for (const FullPathTy &OtherPath : FullPaths) {
if (&SpecificPath == &OtherPath)
continue;
- if (std::all_of(SpecificPath.begin(), SpecificPath.end(),
- [&](const BaseSubobject &BSO) {
- return OtherPath.count(BSO) != 0;
- })) {
+ if (llvm::all_of(SpecificPath, [&](const BaseSubobject &BSO) {
+ return OtherPath.count(BSO) != 0;
+ })) {
return true;
}
}
@@ -3486,10 +3484,9 @@ static const FullPathTy *selectBestPath(ASTContext &Context,
// It's possible that the overrider isn't in this path. If so, skip it
// because this path didn't introduce it.
const CXXRecordDecl *OverridingParent = OverridingMethod->getParent();
- if (std::none_of(SpecificPath.begin(), SpecificPath.end(),
- [&](const BaseSubobject &BSO) {
- return BSO.getBase() == OverridingParent;
- }))
+ if (llvm::none_of(SpecificPath, [&](const BaseSubobject &BSO) {
+ return BSO.getBase() == OverridingParent;
+ }))
continue;
CurrentOverrides.insert(OverridingMethod);
}
diff --git a/lib/ASTMatchers/ASTMatchFinder.cpp b/lib/ASTMatchers/ASTMatchFinder.cpp
index 63f8395b82..32d8282f6d 100644
--- a/lib/ASTMatchers/ASTMatchFinder.cpp
+++ b/lib/ASTMatchers/ASTMatchFinder.cpp
@@ -635,10 +635,6 @@ private:
bool memoizedMatchesAncestorOfRecursively(
const ast_type_traits::DynTypedNode &Node, const DynTypedMatcher &Matcher,
BoundNodesTreeBuilder *Builder, AncestorMatchMode MatchMode) {
- if (Node.get<TranslationUnitDecl>() ==
- ActiveASTContext->getTranslationUnitDecl())
- return false;
-
// For AST-nodes that don't have an identity, we can't memoize.
if (!Builder->isComparable())
return matchesAncestorOfRecursively(Node, Matcher, Builder, MatchMode);
@@ -673,7 +669,22 @@ private:
BoundNodesTreeBuilder *Builder,
AncestorMatchMode MatchMode) {
const auto &Parents = ActiveASTContext->getParents(Node);
- assert(!Parents.empty() && "Found node that is not in the parent map.");
+ if (Parents.empty()) {
+ // Nodes may have no parents if:
+ // a) the node is the TranslationUnitDecl
+ // b) we have a limited traversal scope that excludes the parent edges
+ // c) there is a bug in the AST, and the node is not reachable
+ // Usually the traversal scope is the whole AST, which precludes b.
+ // Bugs are common enough that it's worthwhile asserting when we can.
+ assert((Node.get<TranslationUnitDecl>() ||
+ /* Traversal scope is limited if none of the bounds are the TU */
+ llvm::none_of(ActiveASTContext->getTraversalScope(),
+ [](Decl *D) {
+ return D->getKind() == Decl::TranslationUnit;
+ })) &&
+ "Found node that is not in the complete parent map!");
+ return false;
+ }
if (Parents.size() == 1) {
// Only one parent - do recursive memoization.
const ast_type_traits::DynTypedNode Parent = Parents[0];
@@ -1019,7 +1030,7 @@ void MatchFinder::matchAST(ASTContext &Context) {
internal::MatchASTVisitor Visitor(&Matchers, Options);
Visitor.set_active_ast_context(&Context);
Visitor.onStartOfTranslationUnit();
- Visitor.TraverseDecl(Context.getTranslationUnitDecl());
+ Visitor.TraverseAST(Context);
Visitor.onEndOfTranslationUnit();
}
diff --git a/lib/ASTMatchers/ASTMatchersInternal.cpp b/lib/ASTMatchers/ASTMatchersInternal.cpp
index 7b551a4eee..6a87a67275 100644
--- a/lib/ASTMatchers/ASTMatchersInternal.cpp
+++ b/lib/ASTMatchers/ASTMatchersInternal.cpp
@@ -144,10 +144,10 @@ DynTypedMatcher DynTypedMatcher::constructVariadic(
ast_type_traits::ASTNodeKind SupportedKind,
std::vector<DynTypedMatcher> InnerMatchers) {
assert(!InnerMatchers.empty() && "Array must not be empty.");
- assert(std::all_of(InnerMatchers.begin(), InnerMatchers.end(),
- [SupportedKind](const DynTypedMatcher &M) {
- return M.canConvertTo(SupportedKind);
- }) &&
+ assert(llvm::all_of(InnerMatchers,
+ [SupportedKind](const DynTypedMatcher &M) {
+ return M.canConvertTo(SupportedKind);
+ }) &&
"InnerMatchers must be convertible to SupportedKind!");
// We must relax the restrict kind here.
@@ -449,7 +449,7 @@ bool HasNameMatcher::matchesNodeUnqualified(const NamedDecl &Node) const {
assert(UseUnqualifiedMatch);
llvm::SmallString<128> Scratch;
StringRef NodeName = getNodeName(Node, Scratch);
- return std::any_of(Names.begin(), Names.end(), [&](StringRef Name) {
+ return llvm::any_of(Names, [&](StringRef Name) {
return consumeNameSuffix(Name, NodeName) && Name.empty();
});
}
@@ -666,6 +666,7 @@ const internal::VariadicDynCastAllOfMatcher<Decl, UnresolvedUsingValueDecl>
unresolvedUsingValueDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, UnresolvedUsingTypenameDecl>
unresolvedUsingTypenameDecl;
+const internal::VariadicDynCastAllOfMatcher<Stmt, ConstantExpr> constantExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, ParenExpr> parenExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstructExpr>
cxxConstructExpr;
@@ -687,6 +688,7 @@ const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr>
const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr;
const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr> declRefExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr> objcIvarRefExpr;
+const internal::VariadicDynCastAllOfMatcher<Stmt, BlockExpr> blockExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt;
const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt;
const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt>
diff --git a/lib/ASTMatchers/Dynamic/Parser.cpp b/lib/ASTMatchers/Dynamic/Parser.cpp
index 96362cd4bc..5db10048fd 100644
--- a/lib/ASTMatchers/Dynamic/Parser.cpp
+++ b/lib/ASTMatchers/Dynamic/Parser.cpp
@@ -110,6 +110,10 @@ private:
}
switch (Code[0]) {
+ case '#':
+ Result.Kind = TokenInfo::TK_Eof;
+ Result.Text = "";
+ return Result;
case ',':
Result.Kind = TokenInfo::TK_Comma;
Result.Text = Code.substr(0, 1);
@@ -645,12 +649,12 @@ Parser::completeExpression(StringRef Code, unsigned CompletionOffset, Sema *S,
P.parseExpressionImpl(&Dummy);
// Sort by specificity, then by name.
- llvm::sort(P.Completions.begin(), P.Completions.end(),
+ llvm::sort(P.Completions,
[](const MatcherCompletion &A, const MatcherCompletion &B) {
- if (A.Specificity != B.Specificity)
- return A.Specificity > B.Specificity;
- return A.TypedText < B.TypedText;
- });
+ if (A.Specificity != B.Specificity)
+ return A.Specificity > B.Specificity;
+ return A.TypedText < B.TypedText;
+ });
return P.Completions;
}
diff --git a/lib/ASTMatchers/Dynamic/Registry.cpp b/lib/ASTMatchers/Dynamic/Registry.cpp
index e2f4adb010..5f11c153dc 100644
--- a/lib/ASTMatchers/Dynamic/Registry.cpp
+++ b/lib/ASTMatchers/Dynamic/Registry.cpp
@@ -106,6 +106,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_OVERLOADED_2(callee);
REGISTER_OVERLOADED_2(hasPrefix);
REGISTER_OVERLOADED_2(hasType);
+ REGISTER_OVERLOADED_2(ignoringParens);
REGISTER_OVERLOADED_2(isDerivedFrom);
REGISTER_OVERLOADED_2(isSameOrDerivedFrom);
REGISTER_OVERLOADED_2(loc);
@@ -133,11 +134,12 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(asString);
REGISTER_MATCHER(atomicExpr);
REGISTER_MATCHER(atomicType);
- REGISTER_MATCHER(autoType);
REGISTER_MATCHER(autoreleasePoolStmt)
- REGISTER_MATCHER(binaryOperator);
+ REGISTER_MATCHER(autoType);
REGISTER_MATCHER(binaryConditionalOperator);
+ REGISTER_MATCHER(binaryOperator);
REGISTER_MATCHER(blockDecl);
+ REGISTER_MATCHER(blockExpr);
REGISTER_MATCHER(blockPointerType);
REGISTER_MATCHER(booleanType);
REGISTER_MATCHER(breakStmt);
@@ -153,6 +155,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(compoundStmt);
REGISTER_MATCHER(conditionalOperator);
REGISTER_MATCHER(constantArrayType);
+ REGISTER_MATCHER(constantExpr);
REGISTER_MATCHER(containsDeclaration);
REGISTER_MATCHER(continueStmt);
REGISTER_MATCHER(cStyleCastExpr);
@@ -189,10 +192,10 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(decayedType);
REGISTER_MATCHER(decl);
REGISTER_MATCHER(declaratorDecl);
- REGISTER_MATCHER(decltypeType);
REGISTER_MATCHER(declCountIs);
REGISTER_MATCHER(declRefExpr);
REGISTER_MATCHER(declStmt);
+ REGISTER_MATCHER(decltypeType);
REGISTER_MATCHER(defaultStmt);
REGISTER_MATCHER(dependentSizedArrayType);
REGISTER_MATCHER(designatedInitExpr);
@@ -233,6 +236,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasAnyDeclaration);
REGISTER_MATCHER(hasAnyName);
REGISTER_MATCHER(hasAnyParameter);
+ REGISTER_MATCHER(hasAnySelector);
REGISTER_MATCHER(hasAnySubstatement);
REGISTER_MATCHER(hasAnyTemplateArgument);
REGISTER_MATCHER(hasAnyUsingShadowDecl);
@@ -291,11 +295,11 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasReturnValue);
REGISTER_MATCHER(hasRHS);
REGISTER_MATCHER(hasSelector);
- REGISTER_MATCHER(hasAnySelector);
REGISTER_MATCHER(hasSingleDecl);
REGISTER_MATCHER(hasSize);
REGISTER_MATCHER(hasSizeExpr);
REGISTER_MATCHER(hasSourceExpression);
+ REGISTER_MATCHER(hasSpecializedTemplate);
REGISTER_MATCHER(hasStaticStorageDuration);
REGISTER_MATCHER(hasSyntacticForm);
REGISTER_MATCHER(hasTargetDecl);
@@ -308,14 +312,15 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasUnaryOperand);
REGISTER_MATCHER(hasUnarySelector);
REGISTER_MATCHER(hasUnderlyingDecl);
+ REGISTER_MATCHER(hasUnderlyingType);
REGISTER_MATCHER(hasUnqualifiedDesugaredType);
REGISTER_MATCHER(hasValueType);
REGISTER_MATCHER(ifStmt);
- REGISTER_MATCHER(ignoringImplicit);
REGISTER_MATCHER(ignoringImpCasts);
+ REGISTER_MATCHER(ignoringImplicit);
REGISTER_MATCHER(ignoringParenCasts);
REGISTER_MATCHER(ignoringParenImpCasts);
- REGISTER_MATCHER(ignoringParens);
+ REGISTER_MATCHER(imaginaryLiteral);
REGISTER_MATCHER(implicitCastExpr);
REGISTER_MATCHER(implicitValueInitExpr);
REGISTER_MATCHER(incompleteArrayType);
@@ -342,22 +347,23 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(isDefaulted);
REGISTER_MATCHER(isDefinition);
REGISTER_MATCHER(isDeleted);
+ REGISTER_MATCHER(isDelegatingConstructor);
REGISTER_MATCHER(isExceptionVariable);
+ REGISTER_MATCHER(isExpansionInFileMatching);
+ REGISTER_MATCHER(isExpansionInMainFile);
+ REGISTER_MATCHER(isExpansionInSystemHeader);
REGISTER_MATCHER(isExplicit);
REGISTER_MATCHER(isExplicitTemplateSpecialization);
REGISTER_MATCHER(isExpr);
REGISTER_MATCHER(isExternC);
REGISTER_MATCHER(isFinal);
- REGISTER_MATCHER(isInline);
REGISTER_MATCHER(isImplicit);
- REGISTER_MATCHER(isExpansionInFileMatching);
- REGISTER_MATCHER(isExpansionInMainFile);
+ REGISTER_MATCHER(isInline);
REGISTER_MATCHER(isInstanceMessage);
REGISTER_MATCHER(isInstantiated);
- REGISTER_MATCHER(isExpansionInSystemHeader);
+ REGISTER_MATCHER(isInstantiationDependent);
REGISTER_MATCHER(isInteger);
REGISTER_MATCHER(isIntegral);
- REGISTER_MATCHER(isInstantiationDependent);
REGISTER_MATCHER(isInTemplateInstantiation);
REGISTER_MATCHER(isLambda);
REGISTER_MATCHER(isListInitialization);
@@ -374,12 +380,14 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(isPure);
REGISTER_MATCHER(isScoped);
REGISTER_MATCHER(isSignedInteger);
+ REGISTER_MATCHER(isStaticLocal);
REGISTER_MATCHER(isStaticStorageClass);
REGISTER_MATCHER(isStruct);
REGISTER_MATCHER(isTemplateInstantiation);
REGISTER_MATCHER(isTypeDependent);
REGISTER_MATCHER(isUnion);
REGISTER_MATCHER(isUnsignedInteger);
+ REGISTER_MATCHER(isUserProvided);
REGISTER_MATCHER(isValueDependent);
REGISTER_MATCHER(isVariadic);
REGISTER_MATCHER(isVirtual);
@@ -403,10 +411,10 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(namesType);
REGISTER_MATCHER(nestedNameSpecifier);
REGISTER_MATCHER(nestedNameSpecifierLoc);
+ REGISTER_MATCHER(nonTypeTemplateParmDecl);
REGISTER_MATCHER(nullPointerConstant);
REGISTER_MATCHER(nullStmt);
REGISTER_MATCHER(numSelectorArgs);
- REGISTER_MATCHER(ofClass);
REGISTER_MATCHER(objcCatchStmt);
REGISTER_MATCHER(objcCategoryDecl);
REGISTER_MATCHER(objcCategoryImplDecl);
@@ -422,6 +430,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(objcProtocolDecl);
REGISTER_MATCHER(objcThrowStmt);
REGISTER_MATCHER(objcTryStmt);
+ REGISTER_MATCHER(ofClass);
REGISTER_MATCHER(on);
REGISTER_MATCHER(onImplicitObjectArgument);
REGISTER_MATCHER(opaqueValueExpr);
@@ -441,6 +450,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(refersToDeclaration);
REGISTER_MATCHER(refersToIntegralType);
REGISTER_MATCHER(refersToType);
+ REGISTER_MATCHER(refersToTemplate);
REGISTER_MATCHER(requiresZeroInitialization);
REGISTER_MATCHER(returns);
REGISTER_MATCHER(returnStmt);
@@ -458,9 +468,10 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(substTemplateTypeParmType);
REGISTER_MATCHER(switchCase);
REGISTER_MATCHER(switchStmt);
+ REGISTER_MATCHER(tagType);
REGISTER_MATCHER(templateArgument);
- REGISTER_MATCHER(templateName);
REGISTER_MATCHER(templateArgumentCountIs);
+ REGISTER_MATCHER(templateName);
REGISTER_MATCHER(templateSpecializationType);
REGISTER_MATCHER(templateTypeParmDecl);
REGISTER_MATCHER(templateTypeParmType);
@@ -468,11 +479,11 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(to);
REGISTER_MATCHER(translationUnitDecl);
REGISTER_MATCHER(type);
+ REGISTER_MATCHER(typeAliasDecl);
+ REGISTER_MATCHER(typeAliasTemplateDecl);
REGISTER_MATCHER(typedefDecl);
REGISTER_MATCHER(typedefNameDecl);
REGISTER_MATCHER(typedefType);
- REGISTER_MATCHER(typeAliasDecl);
- REGISTER_MATCHER(typeAliasTemplateDecl);
REGISTER_MATCHER(typeLoc);
REGISTER_MATCHER(unaryExprOrTypeTraitExpr);
REGISTER_MATCHER(unaryOperator);
@@ -483,6 +494,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(unresolvedUsingTypenameDecl);
REGISTER_MATCHER(unresolvedUsingValueDecl);
REGISTER_MATCHER(userDefinedLiteral);
+ REGISTER_MATCHER(usesADL);
REGISTER_MATCHER(usingDecl);
REGISTER_MATCHER(usingDirectiveDecl);
REGISTER_MATCHER(valueDecl);
diff --git a/lib/Analysis/BodyFarm.cpp b/lib/Analysis/BodyFarm.cpp
index ac8fcdc912..9c1d529a68 100644
--- a/lib/Analysis/BodyFarm.cpp
+++ b/lib/Analysis/BodyFarm.cpp
@@ -201,10 +201,9 @@ ObjCIvarRefExpr *ASTMaker::makeObjCIvarRef(const Expr *Base,
/*arrow=*/true, /*free=*/false);
}
-
ReturnStmt *ASTMaker::makeReturn(const Expr *RetVal) {
- return new (C) ReturnStmt(SourceLocation(), const_cast<Expr*>(RetVal),
- nullptr);
+ return ReturnStmt::Create(C, SourceLocation(), const_cast<Expr *>(RetVal),
+ /* NRVOCandidate=*/nullptr);
}
IntegerLiteral *ASTMaker::makeIntegerLiteral(uint64_t Value, QualType Ty) {
@@ -464,13 +463,13 @@ static Stmt *create_call_once(ASTContext &C, const FunctionDecl *D) {
Deref, M.makeIntegralCast(M.makeIntegerLiteral(1, C.IntTy), DerefType),
DerefType);
- IfStmt *Out = new (C)
- IfStmt(C, SourceLocation(),
- /* IsConstexpr=*/ false,
- /* init=*/ nullptr,
- /* var=*/ nullptr,
- /* cond=*/ FlagCheck,
- /* then=*/ M.makeCompound({CallbackCall, FlagAssignment}));
+ auto *Out =
+ IfStmt::Create(C, SourceLocation(),
+ /* IsConstexpr=*/false,
+ /* init=*/nullptr,
+ /* var=*/nullptr,
+ /* cond=*/FlagCheck,
+ /* then=*/M.makeCompound({CallbackCall, FlagAssignment}));
return Out;
}
@@ -549,12 +548,12 @@ static Stmt *create_dispatch_once(ASTContext &C, const FunctionDecl *D) {
Expr *GuardCondition = M.makeComparison(LValToRval, DoneValue, BO_NE);
// (5) Create the 'if' statement.
- IfStmt *If = new (C) IfStmt(C, SourceLocation(),
- /* IsConstexpr=*/ false,
- /* init=*/ nullptr,
- /* var=*/ nullptr,
- /* cond=*/ GuardCondition,
- /* then=*/ CS);
+ auto *If = IfStmt::Create(C, SourceLocation(),
+ /* IsConstexpr=*/false,
+ /* init=*/nullptr,
+ /* var=*/nullptr,
+ /* cond=*/GuardCondition,
+ /* then=*/CS);
return If;
}
@@ -657,8 +656,11 @@ static Stmt *create_OSAtomicCompareAndSwap(ASTContext &C, const FunctionDecl *D)
Stmt *Else = M.makeReturn(RetVal);
/// Construct the If.
- Stmt *If = new (C) IfStmt(C, SourceLocation(), false, nullptr, nullptr,
- Comparison, Body, SourceLocation(), Else);
+ auto *If = IfStmt::Create(C, SourceLocation(),
+ /* IsConstexpr=*/false,
+ /* init=*/nullptr,
+ /* var=*/nullptr, Comparison, Body,
+ SourceLocation(), Else);
return If;
}
diff --git a/lib/Analysis/CFG.cpp b/lib/Analysis/CFG.cpp
index 6bb181c225..96130c25be 100644
--- a/lib/Analysis/CFG.cpp
+++ b/lib/Analysis/CFG.cpp
@@ -551,6 +551,7 @@ private:
CFGBlock *VisitGotoStmt(GotoStmt *G);
CFGBlock *VisitIfStmt(IfStmt *I);
CFGBlock *VisitImplicitCastExpr(ImplicitCastExpr *E, AddStmtChoice asc);
+ CFGBlock *VisitConstantExpr(ConstantExpr *E, AddStmtChoice asc);
CFGBlock *VisitIndirectGotoStmt(IndirectGotoStmt *I);
CFGBlock *VisitLabelStmt(LabelStmt *L);
CFGBlock *VisitBlockExpr(BlockExpr *E, AddStmtChoice asc);
@@ -571,7 +572,7 @@ private:
CFGBlock *VisitObjCForCollectionStmt(ObjCForCollectionStmt *S);
CFGBlock *VisitObjCMessageExpr(ObjCMessageExpr *E, AddStmtChoice asc);
CFGBlock *VisitPseudoObjectExpr(PseudoObjectExpr *E);
- CFGBlock *VisitReturnStmt(ReturnStmt *R);
+ CFGBlock *VisitReturnStmt(Stmt *S);
CFGBlock *VisitSEHExceptStmt(SEHExceptStmt *S);
CFGBlock *VisitSEHFinallyStmt(SEHFinallyStmt *S);
CFGBlock *VisitSEHLeaveStmt(SEHLeaveStmt *S);
@@ -1038,12 +1039,14 @@ private:
if (!areExprTypesCompatible(Expr1, Expr2))
return {};
- llvm::APSInt L1, L2;
-
- if (!Expr1->EvaluateAsInt(L1, *Context) ||
- !Expr2->EvaluateAsInt(L2, *Context))
+ Expr::EvalResult L1Result, L2Result;
+ if (!Expr1->EvaluateAsInt(L1Result, *Context) ||
+ !Expr2->EvaluateAsInt(L2Result, *Context))
return {};
+ llvm::APSInt L1 = L1Result.Val.getInt();
+ llvm::APSInt L2 = L2Result.Val.getInt();
+
// Can't compare signed with unsigned or with different bit width.
if (L1.isSigned() != L2.isSigned() || L1.getBitWidth() != L2.getBitWidth())
return {};
@@ -1133,13 +1136,16 @@ private:
case BO_And: {
// If either operand is zero, we know the value
// must be false.
- llvm::APSInt IntVal;
- if (Bop->getLHS()->EvaluateAsInt(IntVal, *Context)) {
+ Expr::EvalResult LHSResult;
+ if (Bop->getLHS()->EvaluateAsInt(LHSResult, *Context)) {
+ llvm::APSInt IntVal = LHSResult.Val.getInt();
if (!IntVal.getBoolValue()) {
return TryResult(false);
}
}
- if (Bop->getRHS()->EvaluateAsInt(IntVal, *Context)) {
+ Expr::EvalResult RHSResult;
+ if (Bop->getRHS()->EvaluateAsInt(RHSResult, *Context)) {
+ llvm::APSInt IntVal = RHSResult.Val.getInt();
if (!IntVal.getBoolValue()) {
return TryResult(false);
}
@@ -1334,6 +1340,7 @@ void CFGBuilder::findConstructionContexts(
case CK_NoOp:
case CK_ConstructorConversion:
findConstructionContexts(Layer, Cast->getSubExpr());
+ break;
default:
break;
}
@@ -2099,6 +2106,9 @@ CFGBlock *CFGBuilder::Visit(Stmt * S, AddStmtChoice asc) {
case Stmt::ImplicitCastExprClass:
return VisitImplicitCastExpr(cast<ImplicitCastExpr>(S), asc);
+ case Stmt::ConstantExprClass:
+ return VisitConstantExpr(cast<ConstantExpr>(S), asc);
+
case Stmt::IndirectGotoStmtClass:
return VisitIndirectGotoStmt(cast<IndirectGotoStmt>(S));
@@ -2146,7 +2156,8 @@ CFGBlock *CFGBuilder::Visit(Stmt * S, AddStmtChoice asc) {
return VisitPseudoObjectExpr(cast<PseudoObjectExpr>(S));
case Stmt::ReturnStmtClass:
- return VisitReturnStmt(cast<ReturnStmt>(S));
+ case Stmt::CoreturnStmtClass:
+ return VisitReturnStmt(S);
case Stmt::SEHExceptStmtClass:
return VisitSEHExceptStmt(cast<SEHExceptStmt>(S));
@@ -2632,15 +2643,12 @@ CFGBlock *CFGBuilder::VisitDeclStmt(DeclStmt *DS) {
for (DeclStmt::reverse_decl_iterator I = DS->decl_rbegin(),
E = DS->decl_rend();
I != E; ++I) {
- // Get the alignment of the new DeclStmt, padding out to >=8 bytes.
- unsigned A = alignof(DeclStmt) < 8 ? 8 : alignof(DeclStmt);
// Allocate the DeclStmt using the BumpPtrAllocator. It will get
// automatically freed with the CFG.
DeclGroupRef DG(*I);
Decl *D = *I;
- void *Mem = cfg->getAllocator().Allocate(sizeof(DeclStmt), A);
- DeclStmt *DSNew = new (Mem) DeclStmt(DG, D->getLocation(), GetEndLoc(D));
+ DeclStmt *DSNew = new (Context) DeclStmt(DG, D->getLocation(), GetEndLoc(D));
cfg->addSyntheticDeclStmt(DSNew, DS);
// Append the fake DeclStmt to block.
@@ -2879,22 +2887,24 @@ CFGBlock *CFGBuilder::VisitIfStmt(IfStmt *I) {
return LastBlock;
}
-CFGBlock *CFGBuilder::VisitReturnStmt(ReturnStmt *R) {
+CFGBlock *CFGBuilder::VisitReturnStmt(Stmt *S) {
// If we were in the middle of a block we stop processing that block.
//
- // NOTE: If a "return" appears in the middle of a block, this means that the
- // code afterwards is DEAD (unreachable). We still keep a basic block
- // for that code; a simple "mark-and-sweep" from the entry block will be
- // able to report such dead blocks.
+ // NOTE: If a "return" or "co_return" appears in the middle of a block, this
+ // means that the code afterwards is DEAD (unreachable). We still keep
+ // a basic block for that code; a simple "mark-and-sweep" from the entry
+ // block will be able to report such dead blocks.
+ assert(isa<ReturnStmt>(S) || isa<CoreturnStmt>(S));
// Create the new block.
Block = createBlock(false);
- addAutomaticObjHandling(ScopePos, LocalScope::const_iterator(), R);
+ addAutomaticObjHandling(ScopePos, LocalScope::const_iterator(), S);
- findConstructionContexts(
- ConstructionContextLayer::create(cfg->getBumpVectorContext(), R),
- R->getRetValue());
+ if (auto *R = dyn_cast<ReturnStmt>(S))
+ findConstructionContexts(
+ ConstructionContextLayer::create(cfg->getBumpVectorContext(), R),
+ R->getRetValue());
// If the one of the destructors does not return, we already have the Exit
// block as a successor.
@@ -2903,7 +2913,7 @@ CFGBlock *CFGBuilder::VisitReturnStmt(ReturnStmt *R) {
// Add the return statement to the block. This may create new blocks if R
// contains control-flow (short-circuit operations).
- return VisitStmt(R, AddStmtChoice::AlwaysAdd);
+ return VisitStmt(S, AddStmtChoice::AlwaysAdd);
}
CFGBlock *CFGBuilder::VisitSEHExceptStmt(SEHExceptStmt *ES) {
@@ -4379,6 +4389,10 @@ CFGBlock *CFGBuilder::VisitImplicitCastExpr(ImplicitCastExpr *E,
return Visit(E->getSubExpr(), AddStmtChoice());
}
+CFGBlock *CFGBuilder::VisitConstantExpr(ConstantExpr *E, AddStmtChoice asc) {
+ return Visit(E->getSubExpr(), AddStmtChoice());
+}
+
CFGBlock *CFGBuilder::VisitIndirectGotoStmt(IndirectGotoStmt *I) {
// Lazily create the indirect-goto dispatch block if there isn't one already.
CFGBlock *IBlock = cfg->getIndirectGotoBlock();
@@ -4435,6 +4449,10 @@ tryAgain:
E = cast<CXXFunctionalCastExpr>(E)->getSubExpr();
goto tryAgain;
+ case Stmt::ConstantExprClass:
+ E = cast<ConstantExpr>(E)->getSubExpr();
+ goto tryAgain;
+
case Stmt::ParenExprClass:
E = cast<ParenExpr>(E)->getSubExpr();
goto tryAgain;
diff --git a/lib/Analysis/CMakeLists.txt b/lib/Analysis/CMakeLists.txt
index 36a37f4985..5345a56f20 100644
--- a/lib/Analysis/CMakeLists.txt
+++ b/lib/Analysis/CMakeLists.txt
@@ -16,15 +16,11 @@ add_clang_library(clangAnalysis
CodeInjector.cpp
Dominators.cpp
ExprMutationAnalyzer.cpp
- FormatString.cpp
LiveVariables.cpp
- OSLog.cpp
ObjCNoReturn.cpp
PostOrderCFGView.cpp
- PrintfFormatString.cpp
ProgramPoint.cpp
ReachableCode.cpp
- ScanfFormatString.cpp
ThreadSafety.cpp
ThreadSafetyCommon.cpp
ThreadSafetyLogical.cpp
diff --git a/lib/Analysis/CallGraph.cpp b/lib/Analysis/CallGraph.cpp
index bac00680ff..66a6f1a9bc 100644
--- a/lib/Analysis/CallGraph.cpp
+++ b/lib/Analysis/CallGraph.cpp
@@ -212,7 +212,7 @@ void CallGraph::viewGraph() const {
void CallGraphNode::print(raw_ostream &os) const {
if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(FD))
- return ND->printName(os);
+ return ND->printQualifiedName(os);
os << "< >";
}
diff --git a/lib/Analysis/CloneDetection.cpp b/lib/Analysis/CloneDetection.cpp
index 9a18eacf48..88402e2ada 100644
--- a/lib/Analysis/CloneDetection.cpp
+++ b/lib/Analysis/CloneDetection.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
///
-/// This file implements classes for searching and anlyzing source code clones.
+/// This file implements classes for searching and analyzing source code clones.
///
//===----------------------------------------------------------------------===//
@@ -523,8 +523,7 @@ void CloneConstraint::splitCloneGroups(
Result.push_back(PotentialGroup);
}
- assert(std::all_of(Indexes.begin(), Indexes.end(),
- [](char c) { return c == 1; }));
+ assert(llvm::all_of(Indexes, [](char c) { return c == 1; }));
}
CloneGroups = Result;
}
diff --git a/lib/Analysis/LiveVariables.cpp b/lib/Analysis/LiveVariables.cpp
index f7bfcadf59..4f22ccc55a 100644
--- a/lib/Analysis/LiveVariables.cpp
+++ b/lib/Analysis/LiveVariables.cpp
@@ -237,8 +237,8 @@ static const Stmt *LookThroughStmt(const Stmt *S) {
while (S) {
if (const Expr *Ex = dyn_cast<Expr>(S))
S = Ex->IgnoreParens();
- if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(S)) {
- S = EWC->getSubExpr();
+ if (const FullExpr *FE = dyn_cast<FullExpr>(S)) {
+ S = FE->getSubExpr();
continue;
}
if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(S)) {
diff --git a/lib/Analysis/ProgramPoint.cpp b/lib/Analysis/ProgramPoint.cpp
index 9a04cb4152..2d016cb133 100644
--- a/lib/Analysis/ProgramPoint.cpp
+++ b/lib/Analysis/ProgramPoint.cpp
@@ -43,6 +43,10 @@ ProgramPoint ProgramPoint::getProgramPoint(const Stmt *S, ProgramPoint::Kind K,
}
}
+LLVM_DUMP_METHOD void ProgramPoint::dump() const {
+ return print(/*CR=*/"\n", llvm::errs());
+}
+
static void printLocation(raw_ostream &Out, SourceLocation SLoc,
const SourceManager &SM,
StringRef CR,
diff --git a/lib/Analysis/ReachableCode.cpp b/lib/Analysis/ReachableCode.cpp
index 0d4b78ba02..87f4f7010f 100644
--- a/lib/Analysis/ReachableCode.cpp
+++ b/lib/Analysis/ReachableCode.cpp
@@ -219,7 +219,7 @@ static bool isConfigurationValue(const Stmt *S,
return isConfigurationValue(cast<DeclRefExpr>(S)->getDecl(), PP);
case Stmt::ObjCBoolLiteralExprClass:
IgnoreYES_NO = true;
- // Fallthrough.
+ LLVM_FALLTHROUGH;
case Stmt::CXXBoolLiteralExprClass:
case Stmt::IntegerLiteralClass: {
const Expr *E = cast<Expr>(S);
diff --git a/lib/Analysis/ThreadSafety.cpp b/lib/Analysis/ThreadSafety.cpp
index 65904fd360..f83b0e0cab 100644
--- a/lib/Analysis/ThreadSafety.cpp
+++ b/lib/Analysis/ThreadSafety.cpp
@@ -33,6 +33,7 @@
#include "clang/Analysis/Analyses/ThreadSafetyUtil.h"
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/CFG.h"
+#include "clang/Basic/Builtins.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/SourceLocation.h"
@@ -1388,14 +1389,17 @@ const CallExpr* ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond,
if (!Cond)
return nullptr;
- if (const auto *CallExp = dyn_cast<CallExpr>(Cond))
+ if (const auto *CallExp = dyn_cast<CallExpr>(Cond)) {
+ if (CallExp->getBuiltinCallee() == Builtin::BI__builtin_expect)
+ return getTrylockCallExpr(CallExp->getArg(0), C, Negate);
return CallExp;
+ }
else if (const auto *PE = dyn_cast<ParenExpr>(Cond))
return getTrylockCallExpr(PE->getSubExpr(), C, Negate);
else if (const auto *CE = dyn_cast<ImplicitCastExpr>(Cond))
return getTrylockCallExpr(CE->getSubExpr(), C, Negate);
- else if (const auto *EWC = dyn_cast<ExprWithCleanups>(Cond))
- return getTrylockCallExpr(EWC->getSubExpr(), C, Negate);
+ else if (const auto *FE = dyn_cast<FullExpr>(Cond))
+ return getTrylockCallExpr(FE->getSubExpr(), C, Negate);
else if (const auto *DRE = dyn_cast<DeclRefExpr>(Cond)) {
const Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C);
return getTrylockCallExpr(E, C, Negate);
@@ -1431,6 +1435,17 @@ const CallExpr* ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond,
if (BOP->getOpcode() == BO_LOr)
return getTrylockCallExpr(BOP->getRHS(), C, Negate);
return nullptr;
+ } else if (const auto *COP = dyn_cast<ConditionalOperator>(Cond)) {
+ bool TCond, FCond;
+ if (getStaticBooleanValue(COP->getTrueExpr(), TCond) &&
+ getStaticBooleanValue(COP->getFalseExpr(), FCond)) {
+ if (TCond && !FCond)
+ return getTrylockCallExpr(COP->getCond(), C, Negate);
+ if (!TCond && FCond) {
+ Negate = !Negate;
+ return getTrylockCallExpr(COP->getCond(), C, Negate);
+ }
+ }
}
return nullptr;
}
@@ -1445,7 +1460,8 @@ void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
Result = ExitSet;
const Stmt *Cond = PredBlock->getTerminatorCondition();
- if (!Cond)
+ // We don't acquire try-locks on ?: branches, only when its result is used.
+ if (!Cond || isa<ConditionalOperator>(PredBlock->getTerminator()))
return;
bool Negate = false;
@@ -1534,6 +1550,10 @@ class BuildLockset : public ConstStmtVisitor<BuildLockset> {
ProtectedOperationKind POK = POK_VarAccess);
void handleCall(const Expr *Exp, const NamedDecl *D, VarDecl *VD = nullptr);
+ void examineArguments(const FunctionDecl *FD,
+ CallExpr::const_arg_iterator ArgBegin,
+ CallExpr::const_arg_iterator ArgEnd,
+ bool SkipFirstParam = false);
public:
BuildLockset(ThreadSafetyAnalyzer *Anlzr, CFGBlockInfo &Info)
@@ -1934,10 +1954,37 @@ void BuildLockset::VisitCastExpr(const CastExpr *CE) {
checkAccess(CE->getSubExpr(), AK_Read);
}
-void BuildLockset::VisitCallExpr(const CallExpr *Exp) {
- bool ExamineArgs = true;
- bool OperatorFun = false;
+void BuildLockset::examineArguments(const FunctionDecl *FD,
+ CallExpr::const_arg_iterator ArgBegin,
+ CallExpr::const_arg_iterator ArgEnd,
+ bool SkipFirstParam) {
+ // Currently we can't do anything if we don't know the function declaration.
+ if (!FD)
+ return;
+ // NO_THREAD_SAFETY_ANALYSIS does double duty here. Normally it
+ // only turns off checking within the body of a function, but we also
+ // use it to turn off checking in arguments to the function. This
+ // could result in some false negatives, but the alternative is to
+ // create yet another attribute.
+ if (FD->hasAttr<NoThreadSafetyAnalysisAttr>())
+ return;
+
+ const ArrayRef<ParmVarDecl *> Params = FD->parameters();
+ auto Param = Params.begin();
+ if (SkipFirstParam)
+ ++Param;
+
+ // There can be default arguments, so we stop when one iterator is at end().
+ for (auto Arg = ArgBegin; Param != Params.end() && Arg != ArgEnd;
+ ++Param, ++Arg) {
+ QualType Qt = (*Param)->getType();
+ if (Qt->isReferenceType())
+ checkAccess(*Arg, AK_Read, POK_PassByRef);
+ }
+}
+
+void BuildLockset::VisitCallExpr(const CallExpr *Exp) {
if (const auto *CE = dyn_cast<CXXMemberCallExpr>(Exp)) {
const auto *ME = dyn_cast<MemberExpr>(CE->getCallee());
// ME can be null when calling a method pointer
@@ -1956,13 +2003,12 @@ void BuildLockset::VisitCallExpr(const CallExpr *Exp) {
checkAccess(CE->getImplicitObjectArgument(), AK_Read);
}
}
- } else if (const auto *OE = dyn_cast<CXXOperatorCallExpr>(Exp)) {
- OperatorFun = true;
+ examineArguments(CE->getDirectCallee(), CE->arg_begin(), CE->arg_end());
+ } else if (const auto *OE = dyn_cast<CXXOperatorCallExpr>(Exp)) {
auto OEop = OE->getOperator();
switch (OEop) {
case OO_Equal: {
- ExamineArgs = false;
const Expr *Target = OE->getArg(0);
const Expr *Source = OE->getArg(1);
checkAccess(Target, AK_Written);
@@ -1971,60 +2017,27 @@ void BuildLockset::VisitCallExpr(const CallExpr *Exp) {
}
case OO_Star:
case OO_Arrow:
- case OO_Subscript: {
- const Expr *Obj = OE->getArg(0);
- checkAccess(Obj, AK_Read);
+ case OO_Subscript:
if (!(OEop == OO_Star && OE->getNumArgs() > 1)) {
// Grrr. operator* can be multiplication...
- checkPtAccess(Obj, AK_Read);
+ checkPtAccess(OE->getArg(0), AK_Read);
}
- break;
- }
+ LLVM_FALLTHROUGH;
default: {
// TODO: get rid of this, and rely on pass-by-ref instead.
const Expr *Obj = OE->getArg(0);
checkAccess(Obj, AK_Read);
+ // Check the remaining arguments. For method operators, the first
+ // argument is the implicit self argument, and doesn't appear in the
+ // FunctionDecl, but for non-methods it does.
+ const FunctionDecl *FD = OE->getDirectCallee();
+ examineArguments(FD, std::next(OE->arg_begin()), OE->arg_end(),
+ /*SkipFirstParam*/ !isa<CXXMethodDecl>(FD));
break;
}
}
- }
-
- if (ExamineArgs) {
- if (const FunctionDecl *FD = Exp->getDirectCallee()) {
- // NO_THREAD_SAFETY_ANALYSIS does double duty here. Normally it
- // only turns off checking within the body of a function, but we also
- // use it to turn off checking in arguments to the function. This
- // could result in some false negatives, but the alternative is to
- // create yet another attribute.
- if (!FD->hasAttr<NoThreadSafetyAnalysisAttr>()) {
- unsigned Fn = FD->getNumParams();
- unsigned Cn = Exp->getNumArgs();
- unsigned Skip = 0;
-
- unsigned i = 0;
- if (OperatorFun) {
- if (isa<CXXMethodDecl>(FD)) {
- // First arg in operator call is implicit self argument,
- // and doesn't appear in the FunctionDecl.
- Skip = 1;
- Cn--;
- } else {
- // Ignore the first argument of operators; it's been checked above.
- i = 1;
- }
- }
- // Ignore default arguments
- unsigned n = (Fn < Cn) ? Fn : Cn;
-
- for (; i < n; ++i) {
- const ParmVarDecl *Pvd = FD->getParamDecl(i);
- const Expr *Arg = Exp->getArg(i + Skip);
- QualType Qt = Pvd->getType();
- if (Qt->isReferenceType())
- checkAccess(Arg, AK_Read, POK_PassByRef);
- }
- }
- }
+ } else {
+ examineArguments(Exp->getDirectCallee(), Exp->arg_begin(), Exp->arg_end());
}
auto *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
@@ -2038,8 +2051,9 @@ void BuildLockset::VisitCXXConstructExpr(const CXXConstructExpr *Exp) {
if (D && D->isCopyConstructor()) {
const Expr* Source = Exp->getArg(0);
checkAccess(Source, AK_Read);
+ } else {
+ examineArguments(D, Exp->arg_begin(), Exp->arg_end());
}
- // FIXME -- only handles constructors in DeclStmt below.
}
static CXXConstructorDecl *
diff --git a/lib/Analysis/ThreadSafetyCommon.cpp b/lib/Analysis/ThreadSafetyCommon.cpp
index 115c012a05..0c09974cf4 100644
--- a/lib/Analysis/ThreadSafetyCommon.cpp
+++ b/lib/Analysis/ThreadSafetyCommon.cpp
@@ -235,6 +235,8 @@ til::SExpr *SExprBuilder::translate(const Stmt *S, CallingContext *Ctx) {
cast<BinaryConditionalOperator>(S), Ctx);
// We treat these as no-ops
+ case Stmt::ConstantExprClass:
+ return translate(cast<ConstantExpr>(S)->getSubExpr(), Ctx);
case Stmt::ParenExprClass:
return translate(cast<ParenExpr>(S)->getSubExpr(), Ctx);
case Stmt::ExprWithCleanupsClass:
diff --git a/lib/Basic/Attributes.cpp b/lib/Basic/Attributes.cpp
index b7570d03c8..9a8eb3d932 100644
--- a/lib/Basic/Attributes.cpp
+++ b/lib/Basic/Attributes.cpp
@@ -12,9 +12,16 @@ int clang::hasAttribute(AttrSyntax Syntax, const IdentifierInfo *Scope,
if (Name.size() >= 4 && Name.startswith("__") && Name.endswith("__"))
Name = Name.substr(2, Name.size() - 4);
+ // Normalize the scope name, but only for gnu and clang attributes.
+ StringRef ScopeName = Scope ? Scope->getName() : "";
+ if (ScopeName == "__gnu__")
+ ScopeName = "gnu";
+ else if (ScopeName == "_Clang")
+ ScopeName = "clang";
+
#include "clang/Basic/AttrHasAttributeImpl.inc"
- return 0;
+ return 0;
}
const char *attr::getSubjectMatchRuleSpelling(attr::SubjectMatchRule Rule) {
diff --git a/lib/Basic/Builtins.cpp b/lib/Basic/Builtins.cpp
index a3210ba090..7e7f67ca87 100644
--- a/lib/Basic/Builtins.cpp
+++ b/lib/Basic/Builtins.cpp
@@ -68,7 +68,7 @@ bool Builtin::Context::builtinIsSupported(const Builtin::Info &BuiltinInfo,
bool GnuModeUnsupported = !LangOpts.GNUMode && (BuiltinInfo.Langs & GNU_LANG);
bool MSModeUnsupported =
!LangOpts.MicrosoftExt && (BuiltinInfo.Langs & MS_LANG);
- bool ObjCUnsupported = !LangOpts.ObjC1 && BuiltinInfo.Langs == OBJC_LANG;
+ bool ObjCUnsupported = !LangOpts.ObjC && BuiltinInfo.Langs == OBJC_LANG;
bool OclC1Unsupported = (LangOpts.OpenCLVersion / 100) != 1 &&
(BuiltinInfo.Langs & ALL_OCLC_LANGUAGES ) == OCLC1X_LANG;
bool OclC2Unsupported = LangOpts.OpenCLVersion != 200 &&
diff --git a/lib/Basic/CMakeLists.txt b/lib/Basic/CMakeLists.txt
index f1270231e5..0015ede742 100644
--- a/lib/Basic/CMakeLists.txt
+++ b/lib/Basic/CMakeLists.txt
@@ -48,6 +48,7 @@ add_clang_library(clangBasic
Attributes.cpp
Builtins.cpp
CharInfo.cpp
+ CodeGenOptions.cpp
Cuda.cpp
Diagnostic.cpp
DiagnosticIDs.cpp
@@ -71,6 +72,7 @@ add_clang_library(clangBasic
Targets.cpp
Targets/AArch64.cpp
Targets/AMDGPU.cpp
+ Targets/ARC.cpp
Targets/ARM.cpp
Targets/AVR.cpp
Targets/BPF.cpp
@@ -94,7 +96,6 @@ add_clang_library(clangBasic
Targets/XCore.cpp
TokenKinds.cpp
Version.cpp
- VirtualFileSystem.cpp
Warnings.cpp
XRayInstr.cpp
XRayLists.cpp
diff --git a/lib/Frontend/CodeGenOptions.cpp b/lib/Basic/CodeGenOptions.cpp
index 84a39f2d57..aface1cd4b 100644
--- a/lib/Frontend/CodeGenOptions.cpp
+++ b/lib/Basic/CodeGenOptions.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/Basic/CodeGenOptions.h"
#include <string.h>
namespace clang {
@@ -15,7 +15,7 @@ namespace clang {
CodeGenOptions::CodeGenOptions() {
#define CODEGENOPT(Name, Bits, Default) Name = Default;
#define ENUM_CODEGENOPT(Name, Type, Bits, Default) set##Name(Default);
-#include "clang/Frontend/CodeGenOptions.def"
+#include "clang/Basic/CodeGenOptions.def"
RelocationModel = llvm::Reloc::PIC_;
memcpy(CoverageVersion, "402*", 4);
diff --git a/lib/Basic/Cuda.cpp b/lib/Basic/Cuda.cpp
index 43400c39a7..6c34856dfd 100644
--- a/lib/Basic/Cuda.cpp
+++ b/lib/Basic/Cuda.cpp
@@ -90,6 +90,12 @@ const char *CudaArchToString(CudaArch A) {
return "gfx900";
case CudaArch::GFX902: // TBA
return "gfx902";
+ case CudaArch::GFX904: // TBA
+ return "gfx904";
+ case CudaArch::GFX906: // TBA
+ return "gfx906";
+ case CudaArch::GFX909: // TBA
+ return "gfx909";
}
llvm_unreachable("invalid enum");
}
@@ -124,6 +130,9 @@ CudaArch StringToCudaArch(llvm::StringRef S) {
.Case("gfx810", CudaArch::GFX810)
.Case("gfx900", CudaArch::GFX900)
.Case("gfx902", CudaArch::GFX902)
+ .Case("gfx904", CudaArch::GFX904)
+ .Case("gfx906", CudaArch::GFX906)
+ .Case("gfx909", CudaArch::GFX909)
.Default(CudaArch::UNKNOWN);
}
@@ -233,6 +242,9 @@ CudaVirtualArch VirtualArchForCudaArch(CudaArch A) {
case CudaArch::GFX810:
case CudaArch::GFX900:
case CudaArch::GFX902:
+ case CudaArch::GFX904:
+ case CudaArch::GFX906:
+ case CudaArch::GFX909:
return CudaVirtualArch::COMPUTE_AMDGCN;
}
llvm_unreachable("invalid enum");
@@ -277,6 +289,9 @@ CudaVersion MinVersionForCudaArch(CudaArch A) {
case CudaArch::GFX810:
case CudaArch::GFX900:
case CudaArch::GFX902:
+ case CudaArch::GFX904:
+ case CudaArch::GFX906:
+ case CudaArch::GFX909:
return CudaVersion::CUDA_70;
}
llvm_unreachable("invalid enum");
diff --git a/lib/Basic/Diagnostic.cpp b/lib/Basic/Diagnostic.cpp
index 88ee319b5c..ffe92e157e 100644
--- a/lib/Basic/Diagnostic.cpp
+++ b/lib/Basic/Diagnostic.cpp
@@ -89,6 +89,14 @@ DiagnosticsEngine::~DiagnosticsEngine() {
setClient(nullptr);
}
+void DiagnosticsEngine::dump() const {
+ DiagStatesByLoc.dump(*SourceMgr);
+}
+
+void DiagnosticsEngine::dump(StringRef DiagName) const {
+ DiagStatesByLoc.dump(*SourceMgr, DiagName);
+}
+
void DiagnosticsEngine::setClient(DiagnosticConsumer *client,
bool ShouldOwnClient) {
Owner.reset(ShouldOwnClient ? client : nullptr);
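The two dump() entry points above are debugging aids for the per-location diagnostic state kept in DiagStatesByLoc (e.g. the effect of #pragma clang diagnostic). A hedged usage sketch, for a debugger session or temporary instrumentation; the engine must have a SourceManager attached, and the string overload is assumed to narrow the dump to one diagnostic:

#include "clang/Basic/Diagnostic.h"

void debugDiagState(clang::DiagnosticsEngine &Diags) {
  Diags.dump();                    // dump the whole diagnostic-state table
  Diags.dump("unused-variable");   // assumed: restrict output to this diagnostic name
}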
diff --git a/lib/Basic/FileManager.cpp b/lib/Basic/FileManager.cpp
index a45a8f239a..455d25c100 100644
--- a/lib/Basic/FileManager.cpp
+++ b/lib/Basic/FileManager.cpp
@@ -49,7 +49,7 @@ using namespace clang;
//===----------------------------------------------------------------------===//
FileManager::FileManager(const FileSystemOptions &FSO,
- IntrusiveRefCntPtr<vfs::FileSystem> FS)
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS)
: FS(std::move(FS)), FileSystemOpts(FSO), SeenDirEntries(64),
SeenFileEntries(64), NextFileUID(0) {
NumDirLookups = NumFileLookups = 0;
@@ -58,7 +58,7 @@ FileManager::FileManager(const FileSystemOptions &FSO,
// If the caller doesn't provide a virtual file system, just grab the real
// file system.
if (!this->FS)
- this->FS = vfs::getRealFileSystem();
+ this->FS = llvm::vfs::getRealFileSystem();
}
FileManager::~FileManager() = default;
@@ -221,15 +221,21 @@ const FileEntry *FileManager::getFile(StringRef Filename, bool openFile,
*SeenFileEntries.insert(std::make_pair(Filename, nullptr)).first;
// See if there is already an entry in the map.
- if (NamedFileEnt.second)
- return NamedFileEnt.second == NON_EXISTENT_FILE ? nullptr
- : NamedFileEnt.second;
+ if (NamedFileEnt.second) {
+ if (NamedFileEnt.second == NON_EXISTENT_FILE)
+ return nullptr;
+ // Entry exists: return it *unless* it wasn't opened and open is requested.
+ if (!(NamedFileEnt.second->DeferredOpen && openFile))
+ return NamedFileEnt.second;
+ // We previously stat()ed the file, but didn't open it: do that below.
+ // FIXME: the below does other redundant work too (stats the dir and file).
+ } else {
+ // By default, initialize it to invalid.
+ NamedFileEnt.second = NON_EXISTENT_FILE;
+ }
++NumFileCacheMisses;
- // By default, initialize it to invalid.
- NamedFileEnt.second = NON_EXISTENT_FILE;
-
// Get the null-terminated file name as stored as the key of the
// SeenFileEntries map.
StringRef InterndFileName = NamedFileEnt.first();
@@ -252,7 +258,7 @@ const FileEntry *FileManager::getFile(StringRef Filename, bool openFile,
// FIXME: This will reduce the # syscalls.
// Nope, there isn't. Check to see if the file exists.
- std::unique_ptr<vfs::File> F;
+ std::unique_ptr<llvm::vfs::File> F;
FileData Data;
if (getStatValue(InterndFileName, Data, true, openFile ? &F : nullptr)) {
// There's no real file at the given path.
@@ -267,6 +273,7 @@ const FileEntry *FileManager::getFile(StringRef Filename, bool openFile,
// It exists. See if we have already opened a file with the same inode.
// This occurs when one dir is symlinked to another, for example.
FileEntry &UFE = UniqueRealFiles[Data.UniqueID];
+ UFE.DeferredOpen = !openFile;
NamedFileEnt.second = &UFE;
@@ -283,6 +290,15 @@ const FileEntry *FileManager::getFile(StringRef Filename, bool openFile,
InterndFileName = NamedFileEnt.first().data();
}
+ // If we opened the file for the first time, record the resulting info.
+  // Do this even if the cache entry was valid; maybe we didn't open it before.
+ if (F && !UFE.File) {
+ if (auto PathName = F->getName())
+ fillRealPathName(&UFE, *PathName);
+ UFE.File = std::move(F);
+ assert(!UFE.DeferredOpen && "we just opened it!");
+ }
+
if (UFE.isValid()) { // Already have an entry with this inode, return it.
// FIXME: this hack ensures that if we look up a file by a virtual path in
@@ -313,21 +329,9 @@ const FileEntry *FileManager::getFile(StringRef Filename, bool openFile,
UFE.UniqueID = Data.UniqueID;
UFE.IsNamedPipe = Data.IsNamedPipe;
UFE.InPCH = Data.InPCH;
- UFE.File = std::move(F);
UFE.IsValid = true;
+ // Note File and DeferredOpen were initialized above.
- if (UFE.File) {
- if (auto PathName = UFE.File->getName()) {
- llvm::SmallString<128> AbsPath(*PathName);
- // This is not the same as `VFS::getRealPath()`, which resolves symlinks
- // but can be very expensive on real file systems.
- // FIXME: the semantic of RealPathName is unclear, and the name might be
- // misleading. We need to clean up the interface here.
- makeAbsolutePath(AbsPath);
- llvm::sys::path::remove_dots(AbsPath, /*remove_dot_dot=*/true);
- UFE.RealPathName = AbsPath.str();
- }
- }
return &UFE;
}
@@ -383,6 +387,7 @@ FileManager::getVirtualFile(StringRef Filename, off_t Size,
UFE->UniqueID = Data.UniqueID;
UFE->IsNamedPipe = Data.IsNamedPipe;
UFE->InPCH = Data.InPCH;
+ fillRealPathName(UFE, Data.Name);
}
if (!UFE) {
@@ -398,6 +403,7 @@ FileManager::getVirtualFile(StringRef Filename, off_t Size,
UFE->UID = NextFileUID++;
UFE->IsValid = true;
UFE->File.reset();
+ UFE->DeferredOpen = false;
return UFE;
}
@@ -425,6 +431,17 @@ bool FileManager::makeAbsolutePath(SmallVectorImpl<char> &Path) const {
return Changed;
}
+void FileManager::fillRealPathName(FileEntry *UFE, llvm::StringRef FileName) {
+ llvm::SmallString<128> AbsPath(FileName);
+ // This is not the same as `VFS::getRealPath()`, which resolves symlinks
+ // but can be very expensive on real file systems.
+ // FIXME: the semantic of RealPathName is unclear, and the name might be
+ // misleading. We need to clean up the interface here.
+ makeAbsolutePath(AbsPath);
+ llvm::sys::path::remove_dots(AbsPath, /*remove_dot_dot=*/true);
+ UFE->RealPathName = AbsPath.str();
+}
+
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
FileManager::getBufferForFile(const FileEntry *Entry, bool isVolatile,
bool ShouldCloseOpenFile) {
@@ -475,7 +492,7 @@ FileManager::getBufferForFile(StringRef Filename, bool isVolatile) {
/// false if it's an existent real file. If FileDescriptor is NULL,
/// do directory look-up instead of file look-up.
bool FileManager::getStatValue(StringRef Path, FileData &Data, bool isFile,
- std::unique_ptr<vfs::File> *F) {
+ std::unique_ptr<llvm::vfs::File> *F) {
// FIXME: FileSystemOpts shouldn't be passed in here, all paths should be
// absolute!
if (FileSystemOpts.WorkingDir.empty())
@@ -489,11 +506,11 @@ bool FileManager::getStatValue(StringRef Path, FileData &Data, bool isFile,
}
bool FileManager::getNoncachedStatValue(StringRef Path,
- vfs::Status &Result) {
+ llvm::vfs::Status &Result) {
SmallString<128> FilePath(Path);
FixupRelativePath(FilePath);
- llvm::ErrorOr<vfs::Status> S = FS->status(FilePath.c_str());
+ llvm::ErrorOr<llvm::vfs::Status> S = FS->status(FilePath.c_str());
if (!S)
return true;
Result = *S;
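The DeferredOpen flag introduced above changes what repeated FileManager::getFile() lookups do: an entry first created with openFile=false no longer prevents a later caller from actually opening the file. A short sketch of the intended sequence (the file name is illustrative):

#include "clang/Basic/FileManager.h"
#include "clang/Basic/FileSystemOptions.h"

void lookupTwice() {
  clang::FileManager FM{clang::FileSystemOptions()};

  // Stat-only lookup: the entry is cached with DeferredOpen set.
  const clang::FileEntry *First = FM.getFile("input.c", /*openFile=*/false);

  // Previously this returned the cached, never-opened entry unchanged; with
  // the change the deferred open is performed on the second request.
  const clang::FileEntry *Second = FM.getFile("input.c", /*openFile=*/true);

  (void)First;
  (void)Second;
}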
diff --git a/lib/Basic/FileSystemStatCache.cpp b/lib/Basic/FileSystemStatCache.cpp
index f5856cb654..9a515e89e2 100644
--- a/lib/Basic/FileSystemStatCache.cpp
+++ b/lib/Basic/FileSystemStatCache.cpp
@@ -12,17 +12,17 @@
//===----------------------------------------------------------------------===//
#include "clang/Basic/FileSystemStatCache.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "llvm/Support/Chrono.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include <utility>
using namespace clang;
void FileSystemStatCache::anchor() {}
-static void copyStatusToFileData(const vfs::Status &Status,
+static void copyStatusToFileData(const llvm::vfs::Status &Status,
FileData &Data) {
Data.Name = Status.getName();
Data.Size = Status.getSize();
@@ -44,8 +44,9 @@ static void copyStatusToFileData(const vfs::Status &Status,
/// implementation can optionally fill in FileDescriptor with a valid
/// descriptor and the client guarantees that it will close it.
bool FileSystemStatCache::get(StringRef Path, FileData &Data, bool isFile,
- std::unique_ptr<vfs::File> *F,
- FileSystemStatCache *Cache, vfs::FileSystem &FS) {
+ std::unique_ptr<llvm::vfs::File> *F,
+ FileSystemStatCache *Cache,
+ llvm::vfs::FileSystem &FS) {
LookupResult R;
bool isForDir = !isFile;
@@ -55,7 +56,7 @@ bool FileSystemStatCache::get(StringRef Path, FileData &Data, bool isFile,
else if (isForDir || !F) {
// If this is a directory or a file descriptor is not needed and we have
// no cache, just go to the file system.
- llvm::ErrorOr<vfs::Status> Status = FS.status(Path);
+ llvm::ErrorOr<llvm::vfs::Status> Status = FS.status(Path);
if (!Status) {
R = CacheMissing;
} else {
@@ -79,7 +80,7 @@ bool FileSystemStatCache::get(StringRef Path, FileData &Data, bool isFile,
// Otherwise, the open succeeded. Do an fstat to get the information
// about the file. We'll end up returning the open file descriptor to the
// client to do what they please with it.
- llvm::ErrorOr<vfs::Status> Status = (*OwnedFile)->status();
+ llvm::ErrorOr<llvm::vfs::Status> Status = (*OwnedFile)->status();
if (Status) {
R = CacheExists;
copyStatusToFileData(*Status, Data);
@@ -111,7 +112,8 @@ bool FileSystemStatCache::get(StringRef Path, FileData &Data, bool isFile,
MemorizeStatCalls::LookupResult
MemorizeStatCalls::getStat(StringRef Path, FileData &Data, bool isFile,
- std::unique_ptr<vfs::File> *F, vfs::FileSystem &FS) {
+ std::unique_ptr<llvm::vfs::File> *F,
+ llvm::vfs::FileSystem &FS) {
LookupResult Result = statChained(Path, Data, isFile, F, FS);
// Do not cache failed stats, it is easy to construct common inconsistent
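These vfs:: to llvm::vfs:: renames (and the header swap above) track the move of the virtual file system layer from clang/Basic into LLVM's Support library; the clang-side VirtualFileSystem.cpp is deleted further down in this patch. A minimal before/after sketch for client code:

// Before:
//   #include "clang/Basic/VirtualFileSystem.h"
//   IntrusiveRefCntPtr<clang::vfs::FileSystem> FS = clang::vfs::getRealFileSystem();

// After:
#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/Support/VirtualFileSystem.h"

llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS = llvm::vfs::getRealFileSystem();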
diff --git a/lib/Basic/IdentifierTable.cpp b/lib/Basic/IdentifierTable.cpp
index 147f3e0475..b961c8333b 100644
--- a/lib/Basic/IdentifierTable.cpp
+++ b/lib/Basic/IdentifierTable.cpp
@@ -34,28 +34,6 @@
using namespace clang;
//===----------------------------------------------------------------------===//
-// IdentifierInfo Implementation
-//===----------------------------------------------------------------------===//
-
-IdentifierInfo::IdentifierInfo() {
- TokenID = tok::identifier;
- ObjCOrBuiltinID = 0;
- HasMacro = false;
- HadMacro = false;
- IsExtension = false;
- IsFutureCompatKeyword = false;
- IsPoisoned = false;
- IsCPPOperatorKeyword = false;
- NeedsHandleIdentifier = false;
- IsFromAST = false;
- ChangedAfterLoad = false;
- FEChangedAfterLoad = false;
- RevertedTokenID = false;
- OutOfDate = false;
- IsModulesImport = false;
-}
-
-//===----------------------------------------------------------------------===//
// IdentifierTable Implementation
//===----------------------------------------------------------------------===//
@@ -99,30 +77,29 @@ IdentifierTable::IdentifierTable(const LangOptions &LangOpts,
namespace {
enum {
- KEYC99 = 0x1,
- KEYCXX = 0x2,
- KEYCXX11 = 0x4,
- KEYGNU = 0x8,
- KEYMS = 0x10,
- BOOLSUPPORT = 0x20,
- KEYALTIVEC = 0x40,
- KEYNOCXX = 0x80,
- KEYBORLAND = 0x100,
- KEYOPENCLC = 0x200,
- KEYC11 = 0x400,
- KEYARC = 0x800,
- KEYNOMS18 = 0x01000,
- KEYNOOPENCL = 0x02000,
- WCHARSUPPORT = 0x04000,
- HALFSUPPORT = 0x08000,
- CHAR8SUPPORT = 0x10000,
- KEYCONCEPTS = 0x20000,
- KEYOBJC2 = 0x40000,
- KEYZVECTOR = 0x80000,
- KEYCOROUTINES = 0x100000,
- KEYMODULES = 0x200000,
- KEYCXX2A = 0x400000,
- KEYOPENCLCXX = 0x800000,
+ KEYC99 = 0x1,
+ KEYCXX = 0x2,
+ KEYCXX11 = 0x4,
+ KEYGNU = 0x8,
+ KEYMS = 0x10,
+ BOOLSUPPORT = 0x20,
+ KEYALTIVEC = 0x40,
+ KEYNOCXX = 0x80,
+ KEYBORLAND = 0x100,
+ KEYOPENCLC = 0x200,
+ KEYC11 = 0x400,
+ KEYNOMS18 = 0x800,
+ KEYNOOPENCL = 0x1000,
+ WCHARSUPPORT = 0x2000,
+ HALFSUPPORT = 0x4000,
+ CHAR8SUPPORT = 0x8000,
+ KEYCONCEPTS = 0x10000,
+ KEYOBJC = 0x20000,
+ KEYZVECTOR = 0x40000,
+ KEYCOROUTINES = 0x80000,
+ KEYMODULES = 0x100000,
+ KEYCXX2A = 0x200000,
+ KEYOPENCLCXX = 0x400000,
KEYALLCXX = KEYCXX | KEYCXX11 | KEYCXX2A,
KEYALL = (0xffffff & ~KEYNOMS18 &
~KEYNOOPENCL) // KEYNOMS18 and KEYNOOPENCL are used to exclude.
@@ -155,6 +132,7 @@ static KeywordStatus getKeywordStatus(const LangOptions &LangOpts,
if (LangOpts.WChar && (Flags & WCHARSUPPORT)) return KS_Enabled;
if (LangOpts.Char8 && (Flags & CHAR8SUPPORT)) return KS_Enabled;
if (LangOpts.AltiVec && (Flags & KEYALTIVEC)) return KS_Enabled;
+ if (LangOpts.ZVector && (Flags & KEYZVECTOR)) return KS_Enabled;
if (LangOpts.OpenCL && !LangOpts.OpenCLCPlusPlus && (Flags & KEYOPENCLC))
return KS_Enabled;
if (LangOpts.OpenCLCPlusPlus && (Flags & KEYOPENCLCXX)) return KS_Enabled;
@@ -162,8 +140,7 @@ static KeywordStatus getKeywordStatus(const LangOptions &LangOpts,
if (LangOpts.C11 && (Flags & KEYC11)) return KS_Enabled;
// We treat bridge casts as objective-C keywords so we can warn on them
// in non-arc mode.
- if (LangOpts.ObjC2 && (Flags & KEYARC)) return KS_Enabled;
- if (LangOpts.ObjC2 && (Flags & KEYOBJC2)) return KS_Enabled;
+ if (LangOpts.ObjC && (Flags & KEYOBJC)) return KS_Enabled;
if (LangOpts.ConceptsTS && (Flags & KEYCONCEPTS)) return KS_Enabled;
if (LangOpts.CoroutinesTS && (Flags & KEYCOROUTINES)) return KS_Enabled;
if (LangOpts.ModulesTS && (Flags & KEYMODULES)) return KS_Enabled;
@@ -227,11 +204,8 @@ void IdentifierTable::AddKeywords(const LangOptions &LangOpts) {
#define CXX_KEYWORD_OPERATOR(NAME, ALIAS) \
if (LangOpts.CXXOperatorNames) \
AddCXXOperatorKeyword(StringRef(#NAME), tok::ALIAS, *this);
-#define OBJC1_AT_KEYWORD(NAME) \
- if (LangOpts.ObjC1) \
- AddObjCKeyword(StringRef(#NAME), tok::objc_##NAME, *this);
-#define OBJC2_AT_KEYWORD(NAME) \
- if (LangOpts.ObjC2) \
+#define OBJC_AT_KEYWORD(NAME) \
+ if (LangOpts.ObjC) \
AddObjCKeyword(StringRef(#NAME), tok::objc_##NAME, *this);
#define TESTING_KEYWORD(NAME, FLAGS)
#include "clang/Basic/TokenKinds.def"
@@ -583,6 +557,7 @@ ObjCInstanceTypeFamily Selector::getInstTypeMethodFamily(Selector sel) {
break;
case 'i':
if (startsWithWord(name, "init")) return OIT_Init;
+ break;
default:
break;
}
diff --git a/lib/Basic/Module.cpp b/lib/Basic/Module.cpp
index 440151dd06..fd552f2baa 100644
--- a/lib/Basic/Module.cpp
+++ b/lib/Basic/Module.cpp
@@ -119,7 +119,7 @@ static bool hasFeature(StringRef Feature, const LangOptions &LangOpts,
.Case("c17", LangOpts.C17)
.Case("freestanding", LangOpts.Freestanding)
.Case("gnuinlineasm", LangOpts.GNUAsm)
- .Case("objc", LangOpts.ObjC1)
+ .Case("objc", LangOpts.ObjC)
.Case("objc_arc", LangOpts.ObjCAutoRefCount)
.Case("opencl", LangOpts.OpenCL)
.Case("tls", Target.isTLSSupported())
diff --git a/lib/Basic/OpenMPKinds.cpp b/lib/Basic/OpenMPKinds.cpp
index 2251d89754..dcd36c456f 100644
--- a/lib/Basic/OpenMPKinds.cpp
+++ b/lib/Basic/OpenMPKinds.cpp
@@ -125,6 +125,12 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind,
.Case(#Name, static_cast<unsigned>(OMPC_DEFAULTMAP_MODIFIER_##Name))
#include "clang/Basic/OpenMPKinds.def"
.Default(OMPC_DEFAULTMAP_unknown);
+ case OMPC_atomic_default_mem_order:
+ return llvm::StringSwitch<OpenMPAtomicDefaultMemOrderClauseKind>(Str)
+#define OPENMP_ATOMIC_DEFAULT_MEM_ORDER_KIND(Name) \
+ .Case(#Name, OMPC_ATOMIC_DEFAULT_MEM_ORDER_##Name)
+#include "clang/Basic/OpenMPKinds.def"
+ .Default(OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown);
case OMPC_unknown:
case OMPC_threadprivate:
case OMPC_if:
@@ -169,6 +175,9 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind,
case OMPC_use_device_ptr:
case OMPC_is_device_ptr:
case OMPC_unified_address:
+ case OMPC_unified_shared_memory:
+ case OMPC_reverse_offload:
+ case OMPC_dynamic_allocators:
break;
}
llvm_unreachable("Invalid OpenMP simple clause kind");
@@ -267,6 +276,16 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
#include "clang/Basic/OpenMPKinds.def"
}
llvm_unreachable("Invalid OpenMP 'schedule' clause type");
+ case OMPC_atomic_default_mem_order:
+ switch (Type) {
+ case OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown:
+ return "unknown";
+#define OPENMP_ATOMIC_DEFAULT_MEM_ORDER_KIND(Name) \
+ case OMPC_ATOMIC_DEFAULT_MEM_ORDER_##Name: \
+ return #Name;
+#include "clang/Basic/OpenMPKinds.def"
+}
+ llvm_unreachable("Invalid OpenMP 'atomic_default_mem_order' clause type");
case OMPC_unknown:
case OMPC_threadprivate:
case OMPC_if:
@@ -311,6 +330,9 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
case OMPC_use_device_ptr:
case OMPC_is_device_ptr:
case OMPC_unified_address:
+ case OMPC_unified_shared_memory:
+ case OMPC_reverse_offload:
+ case OMPC_dynamic_allocators:
break;
}
llvm_unreachable("Invalid OpenMP simple clause kind");
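The new clause kinds handled above belong to the OpenMP 5.0 'requires' directive. Roughly, these are the source forms the parser maps onto them (illustrative; whether the rest of Sema and codegen honors each clause is outside this hunk):

#pragma omp requires atomic_default_mem_order(seq_cst)
#pragma omp requires unified_shared_memory
#pragma omp requires reverse_offload
#pragma omp requires dynamic_allocators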
diff --git a/lib/Basic/SourceManager.cpp b/lib/Basic/SourceManager.cpp
index efa6ad2493..ce8aa5d112 100644
--- a/lib/Basic/SourceManager.cpp
+++ b/lib/Basic/SourceManager.cpp
@@ -195,8 +195,7 @@ llvm::MemoryBuffer *ContentCache::getBuffer(DiagnosticsEngine &Diag,
}
unsigned LineTableInfo::getLineTableFilenameID(StringRef Name) {
- auto IterBool =
- FilenameIDs.insert(std::make_pair(Name, FilenamesByID.size()));
+ auto IterBool = FilenameIDs.try_emplace(Name, FilenamesByID.size());
if (IterBool.second)
FilenamesByID.push_back(&*IterBool.first);
return IterBool.first->second;
@@ -1217,65 +1216,22 @@ static void ComputeLineNumbers(DiagnosticsEngine &Diag, ContentCache *FI,
const unsigned char *Buf = (const unsigned char *)Buffer->getBufferStart();
const unsigned char *End = (const unsigned char *)Buffer->getBufferEnd();
- unsigned Offs = 0;
+ unsigned I = 0;
while (true) {
// Skip over the contents of the line.
- const unsigned char *NextBuf = (const unsigned char *)Buf;
-
-#ifdef __SSE2__
- // Try to skip to the next newline using SSE instructions. This is very
- // performance sensitive for programs with lots of diagnostics and in -E
- // mode.
- __m128i CRs = _mm_set1_epi8('\r');
- __m128i LFs = _mm_set1_epi8('\n');
-
- // First fix up the alignment to 16 bytes.
- while (((uintptr_t)NextBuf & 0xF) != 0) {
- if (*NextBuf == '\n' || *NextBuf == '\r' || *NextBuf == '\0')
- goto FoundSpecialChar;
- ++NextBuf;
- }
-
- // Scan 16 byte chunks for '\r' and '\n'. Ignore '\0'.
- while (NextBuf+16 <= End) {
- const __m128i Chunk = *(const __m128i*)NextBuf;
- __m128i Cmp = _mm_or_si128(_mm_cmpeq_epi8(Chunk, CRs),
- _mm_cmpeq_epi8(Chunk, LFs));
- unsigned Mask = _mm_movemask_epi8(Cmp);
-
- // If we found a newline, adjust the pointer and jump to the handling code.
- if (Mask != 0) {
- NextBuf += llvm::countTrailingZeros(Mask);
- goto FoundSpecialChar;
- }
- NextBuf += 16;
- }
-#endif
-
- while (*NextBuf != '\n' && *NextBuf != '\r' && *NextBuf != '\0')
- ++NextBuf;
-
-#ifdef __SSE2__
-FoundSpecialChar:
-#endif
- Offs += NextBuf-Buf;
- Buf = NextBuf;
-
- if (Buf[0] == '\n' || Buf[0] == '\r') {
- // If this is \n\r or \r\n, skip both characters.
- if ((Buf[1] == '\n' || Buf[1] == '\r') && Buf[0] != Buf[1]) {
- ++Offs;
- ++Buf;
- }
- ++Offs;
- ++Buf;
- LineOffsets.push_back(Offs);
+ while (Buf[I] != '\n' && Buf[I] != '\r' && Buf[I] != '\0')
+ ++I;
+
+ if (Buf[I] == '\n' || Buf[I] == '\r') {
+ // If this is \r\n, skip both characters.
+ if (Buf[I] == '\r' && Buf[I+1] == '\n')
+ ++I;
+ ++I;
+ LineOffsets.push_back(I);
} else {
- // Otherwise, this is a null. If end of file, exit.
- if (Buf == End) break;
- // Otherwise, skip the null.
- ++Offs;
- ++Buf;
+ // Otherwise, this is a NUL. If end of file, exit.
+ if (Buf+I == End) break;
+ ++I;
}
}
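The rewritten loop above drops the SSE2 fast path and scans by index. It still relies on the buffer being NUL-terminated at End (a MemoryBuffer guarantee), and it now folds only "\r\n" into a single terminator, whereas the removed code also folded "\n\r". A self-contained restatement of the same logic (not the actual clang code):

#include <vector>

// Offsets of the start of each line in a NUL-terminated buffer [Buf, End).
std::vector<unsigned> computeLineOffsets(const char *Buf, const char *End) {
  std::vector<unsigned> Offsets{0};              // line 1 starts at offset 0
  unsigned I = 0;
  while (true) {
    while (Buf[I] != '\n' && Buf[I] != '\r' && Buf[I] != '\0')
      ++I;
    if (Buf[I] == '\n' || Buf[I] == '\r') {
      if (Buf[I] == '\r' && Buf[I + 1] == '\n')  // "\r\n" counts as one terminator
        ++I;
      ++I;
      Offsets.push_back(I);
    } else {
      if (Buf + I == End)                        // the terminating NUL: done
        break;
      ++I;                                       // an embedded NUL: skip it
    }
  }
  return Offsets;
}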
@@ -1965,9 +1921,7 @@ SourceManager::getDecomposedIncludedLoc(FileID FID) const {
// Uses IncludedLocMap to retrieve/cache the decomposed loc.
using DecompTy = std::pair<FileID, unsigned>;
- using MapTy = llvm::DenseMap<FileID, DecompTy>;
- std::pair<MapTy::iterator, bool>
- InsertOp = IncludedLocMap.insert(std::make_pair(FID, DecompTy()));
+ auto InsertOp = IncludedLocMap.try_emplace(FID);
DecompTy &DecompLoc = InsertOp.first->second;
if (!InsertOp.second)
return DecompLoc; // already in map.
@@ -2263,8 +2217,8 @@ SourceManagerForFile::SourceManagerForFile(StringRef FileName,
StringRef Content) {
// This is referenced by `FileMgr` and will be released by `FileMgr` when it
// is deleted.
- IntrusiveRefCntPtr<vfs::InMemoryFileSystem> InMemoryFileSystem(
- new vfs::InMemoryFileSystem);
+ IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
+ new llvm::vfs::InMemoryFileSystem);
InMemoryFileSystem->addFile(
FileName, 0,
llvm::MemoryBuffer::getMemBuffer(Content, FileName,
diff --git a/lib/Basic/Targets.cpp b/lib/Basic/Targets.cpp
index 1ef2fe3b81..f79da4e576 100644
--- a/lib/Basic/Targets.cpp
+++ b/lib/Basic/Targets.cpp
@@ -16,6 +16,7 @@
#include "Targets/AArch64.h"
#include "Targets/AMDGPU.h"
+#include "Targets/ARC.h"
#include "Targets/ARM.h"
#include "Targets/AVR.h"
#include "Targets/BPF.h"
@@ -124,6 +125,9 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple,
default:
return nullptr;
+ case llvm::Triple::arc:
+ return new ARCTargetInfo(Triple, Opts);
+
case llvm::Triple::xcore:
return new XCoreTargetInfo(Triple, Opts);
@@ -495,6 +499,8 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return new NaClTargetInfo<X86_32TargetInfo>(Triple, Opts);
case llvm::Triple::ELFIAMCU:
return new MCUX86_32TargetInfo(Triple, Opts);
+ case llvm::Triple::Hurd:
+ return new HurdTargetInfo<X86_32TargetInfo>(Triple, Opts);
default:
return new X86_32TargetInfo(Triple, Opts);
}
@@ -640,7 +646,7 @@ TargetInfo::CreateTargetInfo(DiagnosticsEngine &Diags,
Opts->Features.push_back((F.getValue() ? "+" : "-") + F.getKey().str());
// Sort here, so we handle the features in a predictable order. (This matters
// when we're dealing with features that overlap.)
- llvm::sort(Opts->Features.begin(), Opts->Features.end());
+ llvm::sort(Opts->Features);
if (!Target->handleTargetFeatures(Opts->Features, Diags))
return nullptr;
diff --git a/lib/Basic/Targets/AArch64.cpp b/lib/Basic/Targets/AArch64.cpp
index 3444591ac5..376cba6e45 100644
--- a/lib/Basic/Targets/AArch64.cpp
+++ b/lib/Basic/Targets/AArch64.cpp
@@ -122,10 +122,9 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
// Target identification.
Builder.defineMacro("__aarch64__");
- // For bare-metal none-eabi.
+ // For bare-metal.
if (getTriple().getOS() == llvm::Triple::UnknownOS &&
- (getTriple().getEnvironment() == llvm::Triple::EABI ||
- getTriple().getEnvironment() == llvm::Triple::EABIHF))
+ getTriple().isOSBinFormatELF())
Builder.defineMacro("__ELF__");
// Target properties.
@@ -195,6 +194,9 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasDotProd)
Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");
+ if ((FPU & NeonMode) && HasFP16FML)
+ Builder.defineMacro("__ARM_FEATURE_FP16FML", "1");
+
switch (ArchKind) {
default:
break;
@@ -232,6 +234,7 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
Unaligned = 1;
HasFullFP16 = 0;
HasDotProd = 0;
+ HasFP16FML = 0;
ArchKind = llvm::AArch64::ArchKind::ARMV8A;
for (const auto &Feature : Features) {
@@ -253,6 +256,8 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasFullFP16 = 1;
if (Feature == "+dotprod")
HasDotProd = 1;
+ if (Feature == "+fp16fml")
+ HasFP16FML = 1;
}
setDataLayout();
@@ -268,6 +273,7 @@ AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
case CC_PreserveMost:
case CC_PreserveAll:
case CC_OpenCLKernel:
+ case CC_AArch64VectorCall:
case CC_Win64:
return CCCR_OK;
default:
@@ -508,6 +514,7 @@ WindowsARM64TargetInfo::checkCallingConvention(CallingConv CC) const {
case CC_OpenCLKernel:
case CC_PreserveMost:
case CC_PreserveAll:
+ case CC_Swift:
case CC_Win64:
return CCCR_OK;
default:
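With the HasFP16FML bit wired through handleTargetFeatures() above, targets that enable the fp16fml extension together with NEON now get a feature-test macro. A sketch of how user code would key off it (the '+fp16fml' -march suffix is the assumed driver spelling):

// e.g. built with -march=armv8.2-a+fp16fml
#if defined(__ARM_FEATURE_FP16FML)
// the FP16 FML instructions (fmlal/fmlsl) may be assumed available here
#endif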
diff --git a/lib/Basic/Targets/AArch64.h b/lib/Basic/Targets/AArch64.h
index a9df895e4d..d7f767abd4 100644
--- a/lib/Basic/Targets/AArch64.h
+++ b/lib/Basic/Targets/AArch64.h
@@ -34,6 +34,7 @@ class LLVM_LIBRARY_VISIBILITY AArch64TargetInfo : public TargetInfo {
unsigned Unaligned;
unsigned HasFullFP16;
unsigned HasDotProd;
+ unsigned HasFP16FML;
llvm::AArch64::ArchKind ArchKind;
static const Builtin::Info BuiltinInfo[];
diff --git a/lib/Basic/Targets/AMDGPU.cpp b/lib/Basic/Targets/AMDGPU.cpp
index fd267eb6ba..4f17b17ff4 100644
--- a/lib/Basic/Targets/AMDGPU.cpp
+++ b/lib/Basic/Targets/AMDGPU.cpp
@@ -13,10 +13,10 @@
#include "AMDGPU.h"
#include "clang/Basic/Builtins.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/MacroBuilder.h"
#include "clang/Basic/TargetBuiltins.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringSwitch.h"
using namespace clang;
@@ -138,6 +138,7 @@ bool AMDGPUTargetInfo::initFeatureMap(
case GK_GFX906:
Features["dl-insts"] = true;
LLVM_FALLTHROUGH;
+ case GK_GFX909:
case GK_GFX904:
case GK_GFX902:
case GK_GFX900:
diff --git a/lib/Basic/Targets/ARC.cpp b/lib/Basic/Targets/ARC.cpp
new file mode 100644
index 0000000000..2159ab8e20
--- /dev/null
+++ b/lib/Basic/Targets/ARC.cpp
@@ -0,0 +1,25 @@
+//===--- ARC.cpp - Implement ARC target feature support -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements ARC TargetInfo objects.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ARC.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/MacroBuilder.h"
+#include "clang/Basic/TargetBuiltins.h"
+
+using namespace clang;
+using namespace clang::targets;
+
+void ARCTargetInfo::getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ Builder.defineMacro("__arc__");
+}
diff --git a/lib/Basic/Targets/ARC.h b/lib/Basic/Targets/ARC.h
new file mode 100644
index 0000000000..ee20568f3d
--- /dev/null
+++ b/lib/Basic/Targets/ARC.h
@@ -0,0 +1,74 @@
+//===--- ARC.h - Declare ARC target feature support -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares ARC TargetInfo objects.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_BASIC_TARGETS_ARC_H
+#define LLVM_CLANG_LIB_BASIC_TARGETS_ARC_H
+
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/TargetOptions.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/Compiler.h"
+
+namespace clang {
+namespace targets {
+
+class LLVM_LIBRARY_VISIBILITY ARCTargetInfo : public TargetInfo {
+public:
+ ARCTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
+ : TargetInfo(Triple) {
+ NoAsmVariants = true;
+ LongLongAlign = 32;
+ SuitableAlign = 32;
+ DoubleAlign = LongDoubleAlign = 32;
+ SizeType = UnsignedInt;
+ PtrDiffType = SignedInt;
+ IntPtrType = SignedInt;
+ UseZeroLengthBitfieldAlignment = true;
+ resetDataLayout("e-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-"
+ "i32:32:32-f32:32:32-i64:32-f64:32-a:0:32-n32");
+ }
+
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override;
+
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override { return None; }
+
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return TargetInfo::VoidPtrBuiltinVaList;
+ }
+
+ const char *getClobbers() const override { return ""; }
+
+ ArrayRef<const char *> getGCCRegNames() const override {
+ static const char *const GCCRegNames[] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+ "r24", "r25", "gp", "sp", "fp", "ilink1", "r30", "blink"};
+ return llvm::makeArrayRef(GCCRegNames);
+ }
+
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
+ return None;
+ }
+
+ bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const override {
+ return false;
+ }
+};
+
+} // namespace targets
+} // namespace clang
+
+#endif // LLVM_CLANG_LIB_BASIC_TARGETS_ARC_H
diff --git a/lib/Basic/Targets/ARM.cpp b/lib/Basic/Targets/ARM.cpp
index 5345d1f8e9..cb202eac98 100644
--- a/lib/Basic/Targets/ARM.cpp
+++ b/lib/Basic/Targets/ARM.cpp
@@ -996,6 +996,7 @@ WindowsARMTargetInfo::checkCallingConvention(CallingConv CC) const {
case CC_OpenCLKernel:
case CC_PreserveMost:
case CC_PreserveAll:
+ case CC_Swift:
return CCCR_OK;
default:
return CCCR_Warning;
diff --git a/lib/Basic/Targets/Hexagon.cpp b/lib/Basic/Targets/Hexagon.cpp
index 0ef1f6db28..94e1388e38 100644
--- a/lib/Basic/Targets/Hexagon.cpp
+++ b/lib/Basic/Targets/Hexagon.cpp
@@ -25,14 +25,7 @@ void HexagonTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__qdsp6__", "1");
Builder.defineMacro("__hexagon__", "1");
- if (CPU == "hexagonv4") {
- Builder.defineMacro("__HEXAGON_V4__");
- Builder.defineMacro("__HEXAGON_ARCH__", "4");
- if (Opts.HexagonQdsp6Compat) {
- Builder.defineMacro("__QDSP6_V4__");
- Builder.defineMacro("__QDSP6_ARCH__", "4");
- }
- } else if (CPU == "hexagonv5") {
+ if (CPU == "hexagonv5") {
Builder.defineMacro("__HEXAGON_V5__");
Builder.defineMacro("__HEXAGON_ARCH__", "5");
if (Opts.HexagonQdsp6Compat) {
@@ -55,6 +48,9 @@ void HexagonTargetInfo::getTargetDefines(const LangOptions &Opts,
} else if (CPU == "hexagonv65") {
Builder.defineMacro("__HEXAGON_V65__");
Builder.defineMacro("__HEXAGON_ARCH__", "65");
+ } else if (CPU == "hexagonv66") {
+ Builder.defineMacro("__HEXAGON_V66__");
+ Builder.defineMacro("__HEXAGON_ARCH__", "66");
}
if (hasFeature("hvx-length64b")) {
@@ -150,9 +146,9 @@ struct CPUSuffix {
};
static constexpr CPUSuffix Suffixes[] = {
- {{"hexagonv4"}, {"4"}}, {{"hexagonv5"}, {"5"}},
- {{"hexagonv55"}, {"55"}}, {{"hexagonv60"}, {"60"}},
- {{"hexagonv62"}, {"62"}}, {{"hexagonv65"}, {"65"}},
+ {{"hexagonv5"}, {"5"}}, {{"hexagonv55"}, {"55"}},
+ {{"hexagonv60"}, {"60"}}, {{"hexagonv62"}, {"62"}},
+ {{"hexagonv65"}, {"65"}}, {{"hexagonv66"}, {"66"}},
};
const char *HexagonTargetInfo::getHexagonCPUSuffix(StringRef Name) {
diff --git a/lib/Basic/Targets/Mips.h b/lib/Basic/Targets/Mips.h
index fd1db5aa23..f20780915d 100644
--- a/lib/Basic/Targets/Mips.h
+++ b/lib/Basic/Targets/Mips.h
@@ -69,7 +69,12 @@ public:
UseIndirectJumpHazard(false), FPMode(FPXX) {
TheCXXABI.set(TargetCXXABI::GenericMIPS);
- setABI(getTriple().isMIPS32() ? "o32" : "n64");
+ if (Triple.isMIPS32())
+ setABI("o32");
+ else if (Triple.getEnvironment() == llvm::Triple::GNUABIN32)
+ setABI("n32");
+ else
+ setABI("n64");
CPU = ABI == "o32" ? "mips32r2" : "mips64r2";
diff --git a/lib/Basic/Targets/NVPTX.cpp b/lib/Basic/Targets/NVPTX.cpp
index 2af28e34bd..ca41c4d14c 100644
--- a/lib/Basic/Targets/NVPTX.cpp
+++ b/lib/Basic/Targets/NVPTX.cpp
@@ -188,6 +188,9 @@ void NVPTXTargetInfo::getTargetDefines(const LangOptions &Opts,
case CudaArch::GFX810:
case CudaArch::GFX900:
case CudaArch::GFX902:
+ case CudaArch::GFX904:
+ case CudaArch::GFX906:
+ case CudaArch::GFX909:
case CudaArch::LAST:
break;
case CudaArch::UNKNOWN:
diff --git a/lib/Basic/Targets/OSTargets.cpp b/lib/Basic/Targets/OSTargets.cpp
index 50abd4ce0c..6252a51ef7 100644
--- a/lib/Basic/Targets/OSTargets.cpp
+++ b/lib/Basic/Targets/OSTargets.cpp
@@ -33,7 +33,7 @@ void getDarwinDefines(MacroBuilder &Builder, const LangOptions &Opts,
Builder.defineMacro("_FORTIFY_SOURCE", "0");
// Darwin defines __weak, __strong, and __unsafe_unretained even in C mode.
- if (!Opts.ObjC1) {
+ if (!Opts.ObjC) {
// __weak is always defined, for use in blocks and with objc pointers.
Builder.defineMacro("__weak", "__attribute__((objc_gc(weak)))");
Builder.defineMacro("__strong", "");
diff --git a/lib/Basic/Targets/OSTargets.h b/lib/Basic/Targets/OSTargets.h
index 48da6dc950..42ce89669f 100644
--- a/lib/Basic/Targets/OSTargets.h
+++ b/lib/Basic/Targets/OSTargets.h
@@ -133,6 +133,15 @@ public:
/// is very similar to ELF's "protected"; Darwin requires a "weak"
/// attribute on declarations that can be dynamically replaced.
bool hasProtectedVisibility() const override { return false; }
+
+ TargetInfo::IntType getLeastIntTypeByWidth(unsigned BitWidth,
+ bool IsSigned) const final {
+ // Darwin uses `long long` for `int_least64_t` and `int_fast64_t`.
+ return BitWidth == 64
+ ? (IsSigned ? TargetInfo::SignedLongLong
+ : TargetInfo::UnsignedLongLong)
+ : TargetInfo::getLeastIntTypeByWidth(BitWidth, IsSigned);
+ }
};
// DragonFlyBSD Target
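The Darwin override above matters because getLeastIntTypeByWidth() feeds the predefined __INT_LEAST64_TYPE__ / __INT_FAST64_TYPE__ family of macros, and Darwin's <stdint.h> uses 'long long' for its 64-bit least/fast typedefs. Roughly the expected effect (a sketch, for a 64-bit Darwin target):

//   __INT_LEAST64_TYPE__  -> long long int            (previously: long int)
//   __UINT_FAST64_TYPE__  -> long long unsigned int
// so the builtin macros now agree with the system headers.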
@@ -257,6 +266,8 @@ protected:
Builder.defineMacro("__HAIKU__");
Builder.defineMacro("__ELF__");
DefineStd(Builder, "unix", Opts);
+ if (this->HasFloat128)
+ Builder.defineMacro("__FLOAT128__");
}
public:
@@ -267,9 +278,40 @@ public:
this->PtrDiffType = TargetInfo::SignedLong;
this->ProcessIDType = TargetInfo::SignedLong;
this->TLSSupported = false;
+ switch (Triple.getArch()) {
+ default:
+ break;
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ this->HasFloat128 = true;
+ break;
+ }
}
};
+// Hurd target
+template <typename Target>
+class LLVM_LIBRARY_VISIBILITY HurdTargetInfo : public OSTargetInfo<Target> {
+protected:
+ void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const override {
+ // Hurd defines; list based off of gcc output.
+ DefineStd(Builder, "unix", Opts);
+ Builder.defineMacro("__GNU__");
+ Builder.defineMacro("__gnu_hurd__");
+ Builder.defineMacro("__MACH__");
+ Builder.defineMacro("__GLIBC__");
+ Builder.defineMacro("__ELF__");
+ if (Opts.POSIXThreads)
+ Builder.defineMacro("_REENTRANT");
+ if (Opts.CPlusPlus)
+ Builder.defineMacro("_GNU_SOURCE");
+ }
+public:
+ HurdTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+ : OSTargetInfo<Target>(Triple, Opts) {}
+};
+
// Minix Target
template <typename Target>
class LLVM_LIBRARY_VISIBILITY MinixTargetInfo : public OSTargetInfo<Target> {
@@ -341,7 +383,6 @@ public:
break;
case llvm::Triple::x86:
case llvm::Triple::x86_64:
- case llvm::Triple::systemz:
this->HasFloat128 = true;
break;
}
@@ -397,7 +438,7 @@ public:
case llvm::Triple::x86:
case llvm::Triple::x86_64:
this->HasFloat128 = true;
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
default:
this->MCountName = "__mcount";
break;
diff --git a/lib/Basic/Targets/PPC.cpp b/lib/Basic/Targets/PPC.cpp
index b4eb3b1b97..db54fcff0d 100644
--- a/lib/Basic/Targets/PPC.cpp
+++ b/lib/Basic/Targets/PPC.cpp
@@ -412,6 +412,36 @@ ArrayRef<TargetInfo::GCCRegAlias> PPCTargetInfo::getGCCRegAliases() const {
return llvm::makeArrayRef(GCCRegAliases);
}
+// PPC ELFABIv2 DWARF Definition "Table 2.26. Mappings of Common Registers".
+// vs0 ~ vs31 are mapped to 32 - 63,
+// vs32 ~ vs63 are mapped to 77 - 108.
+const TargetInfo::AddlRegName GCCAddlRegNames[] = {
+ // Table of additional register names to use in user input.
+ {{"vs0"}, 32}, {{"vs1"}, 33}, {{"vs2"}, 34}, {{"vs3"}, 35},
+ {{"vs4"}, 36}, {{"vs5"}, 37}, {{"vs6"}, 38}, {{"vs7"}, 39},
+ {{"vs8"}, 40}, {{"vs9"}, 41}, {{"vs10"}, 42}, {{"vs11"}, 43},
+ {{"vs12"}, 44}, {{"vs13"}, 45}, {{"vs14"}, 46}, {{"vs15"}, 47},
+ {{"vs16"}, 48}, {{"vs17"}, 49}, {{"vs18"}, 50}, {{"vs19"}, 51},
+ {{"vs20"}, 52}, {{"vs21"}, 53}, {{"vs22"}, 54}, {{"vs23"}, 55},
+ {{"vs24"}, 56}, {{"vs25"}, 57}, {{"vs26"}, 58}, {{"vs27"}, 59},
+ {{"vs28"}, 60}, {{"vs29"}, 61}, {{"vs30"}, 62}, {{"vs31"}, 63},
+ {{"vs32"}, 77}, {{"vs33"}, 78}, {{"vs34"}, 79}, {{"vs35"}, 80},
+ {{"vs36"}, 81}, {{"vs37"}, 82}, {{"vs38"}, 83}, {{"vs39"}, 84},
+ {{"vs40"}, 85}, {{"vs41"}, 86}, {{"vs42"}, 87}, {{"vs43"}, 88},
+ {{"vs44"}, 89}, {{"vs45"}, 90}, {{"vs46"}, 91}, {{"vs47"}, 92},
+ {{"vs48"}, 93}, {{"vs49"}, 94}, {{"vs50"}, 95}, {{"vs51"}, 96},
+ {{"vs52"}, 97}, {{"vs53"}, 98}, {{"vs54"}, 99}, {{"vs55"}, 100},
+ {{"vs56"}, 101}, {{"vs57"}, 102}, {{"vs58"}, 103}, {{"vs59"}, 104},
+ {{"vs60"}, 105}, {{"vs61"}, 106}, {{"vs62"}, 107}, {{"vs63"}, 108},
+};
+
+ArrayRef<TargetInfo::AddlRegName> PPCTargetInfo::getGCCAddlRegNames() const {
+ if (ABI == "elfv2")
+ return llvm::makeArrayRef(GCCAddlRegNames);
+ else
+ return TargetInfo::getGCCAddlRegNames();
+}
+
static constexpr llvm::StringLiteral ValidCPUNames[] = {
{"generic"}, {"440"}, {"450"}, {"601"}, {"602"},
{"603"}, {"603e"}, {"603ev"}, {"604"}, {"604e"},
diff --git a/lib/Basic/Targets/PPC.h b/lib/Basic/Targets/PPC.h
index 439c73a0e3..30f13c919c 100644
--- a/lib/Basic/Targets/PPC.h
+++ b/lib/Basic/Targets/PPC.h
@@ -176,6 +176,8 @@ public:
ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override;
+ ArrayRef<TargetInfo::AddlRegName> getGCCAddlRegNames() const override;
+
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &Info) const override {
switch (*Name) {
@@ -201,6 +203,7 @@ public:
case 's': // VSX vector register to hold scalar float data
case 'a': // Any VSX register
case 'c': // An individual CR bit
+ case 'i': // FP or VSX register to hold 64-bit integers data
break;
default:
return false;
diff --git a/lib/Basic/Targets/X86.cpp b/lib/Basic/Targets/X86.cpp
index 94d1112401..53b4c153e9 100644
--- a/lib/Basic/Targets/X86.cpp
+++ b/lib/Basic/Targets/X86.cpp
@@ -142,7 +142,6 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "gfni", true);
setFeatureEnabledImpl(Features, "vpclmulqdq", true);
setFeatureEnabledImpl(Features, "avx512bitalg", true);
- setFeatureEnabledImpl(Features, "avx512vnni", true);
setFeatureEnabledImpl(Features, "avx512vbmi2", true);
setFeatureEnabledImpl(Features, "avx512vpopcntdq", true);
setFeatureEnabledImpl(Features, "rdpid", true);
@@ -152,6 +151,12 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "avx512vbmi", true);
setFeatureEnabledImpl(Features, "sha", true);
LLVM_FALLTHROUGH;
+ case CK_Cascadelake:
+      // Cannonlake has no VNNI feature while Icelake does
+ if (Kind != CK_Cannonlake)
+        // CLX inherits all SKX features plus AVX512_VNNI
+ setFeatureEnabledImpl(Features, "avx512vnni", true);
+ LLVM_FALLTHROUGH;
case CK_SkylakeServer:
setFeatureEnabledImpl(Features, "avx512f", true);
setFeatureEnabledImpl(Features, "avx512cd", true);
@@ -166,10 +171,11 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "xsavec", true);
setFeatureEnabledImpl(Features, "xsaves", true);
setFeatureEnabledImpl(Features, "mpx", true);
- if (Kind != CK_SkylakeServer) // SKX inherits all SKL features, except SGX
+ if (Kind != CK_SkylakeServer
+ && Kind != CK_Cascadelake)
+ // SKX/CLX inherits all SKL features, except SGX
setFeatureEnabledImpl(Features, "sgx", true);
setFeatureEnabledImpl(Features, "clflushopt", true);
- setFeatureEnabledImpl(Features, "rtm", true);
setFeatureEnabledImpl(Features, "aes", true);
LLVM_FALLTHROUGH;
case CK_Broadwell:
@@ -281,7 +287,6 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "lzcnt", true);
setFeatureEnabledImpl(Features, "bmi", true);
setFeatureEnabledImpl(Features, "bmi2", true);
- setFeatureEnabledImpl(Features, "rtm", true);
setFeatureEnabledImpl(Features, "fma", true);
setFeatureEnabledImpl(Features, "rdrnd", true);
setFeatureEnabledImpl(Features, "f16c", true);
@@ -860,6 +865,11 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
/// definitions for this particular subtarget.
void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
+ std::string CodeModel = getTargetOpts().CodeModel;
+ if (CodeModel == "default")
+ CodeModel = "small";
+ Builder.defineMacro("__code_model_" + CodeModel + "_");
+
// Target identification.
if (getTriple().getArch() == llvm::Triple::x86_64) {
Builder.defineMacro("__amd64__");
@@ -946,6 +956,7 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
case CK_Broadwell:
case CK_SkylakeClient:
case CK_SkylakeServer:
+ case CK_Cascadelake:
case CK_Cannonlake:
case CK_IcelakeClient:
case CK_IcelakeServer:
@@ -1678,6 +1689,7 @@ bool X86TargetInfo::validateOperandSize(StringRef Constraint,
return false;
break;
}
+ LLVM_FALLTHROUGH;
case 'v':
case 'x':
if (SSELevel >= AVX512F)
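The __code_model_*_ predefine added above reflects -mcmodel on x86, with "default" mapped to "small"; note the single trailing underscore in this revision. A sketch of user-side detection:

#if defined(__code_model_large_)
// avoid assuming all code and data fit within +/-2GB
#elif defined(__code_model_medium_)
// large data, small code
#else
// small (the default) or kernel code model
#endif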
diff --git a/lib/Basic/Targets/X86.h b/lib/Basic/Targets/X86.h
index a77757af67..05930ae9ee 100644
--- a/lib/Basic/Targets/X86.h
+++ b/lib/Basic/Targets/X86.h
@@ -225,6 +225,7 @@ public:
case 'Y':
if ((++I != E) && ((*I == '0') || (*I == 'z')))
return "xmm0";
+ break;
default:
break;
}
@@ -290,9 +291,6 @@ public:
return checkCPUKind(CPU = getCPUKind(Name));
}
- bool supportsMultiVersioning() const override {
- return getTriple().isOSBinFormatELF();
- }
unsigned multiVersionSortPriority(StringRef Name) const override;
bool setFPMath(StringRef Name) override;
diff --git a/lib/Basic/VirtualFileSystem.cpp b/lib/Basic/VirtualFileSystem.cpp
deleted file mode 100644
index 6d96f16a79..0000000000
--- a/lib/Basic/VirtualFileSystem.cpp
+++ /dev/null
@@ -1,2142 +0,0 @@
-//===- VirtualFileSystem.cpp - Virtual File System Layer ------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the VirtualFileSystem interface.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/Basic/VirtualFileSystem.h"
-#include "clang/Basic/LLVM.h"
-#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/IntrusiveRefCntPtr.h"
-#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/StringSet.h"
-#include "llvm/ADT/Twine.h"
-#include "llvm/ADT/iterator_range.h"
-#include "llvm/Config/llvm-config.h"
-#include "llvm/Support/Casting.h"
-#include "llvm/Support/Chrono.h"
-#include "llvm/Support/Compiler.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/Errc.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/ErrorOr.h"
-#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/Path.h"
-#include "llvm/Support/Process.h"
-#include "llvm/Support/SMLoc.h"
-#include "llvm/Support/SourceMgr.h"
-#include "llvm/Support/YAMLParser.h"
-#include "llvm/Support/raw_ostream.h"
-#include <algorithm>
-#include <atomic>
-#include <cassert>
-#include <cstdint>
-#include <iterator>
-#include <limits>
-#include <map>
-#include <memory>
-#include <mutex>
-#include <string>
-#include <system_error>
-#include <utility>
-#include <vector>
-
-using namespace clang;
-using namespace vfs;
-using namespace llvm;
-
-using llvm::sys::fs::file_status;
-using llvm::sys::fs::file_type;
-using llvm::sys::fs::perms;
-using llvm::sys::fs::UniqueID;
-
-Status::Status(const file_status &Status)
- : UID(Status.getUniqueID()), MTime(Status.getLastModificationTime()),
- User(Status.getUser()), Group(Status.getGroup()), Size(Status.getSize()),
- Type(Status.type()), Perms(Status.permissions()) {}
-
-Status::Status(StringRef Name, UniqueID UID, sys::TimePoint<> MTime,
- uint32_t User, uint32_t Group, uint64_t Size, file_type Type,
- perms Perms)
- : Name(Name), UID(UID), MTime(MTime), User(User), Group(Group), Size(Size),
- Type(Type), Perms(Perms) {}
-
-Status Status::copyWithNewName(const Status &In, StringRef NewName) {
- return Status(NewName, In.getUniqueID(), In.getLastModificationTime(),
- In.getUser(), In.getGroup(), In.getSize(), In.getType(),
- In.getPermissions());
-}
-
-Status Status::copyWithNewName(const file_status &In, StringRef NewName) {
- return Status(NewName, In.getUniqueID(), In.getLastModificationTime(),
- In.getUser(), In.getGroup(), In.getSize(), In.type(),
- In.permissions());
-}
-
-bool Status::equivalent(const Status &Other) const {
- assert(isStatusKnown() && Other.isStatusKnown());
- return getUniqueID() == Other.getUniqueID();
-}
-
-bool Status::isDirectory() const {
- return Type == file_type::directory_file;
-}
-
-bool Status::isRegularFile() const {
- return Type == file_type::regular_file;
-}
-
-bool Status::isOther() const {
- return exists() && !isRegularFile() && !isDirectory() && !isSymlink();
-}
-
-bool Status::isSymlink() const {
- return Type == file_type::symlink_file;
-}
-
-bool Status::isStatusKnown() const {
- return Type != file_type::status_error;
-}
-
-bool Status::exists() const {
- return isStatusKnown() && Type != file_type::file_not_found;
-}
-
-File::~File() = default;
-
-FileSystem::~FileSystem() = default;
-
-ErrorOr<std::unique_ptr<MemoryBuffer>>
-FileSystem::getBufferForFile(const llvm::Twine &Name, int64_t FileSize,
- bool RequiresNullTerminator, bool IsVolatile) {
- auto F = openFileForRead(Name);
- if (!F)
- return F.getError();
-
- return (*F)->getBuffer(Name, FileSize, RequiresNullTerminator, IsVolatile);
-}
-
-std::error_code FileSystem::makeAbsolute(SmallVectorImpl<char> &Path) const {
- if (llvm::sys::path::is_absolute(Path))
- return {};
-
- auto WorkingDir = getCurrentWorkingDirectory();
- if (!WorkingDir)
- return WorkingDir.getError();
-
- return llvm::sys::fs::make_absolute(WorkingDir.get(), Path);
-}
-
-std::error_code FileSystem::getRealPath(const Twine &Path,
- SmallVectorImpl<char> &Output) const {
- return errc::operation_not_permitted;
-}
-
-bool FileSystem::exists(const Twine &Path) {
- auto Status = status(Path);
- return Status && Status->exists();
-}
-
-#ifndef NDEBUG
-static bool isTraversalComponent(StringRef Component) {
- return Component.equals("..") || Component.equals(".");
-}
-
-static bool pathHasTraversal(StringRef Path) {
- using namespace llvm::sys;
-
- for (StringRef Comp : llvm::make_range(path::begin(Path), path::end(Path)))
- if (isTraversalComponent(Comp))
- return true;
- return false;
-}
-#endif
-
-//===-----------------------------------------------------------------------===/
-// RealFileSystem implementation
-//===-----------------------------------------------------------------------===/
-
-namespace {
-
-/// Wrapper around a raw file descriptor.
-class RealFile : public File {
- friend class RealFileSystem;
-
- int FD;
- Status S;
- std::string RealName;
-
- RealFile(int FD, StringRef NewName, StringRef NewRealPathName)
- : FD(FD), S(NewName, {}, {}, {}, {}, {},
- llvm::sys::fs::file_type::status_error, {}),
- RealName(NewRealPathName.str()) {
- assert(FD >= 0 && "Invalid or inactive file descriptor");
- }
-
-public:
- ~RealFile() override;
-
- ErrorOr<Status> status() override;
- ErrorOr<std::string> getName() override;
- ErrorOr<std::unique_ptr<MemoryBuffer>> getBuffer(const Twine &Name,
- int64_t FileSize,
- bool RequiresNullTerminator,
- bool IsVolatile) override;
- std::error_code close() override;
-};
-
-} // namespace
-
-RealFile::~RealFile() { close(); }
-
-ErrorOr<Status> RealFile::status() {
- assert(FD != -1 && "cannot stat closed file");
- if (!S.isStatusKnown()) {
- file_status RealStatus;
- if (std::error_code EC = sys::fs::status(FD, RealStatus))
- return EC;
- S = Status::copyWithNewName(RealStatus, S.getName());
- }
- return S;
-}
-
-ErrorOr<std::string> RealFile::getName() {
- return RealName.empty() ? S.getName().str() : RealName;
-}
-
-ErrorOr<std::unique_ptr<MemoryBuffer>>
-RealFile::getBuffer(const Twine &Name, int64_t FileSize,
- bool RequiresNullTerminator, bool IsVolatile) {
- assert(FD != -1 && "cannot get buffer for closed file");
- return MemoryBuffer::getOpenFile(FD, Name, FileSize, RequiresNullTerminator,
- IsVolatile);
-}
-
-std::error_code RealFile::close() {
- std::error_code EC = sys::Process::SafelyCloseFileDescriptor(FD);
- FD = -1;
- return EC;
-}
-
-namespace {
-
-/// The file system according to your operating system.
-class RealFileSystem : public FileSystem {
-public:
- ErrorOr<Status> status(const Twine &Path) override;
- ErrorOr<std::unique_ptr<File>> openFileForRead(const Twine &Path) override;
- directory_iterator dir_begin(const Twine &Dir, std::error_code &EC) override;
-
- llvm::ErrorOr<std::string> getCurrentWorkingDirectory() const override;
- std::error_code setCurrentWorkingDirectory(const Twine &Path) override;
- std::error_code getRealPath(const Twine &Path,
- SmallVectorImpl<char> &Output) const override;
-private:
- mutable std::mutex CWDMutex;
- mutable std::string CWDCache;
-};
-
-} // namespace
-
-ErrorOr<Status> RealFileSystem::status(const Twine &Path) {
- sys::fs::file_status RealStatus;
- if (std::error_code EC = sys::fs::status(Path, RealStatus))
- return EC;
- return Status::copyWithNewName(RealStatus, Path.str());
-}
-
-ErrorOr<std::unique_ptr<File>>
-RealFileSystem::openFileForRead(const Twine &Name) {
- int FD;
- SmallString<256> RealName;
- if (std::error_code EC =
- sys::fs::openFileForRead(Name, FD, sys::fs::OF_None, &RealName))
- return EC;
- return std::unique_ptr<File>(new RealFile(FD, Name.str(), RealName.str()));
-}
-
-llvm::ErrorOr<std::string> RealFileSystem::getCurrentWorkingDirectory() const {
- std::lock_guard<std::mutex> Lock(CWDMutex);
- if (!CWDCache.empty())
- return CWDCache;
- SmallString<256> Dir;
- if (std::error_code EC = llvm::sys::fs::current_path(Dir))
- return EC;
- CWDCache = Dir.str();
- return CWDCache;
-}
-
-std::error_code RealFileSystem::setCurrentWorkingDirectory(const Twine &Path) {
- // FIXME: chdir is thread hostile; on the other hand, creating the same
- // behavior as chdir is complex: chdir resolves the path once, thus
- // guaranteeing that all subsequent relative path operations work
- // on the same path the original chdir resulted in. This makes a
- // difference for example on network filesystems, where symlinks might be
- // switched during runtime of the tool. Fixing this depends on having a
- // file system abstraction that allows openat() style interactions.
- if (auto EC = llvm::sys::fs::set_current_path(Path))
- return EC;
-
- // Invalidate cache.
- std::lock_guard<std::mutex> Lock(CWDMutex);
- CWDCache.clear();
- return std::error_code();
-}
-
-std::error_code
-RealFileSystem::getRealPath(const Twine &Path,
- SmallVectorImpl<char> &Output) const {
- return llvm::sys::fs::real_path(Path, Output);
-}
-
-IntrusiveRefCntPtr<FileSystem> vfs::getRealFileSystem() {
- static IntrusiveRefCntPtr<FileSystem> FS = new RealFileSystem();
- return FS;
-}
-
-namespace {
-
-class RealFSDirIter : public clang::vfs::detail::DirIterImpl {
- llvm::sys::fs::directory_iterator Iter;
-
-public:
- RealFSDirIter(const Twine &Path, std::error_code &EC) : Iter(Path, EC) {
- if (Iter != llvm::sys::fs::directory_iterator())
- CurrentEntry = directory_entry(Iter->path(), Iter->type());
- }
-
- std::error_code increment() override {
- std::error_code EC;
- Iter.increment(EC);
- CurrentEntry = (Iter == llvm::sys::fs::directory_iterator())
- ? directory_entry()
- : directory_entry(Iter->path(), Iter->type());
- return EC;
- }
-};
-
-} // namespace
-
-directory_iterator RealFileSystem::dir_begin(const Twine &Dir,
- std::error_code &EC) {
- return directory_iterator(std::make_shared<RealFSDirIter>(Dir, EC));
-}
-
-//===-----------------------------------------------------------------------===/
-// OverlayFileSystem implementation
-//===-----------------------------------------------------------------------===/
-
-OverlayFileSystem::OverlayFileSystem(IntrusiveRefCntPtr<FileSystem> BaseFS) {
- FSList.push_back(std::move(BaseFS));
-}
-
-void OverlayFileSystem::pushOverlay(IntrusiveRefCntPtr<FileSystem> FS) {
- FSList.push_back(FS);
- // Synchronize added file systems by duplicating the working directory from
- // the first one in the list.
- FS->setCurrentWorkingDirectory(getCurrentWorkingDirectory().get());
-}
-
-ErrorOr<Status> OverlayFileSystem::status(const Twine &Path) {
- // FIXME: handle symlinks that cross file systems
- for (iterator I = overlays_begin(), E = overlays_end(); I != E; ++I) {
- ErrorOr<Status> Status = (*I)->status(Path);
- if (Status || Status.getError() != llvm::errc::no_such_file_or_directory)
- return Status;
- }
- return make_error_code(llvm::errc::no_such_file_or_directory);
-}
-
-ErrorOr<std::unique_ptr<File>>
-OverlayFileSystem::openFileForRead(const llvm::Twine &Path) {
- // FIXME: handle symlinks that cross file systems
- for (iterator I = overlays_begin(), E = overlays_end(); I != E; ++I) {
- auto Result = (*I)->openFileForRead(Path);
- if (Result || Result.getError() != llvm::errc::no_such_file_or_directory)
- return Result;
- }
- return make_error_code(llvm::errc::no_such_file_or_directory);
-}
-
-llvm::ErrorOr<std::string>
-OverlayFileSystem::getCurrentWorkingDirectory() const {
- // All file systems are synchronized, just take the first working directory.
- return FSList.front()->getCurrentWorkingDirectory();
-}
-
-std::error_code
-OverlayFileSystem::setCurrentWorkingDirectory(const Twine &Path) {
- for (auto &FS : FSList)
- if (std::error_code EC = FS->setCurrentWorkingDirectory(Path))
- return EC;
- return {};
-}
-
-std::error_code
-OverlayFileSystem::getRealPath(const Twine &Path,
- SmallVectorImpl<char> &Output) const {
- for (auto &FS : FSList)
- if (FS->exists(Path))
- return FS->getRealPath(Path, Output);
- return errc::no_such_file_or_directory;
-}
-
-clang::vfs::detail::DirIterImpl::~DirIterImpl() = default;
-
-namespace {
-
-class OverlayFSDirIterImpl : public clang::vfs::detail::DirIterImpl {
- OverlayFileSystem &Overlays;
- std::string Path;
- OverlayFileSystem::iterator CurrentFS;
- directory_iterator CurrentDirIter;
- llvm::StringSet<> SeenNames;
-
- std::error_code incrementFS() {
- assert(CurrentFS != Overlays.overlays_end() && "incrementing past end");
- ++CurrentFS;
- for (auto E = Overlays.overlays_end(); CurrentFS != E; ++CurrentFS) {
- std::error_code EC;
- CurrentDirIter = (*CurrentFS)->dir_begin(Path, EC);
- if (EC && EC != errc::no_such_file_or_directory)
- return EC;
- if (CurrentDirIter != directory_iterator())
- break; // found
- }
- return {};
- }
-
- std::error_code incrementDirIter(bool IsFirstTime) {
- assert((IsFirstTime || CurrentDirIter != directory_iterator()) &&
- "incrementing past end");
- std::error_code EC;
- if (!IsFirstTime)
- CurrentDirIter.increment(EC);
- if (!EC && CurrentDirIter == directory_iterator())
- EC = incrementFS();
- return EC;
- }
-
- std::error_code incrementImpl(bool IsFirstTime) {
- while (true) {
- std::error_code EC = incrementDirIter(IsFirstTime);
- if (EC || CurrentDirIter == directory_iterator()) {
- CurrentEntry = directory_entry();
- return EC;
- }
- CurrentEntry = *CurrentDirIter;
- StringRef Name = llvm::sys::path::filename(CurrentEntry.path());
- if (SeenNames.insert(Name).second)
- return EC; // name not seen before
- }
- llvm_unreachable("returned above");
- }
-
-public:
- OverlayFSDirIterImpl(const Twine &Path, OverlayFileSystem &FS,
- std::error_code &EC)
- : Overlays(FS), Path(Path.str()), CurrentFS(Overlays.overlays_begin()) {
- CurrentDirIter = (*CurrentFS)->dir_begin(Path, EC);
- EC = incrementImpl(true);
- }
-
- std::error_code increment() override { return incrementImpl(false); }
-};
-
-} // namespace
-
-directory_iterator OverlayFileSystem::dir_begin(const Twine &Dir,
- std::error_code &EC) {
- return directory_iterator(
- std::make_shared<OverlayFSDirIterImpl>(Dir, *this, EC));
-}
-
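As a rough orientation for the overlay machinery above, here is a minimal usage sketch. It is illustrative only and not part of this patch; it assumes the clang/Basic/VirtualFileSystem.h header that this file implements, and the file names are made up.

    #include "clang/Basic/VirtualFileSystem.h"
    #include "llvm/ADT/IntrusiveRefCntPtr.h"
    #include "llvm/Support/MemoryBuffer.h"

    using namespace clang;

    void overlayExample() {
      // Base layer plus one overlay; lookups consult each layer in turn.
      llvm::IntrusiveRefCntPtr<vfs::InMemoryFileSystem> Base(
          new vfs::InMemoryFileSystem());
      llvm::IntrusiveRefCntPtr<vfs::InMemoryFileSystem> Top(
          new vfs::InMemoryFileSystem());
      Base->addFile("/a.h", 0, llvm::MemoryBuffer::getMemBuffer("// base"));
      Top->addFile("/b.h", 0, llvm::MemoryBuffer::getMemBuffer("// top"));

      llvm::IntrusiveRefCntPtr<vfs::OverlayFileSystem> OFS(
          new vfs::OverlayFileSystem(Base));
      OFS->pushOverlay(Top);

      // status() returns the first overlay result that is not ENOENT, so
      // files from both layers are visible through the combined view.
      bool BothVisible = bool(OFS->status("/a.h")) && bool(OFS->status("/b.h"));
      (void)BothVisible;
    }

The fall-through behaviour comes directly from the loops in status() and openFileForRead() above: each overlay is consulted until one returns something other than no_such_file_or_directory.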
-namespace clang {
-namespace vfs {
-
-namespace detail {
-
-enum InMemoryNodeKind { IME_File, IME_Directory, IME_HardLink };
-
-/// The in-memory file system is a tree of nodes. Every node can be either a
-/// file, a hard link, or a directory.
-class InMemoryNode {
- InMemoryNodeKind Kind;
- std::string FileName;
-
-public:
- InMemoryNode(llvm::StringRef FileName, InMemoryNodeKind Kind)
- : Kind(Kind), FileName(llvm::sys::path::filename(FileName)) {}
- virtual ~InMemoryNode() = default;
-
- /// Get the filename of this node (the name without the directory part).
- StringRef getFileName() const { return FileName; }
- InMemoryNodeKind getKind() const { return Kind; }
- virtual std::string toString(unsigned Indent) const = 0;
-};
-
-class InMemoryFile : public InMemoryNode {
- Status Stat;
- std::unique_ptr<llvm::MemoryBuffer> Buffer;
-
-public:
- InMemoryFile(Status Stat, std::unique_ptr<llvm::MemoryBuffer> Buffer)
- : InMemoryNode(Stat.getName(), IME_File), Stat(std::move(Stat)),
- Buffer(std::move(Buffer)) {}
-
- /// Return the \p Status for this node. \p RequestedName should be the name
- /// through which the caller referred to this node. It will override
- /// \p Status::Name in the return value, to mimic the behavior of \p RealFile.
- Status getStatus(StringRef RequestedName) const {
- return Status::copyWithNewName(Stat, RequestedName);
- }
- llvm::MemoryBuffer *getBuffer() const { return Buffer.get(); }
-
- std::string toString(unsigned Indent) const override {
- return (std::string(Indent, ' ') + Stat.getName() + "\n").str();
- }
-
- static bool classof(const InMemoryNode *N) {
- return N->getKind() == IME_File;
- }
-};
-
-namespace {
-
-class InMemoryHardLink : public InMemoryNode {
- const InMemoryFile &ResolvedFile;
-
-public:
- InMemoryHardLink(StringRef Path, const InMemoryFile &ResolvedFile)
- : InMemoryNode(Path, IME_HardLink), ResolvedFile(ResolvedFile) {}
- const InMemoryFile &getResolvedFile() const { return ResolvedFile; }
-
- std::string toString(unsigned Indent) const override {
- return std::string(Indent, ' ') + "HardLink to -> " +
- ResolvedFile.toString(0);
- }
-
- static bool classof(const InMemoryNode *N) {
- return N->getKind() == IME_HardLink;
- }
-};
-
-/// Adapt an InMemoryFile for the VFS File interface. The goal is to make
-/// \p InMemoryFileAdaptor mimic as much as possible the behavior of
-/// \p RealFile.
-class InMemoryFileAdaptor : public File {
- const InMemoryFile &Node;
- /// The name to use when returning a Status for this file.
- std::string RequestedName;
-
-public:
- explicit InMemoryFileAdaptor(const InMemoryFile &Node,
- std::string RequestedName)
- : Node(Node), RequestedName(std::move(RequestedName)) {}
-
- llvm::ErrorOr<Status> status() override {
- return Node.getStatus(RequestedName);
- }
-
- llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
- getBuffer(const Twine &Name, int64_t FileSize, bool RequiresNullTerminator,
- bool IsVolatile) override {
- llvm::MemoryBuffer *Buf = Node.getBuffer();
- return llvm::MemoryBuffer::getMemBuffer(
- Buf->getBuffer(), Buf->getBufferIdentifier(), RequiresNullTerminator);
- }
-
- std::error_code close() override { return {}; }
-};
-} // namespace
-
-class InMemoryDirectory : public InMemoryNode {
- Status Stat;
- llvm::StringMap<std::unique_ptr<InMemoryNode>> Entries;
-
-public:
- InMemoryDirectory(Status Stat)
- : InMemoryNode(Stat.getName(), IME_Directory), Stat(std::move(Stat)) {}
-
- /// Return the \p Status for this node. \p RequestedName should be the name
- /// through which the caller referred to this node. It will override
- /// \p Status::Name in the return value, to mimic the behavior of \p RealFile.
- Status getStatus(StringRef RequestedName) const {
- return Status::copyWithNewName(Stat, RequestedName);
- }
- InMemoryNode *getChild(StringRef Name) {
- auto I = Entries.find(Name);
- if (I != Entries.end())
- return I->second.get();
- return nullptr;
- }
-
- InMemoryNode *addChild(StringRef Name, std::unique_ptr<InMemoryNode> Child) {
- return Entries.insert(make_pair(Name, std::move(Child)))
- .first->second.get();
- }
-
- using const_iterator = decltype(Entries)::const_iterator;
-
- const_iterator begin() const { return Entries.begin(); }
- const_iterator end() const { return Entries.end(); }
-
- std::string toString(unsigned Indent) const override {
- std::string Result =
- (std::string(Indent, ' ') + Stat.getName() + "\n").str();
- for (const auto &Entry : Entries)
- Result += Entry.second->toString(Indent + 2);
- return Result;
- }
-
- static bool classof(const InMemoryNode *N) {
- return N->getKind() == IME_Directory;
- }
-};
-
-namespace {
-Status getNodeStatus(const InMemoryNode *Node, StringRef RequestedName) {
- if (auto Dir = dyn_cast<detail::InMemoryDirectory>(Node))
- return Dir->getStatus(RequestedName);
- if (auto File = dyn_cast<detail::InMemoryFile>(Node))
- return File->getStatus(RequestedName);
- if (auto Link = dyn_cast<detail::InMemoryHardLink>(Node))
- return Link->getResolvedFile().getStatus(RequestedName);
- llvm_unreachable("Unknown node type");
-}
-} // namespace
-} // namespace detail
-
-InMemoryFileSystem::InMemoryFileSystem(bool UseNormalizedPaths)
- : Root(new detail::InMemoryDirectory(
- Status("", getNextVirtualUniqueID(), llvm::sys::TimePoint<>(), 0, 0,
- 0, llvm::sys::fs::file_type::directory_file,
- llvm::sys::fs::perms::all_all))),
- UseNormalizedPaths(UseNormalizedPaths) {}
-
-InMemoryFileSystem::~InMemoryFileSystem() = default;
-
-std::string InMemoryFileSystem::toString() const {
- return Root->toString(/*Indent=*/0);
-}
-
-bool InMemoryFileSystem::addFile(const Twine &P, time_t ModificationTime,
- std::unique_ptr<llvm::MemoryBuffer> Buffer,
- Optional<uint32_t> User,
- Optional<uint32_t> Group,
- Optional<llvm::sys::fs::file_type> Type,
- Optional<llvm::sys::fs::perms> Perms,
- const detail::InMemoryFile *HardLinkTarget) {
- SmallString<128> Path;
- P.toVector(Path);
-
- // Fix up relative paths. This just prepends the current working directory.
- std::error_code EC = makeAbsolute(Path);
- assert(!EC);
- (void)EC;
-
- if (useNormalizedPaths())
- llvm::sys::path::remove_dots(Path, /*remove_dot_dot=*/true);
-
- if (Path.empty())
- return false;
-
- detail::InMemoryDirectory *Dir = Root.get();
- auto I = llvm::sys::path::begin(Path), E = sys::path::end(Path);
- const auto ResolvedUser = User.getValueOr(0);
- const auto ResolvedGroup = Group.getValueOr(0);
- const auto ResolvedType = Type.getValueOr(sys::fs::file_type::regular_file);
- const auto ResolvedPerms = Perms.getValueOr(sys::fs::all_all);
- assert(!(HardLinkTarget && Buffer) && "HardLink cannot have a buffer");
- // Any intermediate directories we create should be accessible by
- // the owner, even if Perms says otherwise for the final path.
- const auto NewDirectoryPerms = ResolvedPerms | sys::fs::owner_all;
- while (true) {
- StringRef Name = *I;
- detail::InMemoryNode *Node = Dir->getChild(Name);
- ++I;
- if (!Node) {
- if (I == E) {
- // End of the path.
- std::unique_ptr<detail::InMemoryNode> Child;
- if (HardLinkTarget)
- Child.reset(new detail::InMemoryHardLink(P.str(), *HardLinkTarget));
- else {
- // Create a new file or directory.
- Status Stat(P.str(), getNextVirtualUniqueID(),
- llvm::sys::toTimePoint(ModificationTime), ResolvedUser,
- ResolvedGroup, Buffer->getBufferSize(), ResolvedType,
- ResolvedPerms);
- if (ResolvedType == sys::fs::file_type::directory_file) {
- Child.reset(new detail::InMemoryDirectory(std::move(Stat)));
- } else {
- Child.reset(
- new detail::InMemoryFile(std::move(Stat), std::move(Buffer)));
- }
- }
- Dir->addChild(Name, std::move(Child));
- return true;
- }
-
- // Create a new directory. Use the path up to here.
- Status Stat(
- StringRef(Path.str().begin(), Name.end() - Path.str().begin()),
- getNextVirtualUniqueID(), llvm::sys::toTimePoint(ModificationTime),
- ResolvedUser, ResolvedGroup, 0, sys::fs::file_type::directory_file,
- NewDirectoryPerms);
- Dir = cast<detail::InMemoryDirectory>(Dir->addChild(
- Name, llvm::make_unique<detail::InMemoryDirectory>(std::move(Stat))));
- continue;
- }
-
- if (auto *NewDir = dyn_cast<detail::InMemoryDirectory>(Node)) {
- Dir = NewDir;
- } else {
- assert((isa<detail::InMemoryFile>(Node) ||
- isa<detail::InMemoryHardLink>(Node)) &&
- "Must be either file, hardlink or directory!");
-
- // Trying to insert a directory in place of a file.
- if (I != E)
- return false;
-
- // Return false only if the new file is different from the existing one.
- if (auto Link = dyn_cast<detail::InMemoryHardLink>(Node)) {
- return Link->getResolvedFile().getBuffer()->getBuffer() ==
- Buffer->getBuffer();
- }
- return cast<detail::InMemoryFile>(Node)->getBuffer()->getBuffer() ==
- Buffer->getBuffer();
- }
- }
-}
-
-bool InMemoryFileSystem::addFile(const Twine &P, time_t ModificationTime,
- std::unique_ptr<llvm::MemoryBuffer> Buffer,
- Optional<uint32_t> User,
- Optional<uint32_t> Group,
- Optional<llvm::sys::fs::file_type> Type,
- Optional<llvm::sys::fs::perms> Perms) {
- return addFile(P, ModificationTime, std::move(Buffer), User, Group, Type,
- Perms, /*HardLinkTarget=*/nullptr);
-}
-
-bool InMemoryFileSystem::addFileNoOwn(const Twine &P, time_t ModificationTime,
- llvm::MemoryBuffer *Buffer,
- Optional<uint32_t> User,
- Optional<uint32_t> Group,
- Optional<llvm::sys::fs::file_type> Type,
- Optional<llvm::sys::fs::perms> Perms) {
- return addFile(P, ModificationTime,
- llvm::MemoryBuffer::getMemBuffer(
- Buffer->getBuffer(), Buffer->getBufferIdentifier()),
- std::move(User), std::move(Group), std::move(Type),
- std::move(Perms));
-}
-
-static ErrorOr<const detail::InMemoryNode *>
-lookupInMemoryNode(const InMemoryFileSystem &FS, detail::InMemoryDirectory *Dir,
- const Twine &P) {
- SmallString<128> Path;
- P.toVector(Path);
-
- // Fix up relative paths. This just prepends the current working directory.
- std::error_code EC = FS.makeAbsolute(Path);
- assert(!EC);
- (void)EC;
-
- if (FS.useNormalizedPaths())
- llvm::sys::path::remove_dots(Path, /*remove_dot_dot=*/true);
-
- if (Path.empty())
- return Dir;
-
- auto I = llvm::sys::path::begin(Path), E = llvm::sys::path::end(Path);
- while (true) {
- detail::InMemoryNode *Node = Dir->getChild(*I);
- ++I;
- if (!Node)
- return errc::no_such_file_or_directory;
-
- // Return the file if it's at the end of the path.
- if (auto File = dyn_cast<detail::InMemoryFile>(Node)) {
- if (I == E)
- return File;
- return errc::no_such_file_or_directory;
- }
-
- // If Node is a hard link, return the resolved file.
- if (auto File = dyn_cast<detail::InMemoryHardLink>(Node)) {
- if (I == E)
- return &File->getResolvedFile();
- return errc::no_such_file_or_directory;
- }
- // Traverse directories.
- Dir = cast<detail::InMemoryDirectory>(Node);
- if (I == E)
- return Dir;
- }
-}
-
-bool InMemoryFileSystem::addHardLink(const Twine &FromPath,
- const Twine &ToPath) {
- auto FromNode = lookupInMemoryNode(*this, Root.get(), FromPath);
- auto ToNode = lookupInMemoryNode(*this, Root.get(), ToPath);
- // FromPath must not have been added before. ToPath must have been added
- // before. Resolved ToPath must be a File.
- if (!ToNode || FromNode || !isa<detail::InMemoryFile>(*ToNode))
- return false;
- return this->addFile(FromPath, 0, nullptr, None, None, None, None,
- cast<detail::InMemoryFile>(*ToNode));
-}
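For context, a small sketch of how the in-memory tree above is typically populated and how hard links resolve. Again this is not part of the patch; the header include and paths are illustrative assumptions.

    #include "clang/Basic/VirtualFileSystem.h"
    #include "llvm/ADT/IntrusiveRefCntPtr.h"
    #include "llvm/Support/MemoryBuffer.h"

    using namespace clang;

    void inMemoryExample() {
      llvm::IntrusiveRefCntPtr<vfs::InMemoryFileSystem> FS(
          new vfs::InMemoryFileSystem());

      // Intermediate directories ("/include") are created on demand.
      FS->addFile("/include/a.h", /*ModificationTime=*/0,
                  llvm::MemoryBuffer::getMemBuffer("#define A 1\n"));

      // The link path must not exist yet; the target must already be a file.
      bool Linked = FS->addHardLink("/include/alias.h", "/include/a.h");
      (void)Linked;

      // Status reports the name the caller asked for, mimicking RealFile.
      if (auto S = FS->status("/include/alias.h"))
        (void)S->getName(); // "/include/alias.h"
    }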
-
-llvm::ErrorOr<Status> InMemoryFileSystem::status(const Twine &Path) {
- auto Node = lookupInMemoryNode(*this, Root.get(), Path);
- if (Node)
- return detail::getNodeStatus(*Node, Path.str());
- return Node.getError();
-}
-
-llvm::ErrorOr<std::unique_ptr<File>>
-InMemoryFileSystem::openFileForRead(const Twine &Path) {
- auto Node = lookupInMemoryNode(*this, Root.get(), Path);
- if (!Node)
- return Node.getError();
-
- // When we have a file, provide a heap-allocated wrapper for the memory
- // buffer to match the ownership semantics for File.
- if (auto *F = dyn_cast<detail::InMemoryFile>(*Node))
- return std::unique_ptr<File>(
- new detail::InMemoryFileAdaptor(*F, Path.str()));
-
- // FIXME: errc::not_a_file?
- return make_error_code(llvm::errc::invalid_argument);
-}
-
-namespace {
-
-/// Adaptor from InMemoryDirectory::const_iterator to directory_iterator.
-class InMemoryDirIterator : public clang::vfs::detail::DirIterImpl {
- detail::InMemoryDirectory::const_iterator I;
- detail::InMemoryDirectory::const_iterator E;
- std::string RequestedDirName;
-
- void setCurrentEntry() {
- if (I != E) {
- SmallString<256> Path(RequestedDirName);
- llvm::sys::path::append(Path, I->second->getFileName());
- sys::fs::file_type Type;
- switch (I->second->getKind()) {
- case detail::IME_File:
- case detail::IME_HardLink:
- Type = sys::fs::file_type::regular_file;
- break;
- case detail::IME_Directory:
- Type = sys::fs::file_type::directory_file;
- break;
- }
- CurrentEntry = directory_entry(Path.str(), Type);
- } else {
- // When we're at the end, make CurrentEntry invalid and DirIterImpl will
- // do the rest.
- CurrentEntry = directory_entry();
- }
- }
-
-public:
- InMemoryDirIterator() = default;
-
- explicit InMemoryDirIterator(const detail::InMemoryDirectory &Dir,
- std::string RequestedDirName)
- : I(Dir.begin()), E(Dir.end()),
- RequestedDirName(std::move(RequestedDirName)) {
- setCurrentEntry();
- }
-
- std::error_code increment() override {
- ++I;
- setCurrentEntry();
- return {};
- }
-};
-
-} // namespace
-
-directory_iterator InMemoryFileSystem::dir_begin(const Twine &Dir,
- std::error_code &EC) {
- auto Node = lookupInMemoryNode(*this, Root.get(), Dir);
- if (!Node) {
- EC = Node.getError();
- return directory_iterator(std::make_shared<InMemoryDirIterator>());
- }
-
- if (auto *DirNode = dyn_cast<detail::InMemoryDirectory>(*Node))
- return directory_iterator(
- std::make_shared<InMemoryDirIterator>(*DirNode, Dir.str()));
-
- EC = make_error_code(llvm::errc::not_a_directory);
- return directory_iterator(std::make_shared<InMemoryDirIterator>());
-}
-
-std::error_code InMemoryFileSystem::setCurrentWorkingDirectory(const Twine &P) {
- SmallString<128> Path;
- P.toVector(Path);
-
- // Fix up relative paths. This just prepends the current working directory.
- std::error_code EC = makeAbsolute(Path);
- assert(!EC);
- (void)EC;
-
- if (useNormalizedPaths())
- llvm::sys::path::remove_dots(Path, /*remove_dot_dot=*/true);
-
- if (!Path.empty())
- WorkingDirectory = Path.str();
- return {};
-}
-
-std::error_code
-InMemoryFileSystem::getRealPath(const Twine &Path,
- SmallVectorImpl<char> &Output) const {
- auto CWD = getCurrentWorkingDirectory();
- if (!CWD || CWD->empty())
- return errc::operation_not_permitted;
- Path.toVector(Output);
- if (auto EC = makeAbsolute(Output))
- return EC;
- llvm::sys::path::remove_dots(Output, /*remove_dot_dot=*/true);
- return {};
-}
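A brief sketch of how the working directory and getRealPath() above interact; it assumes the same header as the earlier sketches, and the paths are placeholders.

    #include "clang/Basic/VirtualFileSystem.h"
    #include "llvm/ADT/SmallString.h"

    using namespace clang;

    void relativePathExample(vfs::InMemoryFileSystem &FS) {
      // Relative inputs are resolved against the working directory, then "."
      // and ".." components are removed.
      FS.setCurrentWorkingDirectory("/project");
      llvm::SmallString<128> Real;
      if (!FS.getRealPath("src/../include/a.h", Real)) {
        // Real now holds "/project/include/a.h".
      }
    }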
-
-} // namespace vfs
-} // namespace clang
-
-//===-----------------------------------------------------------------------===/
-// RedirectingFileSystem implementation
-//===-----------------------------------------------------------------------===/
-
-namespace {
-
-enum EntryKind {
- EK_Directory,
- EK_File
-};
-
-/// A single file or directory in the VFS.
-class Entry {
- EntryKind Kind;
- std::string Name;
-
-public:
- Entry(EntryKind K, StringRef Name) : Kind(K), Name(Name) {}
- virtual ~Entry() = default;
-
- StringRef getName() const { return Name; }
- EntryKind getKind() const { return Kind; }
-};
-
-class RedirectingDirectoryEntry : public Entry {
- std::vector<std::unique_ptr<Entry>> Contents;
- Status S;
-
-public:
- RedirectingDirectoryEntry(StringRef Name,
- std::vector<std::unique_ptr<Entry>> Contents,
- Status S)
- : Entry(EK_Directory, Name), Contents(std::move(Contents)),
- S(std::move(S)) {}
- RedirectingDirectoryEntry(StringRef Name, Status S)
- : Entry(EK_Directory, Name), S(std::move(S)) {}
-
- Status getStatus() { return S; }
-
- void addContent(std::unique_ptr<Entry> Content) {
- Contents.push_back(std::move(Content));
- }
-
- Entry *getLastContent() const { return Contents.back().get(); }
-
- using iterator = decltype(Contents)::iterator;
-
- iterator contents_begin() { return Contents.begin(); }
- iterator contents_end() { return Contents.end(); }
-
- static bool classof(const Entry *E) { return E->getKind() == EK_Directory; }
-};
-
-class RedirectingFileEntry : public Entry {
-public:
- enum NameKind {
- NK_NotSet,
- NK_External,
- NK_Virtual
- };
-
-private:
- std::string ExternalContentsPath;
- NameKind UseName;
-
-public:
- RedirectingFileEntry(StringRef Name, StringRef ExternalContentsPath,
- NameKind UseName)
- : Entry(EK_File, Name), ExternalContentsPath(ExternalContentsPath),
- UseName(UseName) {}
-
- StringRef getExternalContentsPath() const { return ExternalContentsPath; }
-
- /// whether to use the external path as the name for this file.
- bool useExternalName(bool GlobalUseExternalName) const {
- return UseName == NK_NotSet ? GlobalUseExternalName
- : (UseName == NK_External);
- }
-
- NameKind getUseName() const { return UseName; }
-
- static bool classof(const Entry *E) { return E->getKind() == EK_File; }
-};
-
-class VFSFromYamlDirIterImpl : public clang::vfs::detail::DirIterImpl {
- std::string Dir;
- RedirectingDirectoryEntry::iterator Current, End;
-
- std::error_code incrementImpl();
-
-public:
- VFSFromYamlDirIterImpl(const Twine &Path,
- RedirectingDirectoryEntry::iterator Begin,
- RedirectingDirectoryEntry::iterator End,
- std::error_code &EC);
-
- std::error_code increment() override;
-};
-
-/// A virtual file system parsed from a YAML file.
-///
-/// Currently, this class allows creating virtual directories and mapping
-/// virtual file paths to existing external files, available in \c ExternalFS.
-///
-/// The basic structure of the parsed file is:
-/// \verbatim
-/// {
-/// 'version': <version number>,
-/// <optional configuration>
-/// 'roots': [
-/// <directory entries>
-/// ]
-/// }
-/// \endverbatim
-///
-/// All configuration options are optional.
-/// 'case-sensitive': <boolean, default=true>
-/// 'use-external-names': <boolean, default=true>
-/// 'overlay-relative': <boolean, default=false>
-/// 'ignore-non-existent-contents': <boolean, default=true>
-///
-/// Virtual directories are represented as
-/// \verbatim
-/// {
-/// 'type': 'directory',
-/// 'name': <string>,
-/// 'contents': [ <file or directory entries> ]
-/// }
-/// \endverbatim
-///
-/// The default attributes for virtual directories are:
-/// \verbatim
-/// MTime = now() when created
-/// Perms = 0777
-/// User = Group = 0
-/// Size = 0
-/// UniqueID = unspecified unique value
-/// \endverbatim
-///
-/// Re-mapped files are represented as
-/// \verbatim
-/// {
-/// 'type': 'file',
-/// 'name': <string>,
-/// 'use-external-name': <boolean> # Optional
-/// 'external-contents': <path to external file>
-/// }
-/// \endverbatim
-///
-/// and inherit their attributes from the external contents.
-///
-/// In both cases, the 'name' field may contain multiple path components (e.g.
-/// /path/to/file). However, any directory that contains more than one child
-/// must be uniquely represented by a directory entry.
-class RedirectingFileSystem : public vfs::FileSystem {
- friend class RedirectingFileSystemParser;
-
- /// The root(s) of the virtual file system.
- std::vector<std::unique_ptr<Entry>> Roots;
-
- /// The file system to use for external references.
- IntrusiveRefCntPtr<FileSystem> ExternalFS;
-
- /// If IsRelativeOverlay is set, this represents the directory
- /// path that should be prefixed to each 'external-contents' entry
- /// when reading from YAML files.
- std::string ExternalContentsPrefixDir;
-
- /// @name Configuration
- /// @{
-
- /// Whether to perform case-sensitive comparisons.
- ///
- /// Currently, case-insensitive matching only works correctly with ASCII.
- bool CaseSensitive = true;
-
- /// IsRelativeOverlay marks whether an ExternalContentsPrefixDir path must
- /// be prefixed to every 'external-contents' entry when reading from YAML
- /// files.
- bool IsRelativeOverlay = false;
-
- /// Whether to use the value of 'external-contents' for the names of files.
- /// This global value is overridable on a per-file basis.
- bool UseExternalNames = true;
-
- /// Whether an invalid path obtained via 'external-contents' should
- /// cause iteration on the VFS to stop. If 'true', the VFS should ignore
- /// the entry and continue with the next. Allows YAML files to be shared
- /// across multiple compiler invocations regardless of prior existent
- /// paths in 'external-contents'. This global value is overridable on a
- /// per-file basis.
- bool IgnoreNonExistentContents = true;
- /// @}
-
- /// Virtual file paths and external files may be canonicalized so that they
- /// contain no "..", "." or "./" components. FIXME: some unit tests currently
- /// fail on win32 when using remove_dots and remove_leading_dotslash on
- /// paths.
- bool UseCanonicalizedPaths =
-#ifdef _WIN32
- false;
-#else
- true;
-#endif
-
-private:
- RedirectingFileSystem(IntrusiveRefCntPtr<FileSystem> ExternalFS)
- : ExternalFS(std::move(ExternalFS)) {}
-
- /// Looks up the path <tt>[Start, End)</tt> in \p From, possibly
- /// recursing into the contents of \p From if it is a directory.
- ErrorOr<Entry *> lookupPath(sys::path::const_iterator Start,
- sys::path::const_iterator End, Entry *From);
-
- /// Get the status of a given \c Entry.
- ErrorOr<Status> status(const Twine &Path, Entry *E);
-
-public:
- /// Looks up \p Path in \c Roots.
- ErrorOr<Entry *> lookupPath(const Twine &Path);
-
- /// Parses \p Buffer, which is expected to be in YAML format and
- /// returns a virtual file system representing its contents.
- static RedirectingFileSystem *
- create(std::unique_ptr<MemoryBuffer> Buffer,
- SourceMgr::DiagHandlerTy DiagHandler, StringRef YAMLFilePath,
- void *DiagContext, IntrusiveRefCntPtr<FileSystem> ExternalFS);
-
- ErrorOr<Status> status(const Twine &Path) override;
- ErrorOr<std::unique_ptr<File>> openFileForRead(const Twine &Path) override;
-
- llvm::ErrorOr<std::string> getCurrentWorkingDirectory() const override {
- return ExternalFS->getCurrentWorkingDirectory();
- }
-
- std::error_code setCurrentWorkingDirectory(const Twine &Path) override {
- return ExternalFS->setCurrentWorkingDirectory(Path);
- }
-
- directory_iterator dir_begin(const Twine &Dir, std::error_code &EC) override {
- ErrorOr<Entry *> E = lookupPath(Dir);
- if (!E) {
- EC = E.getError();
- return {};
- }
- ErrorOr<Status> S = status(Dir, *E);
- if (!S) {
- EC = S.getError();
- return {};
- }
- if (!S->isDirectory()) {
- EC = std::error_code(static_cast<int>(errc::not_a_directory),
- std::system_category());
- return {};
- }
-
- auto *D = cast<RedirectingDirectoryEntry>(*E);
- return directory_iterator(std::make_shared<VFSFromYamlDirIterImpl>(
- Dir, D->contents_begin(), D->contents_end(), EC));
- }
-
- void setExternalContentsPrefixDir(StringRef PrefixDir) {
- ExternalContentsPrefixDir = PrefixDir.str();
- }
-
- StringRef getExternalContentsPrefixDir() const {
- return ExternalContentsPrefixDir;
- }
-
- bool ignoreNonExistentContents() const {
- return IgnoreNonExistentContents;
- }
-
-#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
-LLVM_DUMP_METHOD void dump() const {
- for (const auto &Root : Roots)
- dumpEntry(Root.get());
- }
-
-LLVM_DUMP_METHOD void dumpEntry(Entry *E, int NumSpaces = 0) const {
- StringRef Name = E->getName();
- for (int i = 0, e = NumSpaces; i < e; ++i)
- dbgs() << " ";
- dbgs() << "'" << Name.str().c_str() << "'" << "\n";
-
- if (E->getKind() == EK_Directory) {
- auto *DE = dyn_cast<RedirectingDirectoryEntry>(E);
- assert(DE && "Should be a directory");
-
- for (std::unique_ptr<Entry> &SubEntry :
- llvm::make_range(DE->contents_begin(), DE->contents_end()))
- dumpEntry(SubEntry.get(), NumSpaces+2);
- }
- }
-#endif
-};
-
-/// A helper class to hold the common YAML parsing state.
-class RedirectingFileSystemParser {
- yaml::Stream &Stream;
-
- void error(yaml::Node *N, const Twine &Msg) {
- Stream.printError(N, Msg);
- }
-
- // false on error
- bool parseScalarString(yaml::Node *N, StringRef &Result,
- SmallVectorImpl<char> &Storage) {
- const auto *S = dyn_cast<yaml::ScalarNode>(N);
-
- if (!S) {
- error(N, "expected string");
- return false;
- }
- Result = S->getValue(Storage);
- return true;
- }
-
- // false on error
- bool parseScalarBool(yaml::Node *N, bool &Result) {
- SmallString<5> Storage;
- StringRef Value;
- if (!parseScalarString(N, Value, Storage))
- return false;
-
- if (Value.equals_lower("true") || Value.equals_lower("on") ||
- Value.equals_lower("yes") || Value == "1") {
- Result = true;
- return true;
- } else if (Value.equals_lower("false") || Value.equals_lower("off") ||
- Value.equals_lower("no") || Value == "0") {
- Result = false;
- return true;
- }
-
- error(N, "expected boolean value");
- return false;
- }
-
- struct KeyStatus {
- bool Required;
- bool Seen = false;
-
- KeyStatus(bool Required = false) : Required(Required) {}
- };
-
- using KeyStatusPair = std::pair<StringRef, KeyStatus>;
-
- // false on error
- bool checkDuplicateOrUnknownKey(yaml::Node *KeyNode, StringRef Key,
- DenseMap<StringRef, KeyStatus> &Keys) {
- if (!Keys.count(Key)) {
- error(KeyNode, "unknown key");
- return false;
- }
- KeyStatus &S = Keys[Key];
- if (S.Seen) {
- error(KeyNode, Twine("duplicate key '") + Key + "'");
- return false;
- }
- S.Seen = true;
- return true;
- }
-
- // false on error
- bool checkMissingKeys(yaml::Node *Obj, DenseMap<StringRef, KeyStatus> &Keys) {
- for (const auto &I : Keys) {
- if (I.second.Required && !I.second.Seen) {
- error(Obj, Twine("missing key '") + I.first + "'");
- return false;
- }
- }
- return true;
- }
-
- Entry *lookupOrCreateEntry(RedirectingFileSystem *FS, StringRef Name,
- Entry *ParentEntry = nullptr) {
- if (!ParentEntry) { // Look for an existing root
- for (const auto &Root : FS->Roots) {
- if (Name.equals(Root->getName())) {
- ParentEntry = Root.get();
- return ParentEntry;
- }
- }
- } else { // Advance to the next component
- auto *DE = dyn_cast<RedirectingDirectoryEntry>(ParentEntry);
- for (std::unique_ptr<Entry> &Content :
- llvm::make_range(DE->contents_begin(), DE->contents_end())) {
- auto *DirContent = dyn_cast<RedirectingDirectoryEntry>(Content.get());
- if (DirContent && Name.equals(Content->getName()))
- return DirContent;
- }
- }
-
- // ... or create a new one
- std::unique_ptr<Entry> E = llvm::make_unique<RedirectingDirectoryEntry>(
- Name,
- Status("", getNextVirtualUniqueID(), std::chrono::system_clock::now(),
- 0, 0, 0, file_type::directory_file, sys::fs::all_all));
-
- if (!ParentEntry) { // Add a new root to the overlay
- FS->Roots.push_back(std::move(E));
- ParentEntry = FS->Roots.back().get();
- return ParentEntry;
- }
-
- auto *DE = dyn_cast<RedirectingDirectoryEntry>(ParentEntry);
- DE->addContent(std::move(E));
- return DE->getLastContent();
- }
-
- void uniqueOverlayTree(RedirectingFileSystem *FS, Entry *SrcE,
- Entry *NewParentE = nullptr) {
- StringRef Name = SrcE->getName();
- switch (SrcE->getKind()) {
- case EK_Directory: {
- auto *DE = dyn_cast<RedirectingDirectoryEntry>(SrcE);
- assert(DE && "Must be a directory");
- // Empty directories could be present in the YAML as a way to
- // describe a file for the current directory after some of its
- // subdirectories have already been parsed. This only leads to
- // redundant walks; ignore it.
- if (!Name.empty())
- NewParentE = lookupOrCreateEntry(FS, Name, NewParentE);
- for (std::unique_ptr<Entry> &SubEntry :
- llvm::make_range(DE->contents_begin(), DE->contents_end()))
- uniqueOverlayTree(FS, SubEntry.get(), NewParentE);
- break;
- }
- case EK_File: {
- auto *FE = dyn_cast<RedirectingFileEntry>(SrcE);
- assert(FE && "Must be a file");
- assert(NewParentE && "Parent entry must exist");
- auto *DE = dyn_cast<RedirectingDirectoryEntry>(NewParentE);
- DE->addContent(llvm::make_unique<RedirectingFileEntry>(
- Name, FE->getExternalContentsPath(), FE->getUseName()));
- break;
- }
- }
- }
-
- std::unique_ptr<Entry> parseEntry(yaml::Node *N, RedirectingFileSystem *FS,
- bool IsRootEntry) {
- auto *M = dyn_cast<yaml::MappingNode>(N);
- if (!M) {
- error(N, "expected mapping node for file or directory entry");
- return nullptr;
- }
-
- KeyStatusPair Fields[] = {
- KeyStatusPair("name", true),
- KeyStatusPair("type", true),
- KeyStatusPair("contents", false),
- KeyStatusPair("external-contents", false),
- KeyStatusPair("use-external-name", false),
- };
-
- DenseMap<StringRef, KeyStatus> Keys(std::begin(Fields), std::end(Fields));
-
- bool HasContents = false; // external or otherwise
- std::vector<std::unique_ptr<Entry>> EntryArrayContents;
- std::string ExternalContentsPath;
- std::string Name;
- yaml::Node *NameValueNode;
- auto UseExternalName = RedirectingFileEntry::NK_NotSet;
- EntryKind Kind;
-
- for (auto &I : *M) {
- StringRef Key;
- // Reuse the buffer for key and value, since we don't look at key after
- // parsing value.
- SmallString<256> Buffer;
- if (!parseScalarString(I.getKey(), Key, Buffer))
- return nullptr;
-
- if (!checkDuplicateOrUnknownKey(I.getKey(), Key, Keys))
- return nullptr;
-
- StringRef Value;
- if (Key == "name") {
- if (!parseScalarString(I.getValue(), Value, Buffer))
- return nullptr;
-
- NameValueNode = I.getValue();
- if (FS->UseCanonicalizedPaths) {
- SmallString<256> Path(Value);
- // Guarantee that old YAML files containing paths with ".." and "."
- // are properly canonicalized before being read into the VFS.
- Path = sys::path::remove_leading_dotslash(Path);
- sys::path::remove_dots(Path, /*remove_dot_dot=*/true);
- Name = Path.str();
- } else {
- Name = Value;
- }
- } else if (Key == "type") {
- if (!parseScalarString(I.getValue(), Value, Buffer))
- return nullptr;
- if (Value == "file")
- Kind = EK_File;
- else if (Value == "directory")
- Kind = EK_Directory;
- else {
- error(I.getValue(), "unknown value for 'type'");
- return nullptr;
- }
- } else if (Key == "contents") {
- if (HasContents) {
- error(I.getKey(),
- "entry already has 'contents' or 'external-contents'");
- return nullptr;
- }
- HasContents = true;
- auto *Contents = dyn_cast<yaml::SequenceNode>(I.getValue());
- if (!Contents) {
- // FIXME: this is only for directories, what about files?
- error(I.getValue(), "expected array");
- return nullptr;
- }
-
- for (auto &I : *Contents) {
- if (std::unique_ptr<Entry> E =
- parseEntry(&I, FS, /*IsRootEntry*/ false))
- EntryArrayContents.push_back(std::move(E));
- else
- return nullptr;
- }
- } else if (Key == "external-contents") {
- if (HasContents) {
- error(I.getKey(),
- "entry already has 'contents' or 'external-contents'");
- return nullptr;
- }
- HasContents = true;
- if (!parseScalarString(I.getValue(), Value, Buffer))
- return nullptr;
-
- SmallString<256> FullPath;
- if (FS->IsRelativeOverlay) {
- FullPath = FS->getExternalContentsPrefixDir();
- assert(!FullPath.empty() &&
- "External contents prefix directory must exist");
- llvm::sys::path::append(FullPath, Value);
- } else {
- FullPath = Value;
- }
-
- if (FS->UseCanonicalizedPaths) {
- // Guarantee that old YAML files containing paths with ".." and "."
- // are properly canonicalized before being read into the VFS.
- FullPath = sys::path::remove_leading_dotslash(FullPath);
- sys::path::remove_dots(FullPath, /*remove_dot_dot=*/true);
- }
- ExternalContentsPath = FullPath.str();
- } else if (Key == "use-external-name") {
- bool Val;
- if (!parseScalarBool(I.getValue(), Val))
- return nullptr;
- UseExternalName = Val ? RedirectingFileEntry::NK_External
- : RedirectingFileEntry::NK_Virtual;
- } else {
- llvm_unreachable("key missing from Keys");
- }
- }
-
- if (Stream.failed())
- return nullptr;
-
- // check for missing keys
- if (!HasContents) {
- error(N, "missing key 'contents' or 'external-contents'");
- return nullptr;
- }
- if (!checkMissingKeys(N, Keys))
- return nullptr;
-
- // check invalid configuration
- if (Kind == EK_Directory &&
- UseExternalName != RedirectingFileEntry::NK_NotSet) {
- error(N, "'use-external-name' is not supported for directories");
- return nullptr;
- }
-
- if (IsRootEntry && !sys::path::is_absolute(Name)) {
- assert(NameValueNode && "Name presence should be checked earlier");
- error(NameValueNode,
- "entry with relative path at the root level is not discoverable");
- return nullptr;
- }
-
- // Remove trailing slash(es), being careful not to remove the root path
- StringRef Trimmed(Name);
- size_t RootPathLen = sys::path::root_path(Trimmed).size();
- while (Trimmed.size() > RootPathLen &&
- sys::path::is_separator(Trimmed.back()))
- Trimmed = Trimmed.slice(0, Trimmed.size()-1);
- // Get the last component
- StringRef LastComponent = sys::path::filename(Trimmed);
-
- std::unique_ptr<Entry> Result;
- switch (Kind) {
- case EK_File:
- Result = llvm::make_unique<RedirectingFileEntry>(
- LastComponent, std::move(ExternalContentsPath), UseExternalName);
- break;
- case EK_Directory:
- Result = llvm::make_unique<RedirectingDirectoryEntry>(
- LastComponent, std::move(EntryArrayContents),
- Status("", getNextVirtualUniqueID(), std::chrono::system_clock::now(),
- 0, 0, 0, file_type::directory_file, sys::fs::all_all));
- break;
- }
-
- StringRef Parent = sys::path::parent_path(Trimmed);
- if (Parent.empty())
- return Result;
-
- // if 'name' contains multiple components, create implicit directory entries
- for (sys::path::reverse_iterator I = sys::path::rbegin(Parent),
- E = sys::path::rend(Parent);
- I != E; ++I) {
- std::vector<std::unique_ptr<Entry>> Entries;
- Entries.push_back(std::move(Result));
- Result = llvm::make_unique<RedirectingDirectoryEntry>(
- *I, std::move(Entries),
- Status("", getNextVirtualUniqueID(), std::chrono::system_clock::now(),
- 0, 0, 0, file_type::directory_file, sys::fs::all_all));
- }
- return Result;
- }
-
-public:
- RedirectingFileSystemParser(yaml::Stream &S) : Stream(S) {}
-
- // false on error
- bool parse(yaml::Node *Root, RedirectingFileSystem *FS) {
- auto *Top = dyn_cast<yaml::MappingNode>(Root);
- if (!Top) {
- error(Root, "expected mapping node");
- return false;
- }
-
- KeyStatusPair Fields[] = {
- KeyStatusPair("version", true),
- KeyStatusPair("case-sensitive", false),
- KeyStatusPair("use-external-names", false),
- KeyStatusPair("overlay-relative", false),
- KeyStatusPair("ignore-non-existent-contents", false),
- KeyStatusPair("roots", true),
- };
-
- DenseMap<StringRef, KeyStatus> Keys(std::begin(Fields), std::end(Fields));
- std::vector<std::unique_ptr<Entry>> RootEntries;
-
- // Parse configuration and 'roots'
- for (auto &I : *Top) {
- SmallString<10> KeyBuffer;
- StringRef Key;
- if (!parseScalarString(I.getKey(), Key, KeyBuffer))
- return false;
-
- if (!checkDuplicateOrUnknownKey(I.getKey(), Key, Keys))
- return false;
-
- if (Key == "roots") {
- auto *Roots = dyn_cast<yaml::SequenceNode>(I.getValue());
- if (!Roots) {
- error(I.getValue(), "expected array");
- return false;
- }
-
- for (auto &I : *Roots) {
- if (std::unique_ptr<Entry> E =
- parseEntry(&I, FS, /*IsRootEntry*/ true))
- RootEntries.push_back(std::move(E));
- else
- return false;
- }
- } else if (Key == "version") {
- StringRef VersionString;
- SmallString<4> Storage;
- if (!parseScalarString(I.getValue(), VersionString, Storage))
- return false;
- int Version;
- if (VersionString.getAsInteger<int>(10, Version)) {
- error(I.getValue(), "expected integer");
- return false;
- }
- if (Version < 0) {
- error(I.getValue(), "invalid version number");
- return false;
- }
- if (Version != 0) {
- error(I.getValue(), "version mismatch, expected 0");
- return false;
- }
- } else if (Key == "case-sensitive") {
- if (!parseScalarBool(I.getValue(), FS->CaseSensitive))
- return false;
- } else if (Key == "overlay-relative") {
- if (!parseScalarBool(I.getValue(), FS->IsRelativeOverlay))
- return false;
- } else if (Key == "use-external-names") {
- if (!parseScalarBool(I.getValue(), FS->UseExternalNames))
- return false;
- } else if (Key == "ignore-non-existent-contents") {
- if (!parseScalarBool(I.getValue(), FS->IgnoreNonExistentContents))
- return false;
- } else {
- llvm_unreachable("key missing from Keys");
- }
- }
-
- if (Stream.failed())
- return false;
-
- if (!checkMissingKeys(Top, Keys))
- return false;
-
- // Now that we successfully parsed the YAML file, canonicalize the internal
- // representation to a proper directory tree so that we can search faster
- // inside the VFS.
- for (auto &E : RootEntries)
- uniqueOverlayTree(FS, E.get());
-
- return true;
- }
-};
-
-} // namespace
-
-RedirectingFileSystem *
-RedirectingFileSystem::create(std::unique_ptr<MemoryBuffer> Buffer,
- SourceMgr::DiagHandlerTy DiagHandler,
- StringRef YAMLFilePath, void *DiagContext,
- IntrusiveRefCntPtr<FileSystem> ExternalFS) {
- SourceMgr SM;
- yaml::Stream Stream(Buffer->getMemBufferRef(), SM);
-
- SM.setDiagHandler(DiagHandler, DiagContext);
- yaml::document_iterator DI = Stream.begin();
- // Do not dereference the document iterator before checking for an empty
- // stream.
- yaml::Node *Root = DI != Stream.end() ? DI->getRoot() : nullptr;
- if (!Root) {
- SM.PrintMessage(SMLoc(), SourceMgr::DK_Error, "expected root node");
- return nullptr;
- }
-
- RedirectingFileSystemParser P(Stream);
-
- std::unique_ptr<RedirectingFileSystem> FS(
- new RedirectingFileSystem(std::move(ExternalFS)));
-
- if (!YAMLFilePath.empty()) {
- // Use the YAML path from -ivfsoverlay to compute the dir to be prefixed
- // to each 'external-contents' path.
- //
- // Example:
- // -ivfsoverlay dummy.cache/vfs/vfs.yaml
- // yields:
- // FS->ExternalContentsPrefixDir => /<absolute_path_to>/dummy.cache/vfs
- //
- SmallString<256> OverlayAbsDir = sys::path::parent_path(YAMLFilePath);
- std::error_code EC = llvm::sys::fs::make_absolute(OverlayAbsDir);
- assert(!EC && "Overlay dir final path must be absolute");
- (void)EC;
- FS->setExternalContentsPrefixDir(OverlayAbsDir);
- }
-
- if (!P.parse(Root, FS.get()))
- return nullptr;
-
- return FS.release();
-}
-
-ErrorOr<Entry *> RedirectingFileSystem::lookupPath(const Twine &Path_) {
- SmallString<256> Path;
- Path_.toVector(Path);
-
- // Handle relative paths
- if (std::error_code EC = makeAbsolute(Path))
- return EC;
-
- // Canonicalize path by removing ".", "..", "./", etc. components. This is
- // a VFS request; do not bother about symlinks in the path components,
- // but canonicalize in order to perform the correct entry search.
- if (UseCanonicalizedPaths) {
- Path = sys::path::remove_leading_dotslash(Path);
- sys::path::remove_dots(Path, /*remove_dot_dot=*/true);
- }
-
- if (Path.empty())
- return make_error_code(llvm::errc::invalid_argument);
-
- sys::path::const_iterator Start = sys::path::begin(Path);
- sys::path::const_iterator End = sys::path::end(Path);
- for (const auto &Root : Roots) {
- ErrorOr<Entry *> Result = lookupPath(Start, End, Root.get());
- if (Result || Result.getError() != llvm::errc::no_such_file_or_directory)
- return Result;
- }
- return make_error_code(llvm::errc::no_such_file_or_directory);
-}
-
-ErrorOr<Entry *>
-RedirectingFileSystem::lookupPath(sys::path::const_iterator Start,
- sys::path::const_iterator End, Entry *From) {
-#ifndef _WIN32
- assert(!isTraversalComponent(*Start) &&
- !isTraversalComponent(From->getName()) &&
- "Paths should not contain traversal components");
-#else
- // FIXME: this is here to support windows, remove it once canonicalized
- // paths become globally default.
- if (Start->equals("."))
- ++Start;
-#endif
-
- StringRef FromName = From->getName();
-
- // Forward the search to the next component in case this is an empty one.
- if (!FromName.empty()) {
- if (CaseSensitive ? !Start->equals(FromName)
- : !Start->equals_lower(FromName))
- // failure to match
- return make_error_code(llvm::errc::no_such_file_or_directory);
-
- ++Start;
-
- if (Start == End) {
- // Match!
- return From;
- }
- }
-
- auto *DE = dyn_cast<RedirectingDirectoryEntry>(From);
- if (!DE)
- return make_error_code(llvm::errc::not_a_directory);
-
- for (const std::unique_ptr<Entry> &DirEntry :
- llvm::make_range(DE->contents_begin(), DE->contents_end())) {
- ErrorOr<Entry *> Result = lookupPath(Start, End, DirEntry.get());
- if (Result || Result.getError() != llvm::errc::no_such_file_or_directory)
- return Result;
- }
- return make_error_code(llvm::errc::no_such_file_or_directory);
-}
-
-static Status getRedirectedFileStatus(const Twine &Path, bool UseExternalNames,
- Status ExternalStatus) {
- Status S = ExternalStatus;
- if (!UseExternalNames)
- S = Status::copyWithNewName(S, Path.str());
- S.IsVFSMapped = true;
- return S;
-}
-
-ErrorOr<Status> RedirectingFileSystem::status(const Twine &Path, Entry *E) {
- assert(E != nullptr);
- if (auto *F = dyn_cast<RedirectingFileEntry>(E)) {
- ErrorOr<Status> S = ExternalFS->status(F->getExternalContentsPath());
- assert(!S || S->getName() == F->getExternalContentsPath());
- if (S)
- return getRedirectedFileStatus(Path, F->useExternalName(UseExternalNames),
- *S);
- return S;
- } else { // directory
- auto *DE = cast<RedirectingDirectoryEntry>(E);
- return Status::copyWithNewName(DE->getStatus(), Path.str());
- }
-}
-
-ErrorOr<Status> RedirectingFileSystem::status(const Twine &Path) {
- ErrorOr<Entry *> Result = lookupPath(Path);
- if (!Result)
- return Result.getError();
- return status(Path, *Result);
-}
-
-namespace {
-
-/// Provide a file wrapper with an overridden status.
-class FileWithFixedStatus : public File {
- std::unique_ptr<File> InnerFile;
- Status S;
-
-public:
- FileWithFixedStatus(std::unique_ptr<File> InnerFile, Status S)
- : InnerFile(std::move(InnerFile)), S(std::move(S)) {}
-
- ErrorOr<Status> status() override { return S; }
-
- ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
- getBuffer(const Twine &Name, int64_t FileSize, bool RequiresNullTerminator,
- bool IsVolatile) override {
- return InnerFile->getBuffer(Name, FileSize, RequiresNullTerminator,
- IsVolatile);
- }
-
- std::error_code close() override { return InnerFile->close(); }
-};
-
-} // namespace
-
-ErrorOr<std::unique_ptr<File>>
-RedirectingFileSystem::openFileForRead(const Twine &Path) {
- ErrorOr<Entry *> E = lookupPath(Path);
- if (!E)
- return E.getError();
-
- auto *F = dyn_cast<RedirectingFileEntry>(*E);
- if (!F) // FIXME: errc::not_a_file?
- return make_error_code(llvm::errc::invalid_argument);
-
- auto Result = ExternalFS->openFileForRead(F->getExternalContentsPath());
- if (!Result)
- return Result;
-
- auto ExternalStatus = (*Result)->status();
- if (!ExternalStatus)
- return ExternalStatus.getError();
-
- // FIXME: Update the status with the name and VFSMapped.
- Status S = getRedirectedFileStatus(Path, F->useExternalName(UseExternalNames),
- *ExternalStatus);
- return std::unique_ptr<File>(
- llvm::make_unique<FileWithFixedStatus>(std::move(*Result), S));
-}
-
-IntrusiveRefCntPtr<FileSystem>
-vfs::getVFSFromYAML(std::unique_ptr<MemoryBuffer> Buffer,
- SourceMgr::DiagHandlerTy DiagHandler,
- StringRef YAMLFilePath,
- void *DiagContext,
- IntrusiveRefCntPtr<FileSystem> ExternalFS) {
- return RedirectingFileSystem::create(std::move(Buffer), DiagHandler,
- YAMLFilePath, DiagContext,
- std::move(ExternalFS));
-}
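To make the YAML overlay format documented above concrete, here is a minimal sketch that builds a redirecting file system from an in-memory overlay buffer. It is not part of the patch; it assumes clang/Basic/VirtualFileSystem.h and vfs::getRealFileSystem() are available, and both '/virtual' and '/real/a.h' are made-up paths.

    #include "clang/Basic/VirtualFileSystem.h"
    #include "llvm/ADT/IntrusiveRefCntPtr.h"
    #include "llvm/Support/MemoryBuffer.h"

    using namespace clang;

    llvm::IntrusiveRefCntPtr<vfs::FileSystem> overlayFromYAML() {
      // One virtual directory whose single file is redirected to a real path.
      static const char *Overlay =
          "{\n"
          "  'version': 0,\n"
          "  'roots': [\n"
          "    { 'type': 'directory', 'name': '/virtual',\n"
          "      'contents': [\n"
          "        { 'type': 'file', 'name': 'a.h',\n"
          "          'external-contents': '/real/a.h' }\n"
          "      ]\n"
          "    }\n"
          "  ]\n"
          "}\n";
      // Returns nullptr if the buffer fails to parse.
      return vfs::getVFSFromYAML(
          llvm::MemoryBuffer::getMemBuffer(Overlay),
          /*DiagHandler=*/nullptr, /*YAMLFilePath=*/"",
          /*DiagContext=*/nullptr, vfs::getRealFileSystem());
    }

Lookups of "/virtual/a.h" through the returned file system are redirected to "/real/a.h" on the external file system, which is exactly the mechanism -ivfsoverlay relies on.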
-
-static void getVFSEntries(Entry *SrcE, SmallVectorImpl<StringRef> &Path,
- SmallVectorImpl<YAMLVFSEntry> &Entries) {
- auto Kind = SrcE->getKind();
- if (Kind == EK_Directory) {
- auto *DE = dyn_cast<RedirectingDirectoryEntry>(SrcE);
- assert(DE && "Must be a directory");
- for (std::unique_ptr<Entry> &SubEntry :
- llvm::make_range(DE->contents_begin(), DE->contents_end())) {
- Path.push_back(SubEntry->getName());
- getVFSEntries(SubEntry.get(), Path, Entries);
- Path.pop_back();
- }
- return;
- }
-
- assert(Kind == EK_File && "Must be a EK_File");
- auto *FE = dyn_cast<RedirectingFileEntry>(SrcE);
- assert(FE && "Must be a file");
- SmallString<128> VPath;
- for (auto &Comp : Path)
- llvm::sys::path::append(VPath, Comp);
- Entries.push_back(YAMLVFSEntry(VPath.c_str(), FE->getExternalContentsPath()));
-}
-
-void vfs::collectVFSFromYAML(std::unique_ptr<MemoryBuffer> Buffer,
- SourceMgr::DiagHandlerTy DiagHandler,
- StringRef YAMLFilePath,
- SmallVectorImpl<YAMLVFSEntry> &CollectedEntries,
- void *DiagContext,
- IntrusiveRefCntPtr<FileSystem> ExternalFS) {
- RedirectingFileSystem *VFS = RedirectingFileSystem::create(
- std::move(Buffer), DiagHandler, YAMLFilePath, DiagContext,
- std::move(ExternalFS));
- ErrorOr<Entry *> RootE = VFS->lookupPath("/");
- if (!RootE)
- return;
- SmallVector<StringRef, 8> Components;
- Components.push_back("/");
- getVFSEntries(*RootE, Components, CollectedEntries);
-}
-
-UniqueID vfs::getNextVirtualUniqueID() {
- static std::atomic<unsigned> UID;
- unsigned ID = ++UID;
- // The following assumes that uint64_t max will never collide with a real
- // dev_t value from the OS.
- return UniqueID(std::numeric_limits<uint64_t>::max(), ID);
-}
-
-void YAMLVFSWriter::addFileMapping(StringRef VirtualPath, StringRef RealPath) {
- assert(sys::path::is_absolute(VirtualPath) && "virtual path not absolute");
- assert(sys::path::is_absolute(RealPath) && "real path not absolute");
- assert(!pathHasTraversal(VirtualPath) && "path traversal is not supported");
- Mappings.emplace_back(VirtualPath, RealPath);
-}
-
-namespace {
-
-class JSONWriter {
- llvm::raw_ostream &OS;
- SmallVector<StringRef, 16> DirStack;
-
- unsigned getDirIndent() { return 4 * DirStack.size(); }
- unsigned getFileIndent() { return 4 * (DirStack.size() + 1); }
- bool containedIn(StringRef Parent, StringRef Path);
- StringRef containedPart(StringRef Parent, StringRef Path);
- void startDirectory(StringRef Path);
- void endDirectory();
- void writeEntry(StringRef VPath, StringRef RPath);
-
-public:
- JSONWriter(llvm::raw_ostream &OS) : OS(OS) {}
-
- void write(ArrayRef<YAMLVFSEntry> Entries, Optional<bool> UseExternalNames,
- Optional<bool> IsCaseSensitive, Optional<bool> IsOverlayRelative,
- Optional<bool> IgnoreNonExistentContents, StringRef OverlayDir);
-};
-
-} // namespace
-
-bool JSONWriter::containedIn(StringRef Parent, StringRef Path) {
- using namespace llvm::sys;
-
- // Compare each path component.
- auto IParent = path::begin(Parent), EParent = path::end(Parent);
- for (auto IChild = path::begin(Path), EChild = path::end(Path);
- IParent != EParent && IChild != EChild; ++IParent, ++IChild) {
- if (*IParent != *IChild)
- return false;
- }
- // Have we exhausted the parent path?
- return IParent == EParent;
-}
-
-StringRef JSONWriter::containedPart(StringRef Parent, StringRef Path) {
- assert(!Parent.empty());
- assert(containedIn(Parent, Path));
- return Path.slice(Parent.size() + 1, StringRef::npos);
-}
-
-void JSONWriter::startDirectory(StringRef Path) {
- StringRef Name =
- DirStack.empty() ? Path : containedPart(DirStack.back(), Path);
- DirStack.push_back(Path);
- unsigned Indent = getDirIndent();
- OS.indent(Indent) << "{\n";
- OS.indent(Indent + 2) << "'type': 'directory',\n";
- OS.indent(Indent + 2) << "'name': \"" << llvm::yaml::escape(Name) << "\",\n";
- OS.indent(Indent + 2) << "'contents': [\n";
-}
-
-void JSONWriter::endDirectory() {
- unsigned Indent = getDirIndent();
- OS.indent(Indent + 2) << "]\n";
- OS.indent(Indent) << "}";
-
- DirStack.pop_back();
-}
-
-void JSONWriter::writeEntry(StringRef VPath, StringRef RPath) {
- unsigned Indent = getFileIndent();
- OS.indent(Indent) << "{\n";
- OS.indent(Indent + 2) << "'type': 'file',\n";
- OS.indent(Indent + 2) << "'name': \"" << llvm::yaml::escape(VPath) << "\",\n";
- OS.indent(Indent + 2) << "'external-contents': \""
- << llvm::yaml::escape(RPath) << "\"\n";
- OS.indent(Indent) << "}";
-}
-
-void JSONWriter::write(ArrayRef<YAMLVFSEntry> Entries,
- Optional<bool> UseExternalNames,
- Optional<bool> IsCaseSensitive,
- Optional<bool> IsOverlayRelative,
- Optional<bool> IgnoreNonExistentContents,
- StringRef OverlayDir) {
- using namespace llvm::sys;
-
- OS << "{\n"
- " 'version': 0,\n";
- if (IsCaseSensitive.hasValue())
- OS << " 'case-sensitive': '"
- << (IsCaseSensitive.getValue() ? "true" : "false") << "',\n";
- if (UseExternalNames.hasValue())
- OS << " 'use-external-names': '"
- << (UseExternalNames.getValue() ? "true" : "false") << "',\n";
- bool UseOverlayRelative = false;
- if (IsOverlayRelative.hasValue()) {
- UseOverlayRelative = IsOverlayRelative.getValue();
- OS << " 'overlay-relative': '"
- << (UseOverlayRelative ? "true" : "false") << "',\n";
- }
- if (IgnoreNonExistentContents.hasValue())
- OS << " 'ignore-non-existent-contents': '"
- << (IgnoreNonExistentContents.getValue() ? "true" : "false") << "',\n";
- OS << " 'roots': [\n";
-
- if (!Entries.empty()) {
- const YAMLVFSEntry &Entry = Entries.front();
- startDirectory(path::parent_path(Entry.VPath));
-
- StringRef RPath = Entry.RPath;
- if (UseOverlayRelative) {
- unsigned OverlayDirLen = OverlayDir.size();
- assert(RPath.substr(0, OverlayDirLen) == OverlayDir &&
- "Overlay dir must be contained in RPath");
- RPath = RPath.slice(OverlayDirLen, RPath.size());
- }
-
- writeEntry(path::filename(Entry.VPath), RPath);
-
- for (const auto &Entry : Entries.slice(1)) {
- StringRef Dir = path::parent_path(Entry.VPath);
- if (Dir == DirStack.back())
- OS << ",\n";
- else {
- while (!DirStack.empty() && !containedIn(DirStack.back(), Dir)) {
- OS << "\n";
- endDirectory();
- }
- OS << ",\n";
- startDirectory(Dir);
- }
- StringRef RPath = Entry.RPath;
- if (UseOverlayRelative) {
- unsigned OverlayDirLen = OverlayDir.size();
- assert(RPath.substr(0, OverlayDirLen) == OverlayDir &&
- "Overlay dir must be contained in RPath");
- RPath = RPath.slice(OverlayDirLen, RPath.size());
- }
- writeEntry(path::filename(Entry.VPath), RPath);
- }
-
- while (!DirStack.empty()) {
- OS << "\n";
- endDirectory();
- }
- OS << "\n";
- }
-
- OS << " ]\n"
- << "}\n";
-}
-
-void YAMLVFSWriter::write(llvm::raw_ostream &OS) {
- llvm::sort(Mappings, [](const YAMLVFSEntry &LHS, const YAMLVFSEntry &RHS) {
- return LHS.VPath < RHS.VPath;
- });
-
- JSONWriter(OS).write(Mappings, UseExternalNames, IsCaseSensitive,
- IsOverlayRelative, IgnoreNonExistentContents,
- OverlayDir);
-}
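For the writer side, a small usage sketch of YAMLVFSWriter (same assumptions as the earlier sketches; the mapped paths are placeholders):

    #include "clang/Basic/VirtualFileSystem.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace clang;

    void writeOverlay(llvm::raw_ostream &OS) {
      vfs::YAMLVFSWriter Writer;
      // Both sides must be absolute paths; mappings are sorted by virtual
      // path before the JSON-style YAML document is emitted.
      Writer.addFileMapping("/virtual/a.h", "/real/a.h");
      Writer.addFileMapping("/virtual/sub/b.h", "/real/b.h");
      Writer.write(OS);
    }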
-
-VFSFromYamlDirIterImpl::VFSFromYamlDirIterImpl(
- const Twine &_Path, RedirectingDirectoryEntry::iterator Begin,
- RedirectingDirectoryEntry::iterator End, std::error_code &EC)
- : Dir(_Path.str()), Current(Begin), End(End) {
- EC = incrementImpl();
-}
-
-std::error_code VFSFromYamlDirIterImpl::increment() {
- assert(Current != End && "cannot iterate past end");
- ++Current;
- return incrementImpl();
-}
-
-std::error_code VFSFromYamlDirIterImpl::incrementImpl() {
- while (Current != End) {
- SmallString<128> PathStr(Dir);
- llvm::sys::path::append(PathStr, (*Current)->getName());
- sys::fs::file_type Type;
- switch ((*Current)->getKind()) {
- case EK_Directory:
- Type = sys::fs::file_type::directory_file;
- break;
- case EK_File:
- Type = sys::fs::file_type::regular_file;
- break;
- }
- CurrentEntry = directory_entry(PathStr.str(), Type);
- break;
- }
-
- if (Current == End)
- CurrentEntry = directory_entry();
- return {};
-}
-
-vfs::recursive_directory_iterator::recursive_directory_iterator(FileSystem &FS_,
- const Twine &Path,
- std::error_code &EC)
- : FS(&FS_) {
- directory_iterator I = FS->dir_begin(Path, EC);
- if (I != directory_iterator()) {
- State = std::make_shared<IterState>();
- State->push(I);
- }
-}
-
-vfs::recursive_directory_iterator &
-recursive_directory_iterator::increment(std::error_code &EC) {
- assert(FS && State && !State->empty() && "incrementing past end");
- assert(!State->top()->path().empty() && "non-canonical end iterator");
- vfs::directory_iterator End;
- if (State->top()->type() == sys::fs::file_type::directory_file) {
- vfs::directory_iterator I = FS->dir_begin(State->top()->path(), EC);
- if (I != End) {
- State->push(I);
- return *this;
- }
- }
-
- while (!State->empty() && State->top().increment(EC) == End)
- State->pop();
-
- if (State->empty())
- State.reset(); // end iterator
-
- return *this;
-}
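Finally, a brief sketch of walking a tree with the recursive iterator removed above. It is illustrative only, with minimal error handling, and assumes the same header as the other sketches.

    #include "clang/Basic/VirtualFileSystem.h"
    #include "llvm/ADT/Twine.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace clang;

    void walk(vfs::FileSystem &FS, const llvm::Twine &Root) {
      std::error_code EC;
      vfs::recursive_directory_iterator I(FS, Root, EC), E;
      // The iterator descends into each directory it encounters; once the
      // traversal is exhausted it compares equal to the default iterator.
      for (; I != E && !EC; I.increment(EC))
        llvm::errs() << (*I).path() << "\n";
    }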
diff --git a/lib/CodeGen/BackendUtil.cpp b/lib/CodeGen/BackendUtil.cpp
index 45be10f005..52fc08de9b 100644
--- a/lib/CodeGen/BackendUtil.cpp
+++ b/lib/CodeGen/BackendUtil.cpp
@@ -8,10 +8,10 @@
//===----------------------------------------------------------------------===//
#include "clang/CodeGen/BackendUtil.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/TargetOptions.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/Utils.h"
#include "clang/Lex/HeaderSearchOptions.h"
@@ -37,6 +37,7 @@
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Passes/PassBuilder.h"
+#include "llvm/Support/BuryPointer.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/PrettyStackTrace.h"
@@ -235,11 +236,12 @@ static void addAddressSanitizerPasses(const PassManagerBuilder &Builder,
const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts();
bool Recover = CGOpts.SanitizeRecover.has(SanitizerKind::Address);
bool UseAfterScope = CGOpts.SanitizeAddressUseAfterScope;
+ bool UseOdrIndicator = CGOpts.SanitizeAddressUseOdrIndicator;
bool UseGlobalsGC = asanUseGlobalsGC(T, CGOpts);
PM.add(createAddressSanitizerFunctionPass(/*CompileKernel*/ false, Recover,
UseAfterScope));
PM.add(createAddressSanitizerModulePass(/*CompileKernel*/ false, Recover,
- UseGlobalsGC));
+ UseGlobalsGC, UseOdrIndicator));
}
static void addKernelAddressSanitizerPasses(const PassManagerBuilder &Builder,
@@ -247,7 +249,8 @@ static void addKernelAddressSanitizerPasses(const PassManagerBuilder &Builder,
PM.add(createAddressSanitizerFunctionPass(
/*CompileKernel*/ true, /*Recover*/ true, /*UseAfterScope*/ false));
PM.add(createAddressSanitizerModulePass(
- /*CompileKernel*/ true, /*Recover*/ true));
+ /*CompileKernel*/ true, /*Recover*/ true, /*UseGlobalsGC*/ true,
+ /*UseOdrIndicator*/ false));
}
static void addHWAddressSanitizerPasses(const PassManagerBuilder &Builder,
@@ -428,7 +431,7 @@ static void initTargetOptions(llvm::TargetOptions &Options,
switch (LangOpts.getDefaultFPContractMode()) {
case LangOptions::FPC_Off:
// Preserve any contraction performed by the front-end. (Strict performs
- // splitting of the muladd instrinsic in the backend.)
+ // splitting of the muladd intrinsic in the backend.)
Options.AllowFPOpFusion = llvm::FPOpFusion::Standard;
break;
case LangOptions::FPC_On:
@@ -468,7 +471,7 @@ static void initTargetOptions(llvm::TargetOptions &Options,
Options.EmitStackSizeSection = CodeGenOpts.StackSizeSection;
Options.EmitAddrsig = CodeGenOpts.Addrsig;
- if (CodeGenOpts.EnableSplitDwarf)
+ if (CodeGenOpts.getSplitDwarfMode() != CodeGenOptions::NoFission)
Options.MCOptions.SplitDwarfFile = CodeGenOpts.SplitDwarfFile;
Options.MCOptions.MCRelaxAll = CodeGenOpts.RelaxAll;
Options.MCOptions.MCSaveTempLabels = CodeGenOpts.SaveTempLabels;
@@ -503,6 +506,8 @@ static Optional<GCOVOptions> getGCOVOptions(const CodeGenOptions &CodeGenOpts) {
Options.UseCfgChecksum = CodeGenOpts.CoverageExtraChecksum;
Options.NoRedZone = CodeGenOpts.DisableRedZone;
Options.FunctionNamesInData = !CodeGenOpts.CoverageNoFunctionNamesInData;
+ Options.Filter = CodeGenOpts.ProfileFilterFiles;
+ Options.Exclude = CodeGenOpts.ProfileExcludeFiles;
Options.ExitBlockBeforeBody = CodeGenOpts.CoverageExitBlockBeforeBody;
return Options;
}
@@ -832,7 +837,8 @@ void EmitAssemblyHelper::EmitAssembly(BackendAction Action,
break;
default:
- if (!CodeGenOpts.SplitDwarfFile.empty()) {
+ if (!CodeGenOpts.SplitDwarfFile.empty() &&
+ (CodeGenOpts.getSplitDwarfMode() == CodeGenOptions::SplitFileFission)) {
DwoOS = openOutputFile(CodeGenOpts.SplitDwarfFile);
if (!DwoOS)
return;
@@ -930,18 +936,21 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
PGOOpt = PGOOptions(CodeGenOpts.InstrProfileOutput.empty()
? DefaultProfileGenName
: CodeGenOpts.InstrProfileOutput,
- "", "", true, CodeGenOpts.DebugInfoForProfiling);
+ "", "", "", true,
+ CodeGenOpts.DebugInfoForProfiling);
else if (CodeGenOpts.hasProfileIRUse())
// -fprofile-use.
- PGOOpt = PGOOptions("", CodeGenOpts.ProfileInstrumentUsePath, "", false,
+ PGOOpt = PGOOptions("", CodeGenOpts.ProfileInstrumentUsePath, "",
+ CodeGenOpts.ProfileRemappingFile, false,
CodeGenOpts.DebugInfoForProfiling);
else if (!CodeGenOpts.SampleProfileFile.empty())
// -fprofile-sample-use
- PGOOpt = PGOOptions("", "", CodeGenOpts.SampleProfileFile, false,
+ PGOOpt = PGOOptions("", "", CodeGenOpts.SampleProfileFile,
+ CodeGenOpts.ProfileRemappingFile, false,
CodeGenOpts.DebugInfoForProfiling);
else if (CodeGenOpts.DebugInfoForProfiling)
// -fdebug-info-for-profiling
- PGOOpt = PGOOptions("", "", "", false, true);
+ PGOOpt = PGOOptions("", "", "", "", false, true);
PassBuilder PB(TM.get(), PGOOpt);
@@ -1130,6 +1139,7 @@ static void runThinLTOBackend(ModuleSummaryIndex *CombinedIndex, Module *M,
const LangOptions &LOpts,
std::unique_ptr<raw_pwrite_stream> OS,
std::string SampleProfile,
+ std::string ProfileRemapping,
BackendAction Action) {
StringMap<DenseMap<GlobalValue::GUID, GlobalValueSummary *>>
ModuleToDefinedGVSummaries;
@@ -1147,15 +1157,14 @@ static void runThinLTOBackend(ModuleSummaryIndex *CombinedIndex, Module *M,
continue;
auto GUID = GlobalList.first;
- assert(GlobalList.second.SummaryList.size() == 1 &&
- "Expected individual combined index to have one summary per GUID");
- auto &Summary = GlobalList.second.SummaryList[0];
- // Skip the summaries for the importing module. These are included to
- // e.g. record required linkage changes.
- if (Summary->modulePath() == M->getModuleIdentifier())
- continue;
- // Add an entry to provoke importing by thinBackend.
- ImportList[Summary->modulePath()].insert(GUID);
+ for (auto &Summary : GlobalList.second.SummaryList) {
+ // Skip the summaries for the importing module. These are included to
+ // e.g. record required linkage changes.
+ if (Summary->modulePath() == M->getModuleIdentifier())
+ continue;
+ // Add an entry to provoke importing by thinBackend.
+ ImportList[Summary->modulePath()].insert(GUID);
+ }
}
std::vector<std::unique_ptr<llvm::MemoryBuffer>> OwnedImports;
@@ -1202,6 +1211,7 @@ static void runThinLTOBackend(ModuleSummaryIndex *CombinedIndex, Module *M,
Conf.CGOptLevel = getCGOptLevel(CGOpts);
initTargetOptions(Conf.Options, CGOpts, TOpts, LOpts, HeaderOpts);
Conf.SampleProfile = std::move(SampleProfile);
+ Conf.ProfileRemapping = std::move(ProfileRemapping);
Conf.UseNewPM = CGOpts.ExperimentalNewPassManager;
Conf.DebugPassManager = CGOpts.DebugPassManager;
Conf.RemarksWithHotness = CGOpts.DiagnosticsWithHotness;
@@ -1268,7 +1278,7 @@ void clang::EmitBackendOutput(DiagnosticsEngine &Diags,
if (!CombinedIndex->skipModuleByDistributedBackend()) {
runThinLTOBackend(CombinedIndex.get(), M, HeaderOpts, CGOpts, TOpts,
LOpts, std::move(OS), CGOpts.SampleProfileFile,
- Action);
+ CGOpts.ProfileRemappingFile, Action);
return;
}
// Distributed indexing detected that nothing from the module is needed
diff --git a/lib/CodeGen/CGAtomic.cpp b/lib/CodeGen/CGAtomic.cpp
index 9379c835d3..24056a449d 100644
--- a/lib/CodeGen/CGAtomic.cpp
+++ b/lib/CodeGen/CGAtomic.cpp
@@ -18,7 +18,7 @@
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/CodeGen/CGFunctionInfo.h"
-#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index dae148cbd1..6631bfb0df 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -175,7 +175,7 @@ static std::string getBlockDescriptorName(const CGBlockInfo &BlockInfo,
/// unsigned long reserved;
/// unsigned long size; // size of Block_literal metadata in bytes.
/// void *copy_func_helper_decl; // optional copy helper.
-/// void *destroy_func_decl; // optioanl destructor helper.
+/// void *destroy_func_decl; // optional destructor helper.
/// void *block_method_encoding_address; // @encode for block literal signature.
/// void *block_layout_info; // encoding of captured block variables.
/// };
@@ -197,7 +197,7 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
std::string descName;
// If an equivalent block descriptor global variable exists, return it.
- if (C.getLangOpts().ObjC1 &&
+ if (C.getLangOpts().ObjC &&
CGM.getLangOpts().getGC() == LangOptions::NonGC) {
descName = getBlockDescriptorName(blockInfo, CGM);
if (llvm::GlobalValue *desc = CGM.getModule().getNamedValue(descName))
@@ -243,7 +243,7 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
CGM.GetAddrOfConstantCString(typeAtEncoding).getPointer(), i8p));
// GC layout.
- if (C.getLangOpts().ObjC1) {
+ if (C.getLangOpts().ObjC) {
if (CGM.getLangOpts().getGC() != LangOptions::NonGC)
elements.add(CGM.getObjCRuntime().BuildGCBlockLayout(CGM, blockInfo));
else
@@ -446,12 +446,25 @@ static void initializeForBlockHeader(CodeGenModule &CGM, CGBlockInfo &info,
assert(elementTypes.empty());
if (CGM.getLangOpts().OpenCL) {
- // The header is basically 'struct { int; int;
+ // The header is basically 'struct { int; int; generic void *;
// custom_fields; }'. Assert that struct is packed.
+ auto GenericAS =
+ CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic);
+ auto GenPtrAlign =
+ CharUnits::fromQuantity(CGM.getTarget().getPointerAlign(GenericAS) / 8);
+ auto GenPtrSize =
+ CharUnits::fromQuantity(CGM.getTarget().getPointerWidth(GenericAS) / 8);
+ assert(CGM.getIntSize() <= GenPtrSize);
+ assert(CGM.getIntAlign() <= GenPtrAlign);
+ assert((2 * CGM.getIntSize()).isMultipleOf(GenPtrAlign));
elementTypes.push_back(CGM.IntTy); /* total size */
elementTypes.push_back(CGM.IntTy); /* align */
- unsigned Offset = 2 * CGM.getIntSize().getQuantity();
- unsigned BlockAlign = CGM.getIntAlign().getQuantity();
+ elementTypes.push_back(
+ CGM.getOpenCLRuntime()
+ .getGenericVoidPointerType()); /* invoke function */
+ unsigned Offset =
+ 2 * CGM.getIntSize().getQuantity() + GenPtrSize.getQuantity();
+ unsigned BlockAlign = GenPtrAlign.getQuantity();
if (auto *Helper =
CGM.getTargetCodeGenInfo().getTargetOpenCLBlockHelper()) {
for (auto I : Helper->getCustomFieldTypes()) /* custom fields */ {
@@ -493,7 +506,11 @@ static QualType getCaptureFieldType(const CodeGenFunction &CGF,
return CGF.BlockInfo->getCapture(VD).fieldType();
if (auto *FD = CGF.LambdaCaptureFields.lookup(VD))
return FD->getType();
- return VD->getType();
+ // If the captured variable is a non-escaping __block variable, the field
+ // type is the reference type. If the variable is a __block variable that
+ // already has a reference type, the field type is the variable's type.
+ return VD->isNonEscapingByref() ?
+ CGF.getContext().getLValueReferenceType(VD->getType()) : VD->getType();
}
/// Compute the layout of the given block. Attempts to lay the block
@@ -516,7 +533,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
info.CanBeGlobal = true;
return;
}
- else if (C.getLangOpts().ObjC1 &&
+ else if (C.getLangOpts().ObjC &&
CGM.getLangOpts().getGC() == LangOptions::NonGC)
info.HasCapturedVariableLayout = true;
@@ -549,7 +566,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
for (const auto &CI : block->captures()) {
const VarDecl *variable = CI.getVariable();
- if (CI.isByRef()) {
+ if (CI.isEscapingByref()) {
// We have to copy/dispose of the __block reference.
info.NeedsCopyDispose = true;
@@ -842,10 +859,12 @@ static void enterBlockScope(CodeGenFunction &CGF, BlockDecl *block) {
/// Enter a full-expression with a non-trivial number of objects to
/// clean up. This is in this file because, at the moment, the only
/// kind of cleanup object is a BlockDecl*.
-void CodeGenFunction::enterNonTrivialFullExpression(const ExprWithCleanups *E) {
- assert(E->getNumObjects() != 0);
- for (const ExprWithCleanups::CleanupObject &C : E->getObjects())
- enterBlockScope(*this, C);
+void CodeGenFunction::enterNonTrivialFullExpression(const FullExpr *E) {
+ if (const auto EWC = dyn_cast<ExprWithCleanups>(E)) {
+ assert(EWC->getNumObjects() != 0);
+ for (const ExprWithCleanups::CleanupObject &C : EWC->getObjects())
+ enterBlockScope(*this, C);
+ }
}
/// Find the layout for the given block in a linked list and remove it.
@@ -902,12 +921,20 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
bool IsOpenCL = CGM.getContext().getLangOpts().OpenCL;
+ auto GenVoidPtrTy =
+ IsOpenCL ? CGM.getOpenCLRuntime().getGenericVoidPointerType() : VoidPtrTy;
+ LangAS GenVoidPtrAddr = IsOpenCL ? LangAS::opencl_generic : LangAS::Default;
+ auto GenVoidPtrSize = CharUnits::fromQuantity(
+ CGM.getTarget().getPointerWidth(
+ CGM.getContext().getTargetAddressSpace(GenVoidPtrAddr)) /
+ 8);
// Using the computed layout, generate the actual block function.
bool isLambdaConv = blockInfo.getBlockDecl()->isConversionFromLambda();
CodeGenFunction BlockCGF{CGM, true};
BlockCGF.SanOpts = SanOpts;
auto *InvokeFn = BlockCGF.GenerateBlockFunction(
CurGD, blockInfo, LocalDeclMap, isLambdaConv, blockInfo.CanBeGlobal);
+ auto *blockFn = llvm::ConstantExpr::getPointerCast(InvokeFn, GenVoidPtrTy);
// If there is nothing to capture, we can emit this as a global block.
if (blockInfo.CanBeGlobal)
@@ -983,12 +1010,11 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
llvm::ConstantInt::get(IntTy, blockInfo.BlockAlign.getQuantity()),
getIntSize(), "block.align");
}
- if (!IsOpenCL) {
- addHeaderField(llvm::ConstantExpr::getBitCast(InvokeFn, VoidPtrTy),
- getPointerSize(), "block.invoke");
+ addHeaderField(blockFn, GenVoidPtrSize, "block.invoke");
+ if (!IsOpenCL)
addHeaderField(descriptor, getPointerSize(), "block.descriptor");
- } else if (auto *Helper =
- CGM.getTargetCodeGenInfo().getTargetOpenCLBlockHelper()) {
+ else if (auto *Helper =
+ CGM.getTargetCodeGenInfo().getTargetOpenCLBlockHelper()) {
for (auto I : Helper->getCustomFieldValues(*this, blockInfo)) {
addHeaderField(
I.first,
@@ -1032,7 +1058,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// The lambda capture in a lambda's conversion-to-block-pointer is
// special; we'll simply emit it directly.
src = Address::invalid();
- } else if (CI.isByRef()) {
+ } else if (CI.isEscapingByref()) {
if (BlockInfo && CI.isNested()) {
// We need to use the capture from the enclosing block.
const CGBlockInfo::Capture &enclosingCapture =
@@ -1060,7 +1086,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// the block field. There's no need to chase the forwarding
// pointer at this point, since we're building something that will
// live a shorter life than the stack byref anyway.
- if (CI.isByRef()) {
+ if (CI.isEscapingByref()) {
// Get a void* that points to the byref struct.
llvm::Value *byrefPointer;
if (CI.isNested())
@@ -1192,23 +1218,38 @@ llvm::Type *CodeGenModule::getBlockDescriptorType() {
}
llvm::Type *CodeGenModule::getGenericBlockLiteralType() {
- assert(!getLangOpts().OpenCL && "OpenCL does not need this");
-
if (GenericBlockLiteralType)
return GenericBlockLiteralType;
llvm::Type *BlockDescPtrTy = getBlockDescriptorType();
- // struct __block_literal_generic {
- // void *__isa;
- // int __flags;
- // int __reserved;
- // void (*__invoke)(void *);
- // struct __block_descriptor *__descriptor;
- // };
- GenericBlockLiteralType =
- llvm::StructType::create("struct.__block_literal_generic", VoidPtrTy,
- IntTy, IntTy, VoidPtrTy, BlockDescPtrTy);
+ if (getLangOpts().OpenCL) {
+ // struct __opencl_block_literal_generic {
+ // int __size;
+ // int __align;
+ // __generic void *__invoke;
+ // /* custom fields */
+ // };
+ SmallVector<llvm::Type *, 8> StructFields(
+ {IntTy, IntTy, getOpenCLRuntime().getGenericVoidPointerType()});
+ if (auto *Helper = getTargetCodeGenInfo().getTargetOpenCLBlockHelper()) {
+ for (auto I : Helper->getCustomFieldTypes())
+ StructFields.push_back(I);
+ }
+ GenericBlockLiteralType = llvm::StructType::create(
+ StructFields, "struct.__opencl_block_literal_generic");
+ } else {
+ // struct __block_literal_generic {
+ // void *__isa;
+ // int __flags;
+ // int __reserved;
+ // void (*__invoke)(void *);
+ // struct __block_descriptor *__descriptor;
+ // };
+ GenericBlockLiteralType =
+ llvm::StructType::create("struct.__block_literal_generic", VoidPtrTy,
+ IntTy, IntTy, VoidPtrTy, BlockDescPtrTy);
+ }
return GenericBlockLiteralType;
}
@@ -1219,21 +1260,27 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
E->getCallee()->getType()->getAs<BlockPointerType>();
llvm::Value *BlockPtr = EmitScalarExpr(E->getCallee());
- llvm::Value *FuncPtr = nullptr;
- if (!CGM.getLangOpts().OpenCL) {
- // Get a pointer to the generic block literal.
- llvm::Type *BlockLiteralTy =
- llvm::PointerType::get(CGM.getGenericBlockLiteralType(), 0);
+ // Get a pointer to the generic block literal.
+  // For OpenCL we generate a generic AS void pointer so the same block
+  // definition can be reused both for blocks with captures (generated as
+  // private AS local variables) and for blocks without captures (generated
+  // as global AS program scope variables).
+ unsigned AddrSpace = 0;
+ if (getLangOpts().OpenCL)
+ AddrSpace = getContext().getTargetAddressSpace(LangAS::opencl_generic);
- // Bitcast the callee to a block literal.
- BlockPtr =
- Builder.CreatePointerCast(BlockPtr, BlockLiteralTy, "block.literal");
+ llvm::Type *BlockLiteralTy =
+ llvm::PointerType::get(CGM.getGenericBlockLiteralType(), AddrSpace);
- // Get the function pointer from the literal.
- FuncPtr =
- Builder.CreateStructGEP(CGM.getGenericBlockLiteralType(), BlockPtr, 3);
- }
+ // Bitcast the callee to a block literal.
+ BlockPtr =
+ Builder.CreatePointerCast(BlockPtr, BlockLiteralTy, "block.literal");
+
+ // Get the function pointer from the literal.
+ llvm::Value *FuncPtr =
+ Builder.CreateStructGEP(CGM.getGenericBlockLiteralType(), BlockPtr,
+ CGM.getLangOpts().OpenCL ? 2 : 3);
// Add the block literal.
CallArgList Args;
@@ -1256,11 +1303,7 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
EmitCallArgs(Args, FnType->getAs<FunctionProtoType>(), E->arguments());
// Load the function.
- llvm::Value *Func;
- if (CGM.getLangOpts().OpenCL)
- Func = CGM.getOpenCLRuntime().getInvokeFunction(E->getCallee());
- else
- Func = Builder.CreateAlignedLoad(FuncPtr, getPointerAlign());
+ llvm::Value *Func = Builder.CreateAlignedLoad(FuncPtr, getPointerAlign());
const FunctionType *FuncTy = FnType->castAs<FunctionType>();
const CGFunctionInfo &FnInfo =
@@ -1279,8 +1322,7 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
return EmitCall(FnInfo, Callee, ReturnValue, Args);
}
-Address CodeGenFunction::GetAddrOfBlockDecl(const VarDecl *variable,
- bool isByRef) {
+Address CodeGenFunction::GetAddrOfBlockDecl(const VarDecl *variable) {
assert(BlockInfo && "evaluating block ref without block information?");
const CGBlockInfo::Capture &capture = BlockInfo->getCapture(variable);
@@ -1291,7 +1333,7 @@ Address CodeGenFunction::GetAddrOfBlockDecl(const VarDecl *variable,
Builder.CreateStructGEP(LoadBlockStruct(), capture.getIndex(),
capture.getOffset(), "block.capture.addr");
- if (isByRef) {
+ if (variable->isEscapingByref()) {
// addr should be a void** right now. Load, then cast the result
// to byref*.
@@ -1305,6 +1347,10 @@ Address CodeGenFunction::GetAddrOfBlockDecl(const VarDecl *variable,
variable->getName());
}
+ assert((!variable->isNonEscapingByref() ||
+ capture.fieldType()->isReferenceType()) &&
+ "the capture field of a non-escaping variable should have a "
+ "reference type");
if (capture.fieldType()->isReferenceType())
addr = EmitLoadOfReference(MakeAddrLValue(addr, capture.fieldType()));
@@ -1373,14 +1419,14 @@ static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
// Reserved
fields.addInt(CGM.IntTy, 0);
-
- // Function
- fields.add(blockFn);
} else {
fields.addInt(CGM.IntTy, blockInfo.BlockSize.getQuantity());
fields.addInt(CGM.IntTy, blockInfo.BlockAlign.getQuantity());
}
+ // Function
+ fields.add(blockFn);
+
if (!IsOpenCL) {
// Descriptor
fields.add(buildBlockDescriptor(CGM, blockInfo));
@@ -1656,7 +1702,7 @@ computeCopyInfoForBlockCapture(const BlockDecl::Capture &CI, QualType T,
return std::make_pair(BlockCaptureEntityKind::CXXRecord, BlockFieldFlags());
}
BlockFieldFlags Flags;
- if (CI.isByRef()) {
+ if (CI.isEscapingByref()) {
Flags = BLOCK_FIELD_IS_BYREF;
if (T.isObjCGCWeak())
Flags |= BLOCK_FIELD_IS_WEAK;
@@ -1773,8 +1819,6 @@ static std::string getBlockCaptureStr(const BlockCaptureManagedEntity &E,
CodeGenModule &CGM) {
std::string Str;
ASTContext &Ctx = CGM.getContext();
- std::unique_ptr<ItaniumMangleContext> MC(
- ItaniumMangleContext::create(Ctx, Ctx.getDiagnostics()));
const BlockDecl::Capture &CI = *E.CI;
QualType CaptureTy = CI.getVariable()->getType();
@@ -1800,7 +1844,7 @@ static std::string getBlockCaptureStr(const BlockCaptureManagedEntity &E,
Str += "c";
SmallString<256> TyStr;
llvm::raw_svector_ostream Out(TyStr);
- MC->mangleTypeName(CaptureTy, Out);
+ CGM.getCXXABI().getMangleContext().mangleTypeName(CaptureTy, Out);
Str += llvm::to_string(TyStr.size()) + TyStr.c_str();
break;
}
@@ -1939,7 +1983,7 @@ static void setBlockHelperAttributesVisibility(bool CapturesNonExternalType,
} else {
Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
Fn->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
- CGM.SetLLVMFunctionAttributes(nullptr, FI, Fn);
+ CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn);
CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn);
}
}
@@ -1964,16 +2008,16 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
ASTContext &C = getContext();
+ QualType ReturnTy = C.VoidTy;
+
FunctionArgList args;
- ImplicitParamDecl DstDecl(getContext(), C.VoidPtrTy,
- ImplicitParamDecl::Other);
+ ImplicitParamDecl DstDecl(C, C.VoidPtrTy, ImplicitParamDecl::Other);
args.push_back(&DstDecl);
- ImplicitParamDecl SrcDecl(getContext(), C.VoidPtrTy,
- ImplicitParamDecl::Other);
+ ImplicitParamDecl SrcDecl(C, C.VoidPtrTy, ImplicitParamDecl::Other);
args.push_back(&SrcDecl);
const CGFunctionInfo &FI =
- CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, args);
+ CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, args);
// FIXME: it would be nice if these were mergeable with things with
// identical semantics.
@@ -1983,20 +2027,20 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
llvm::Function::Create(LTy, llvm::GlobalValue::LinkOnceODRLinkage,
FuncName, &CGM.getModule());
- IdentifierInfo *II
- = &CGM.getContext().Idents.get(FuncName);
+ IdentifierInfo *II = &C.Idents.get(FuncName);
+
+ SmallVector<QualType, 2> ArgTys;
+ ArgTys.push_back(C.VoidPtrTy);
+ ArgTys.push_back(C.VoidPtrTy);
+ QualType FunctionTy = C.getFunctionType(ReturnTy, ArgTys, {});
- FunctionDecl *FD = FunctionDecl::Create(C,
- C.getTranslationUnitDecl(),
- SourceLocation(),
- SourceLocation(), II, C.VoidTy,
- nullptr, SC_Static,
- false,
- false);
+ FunctionDecl *FD = FunctionDecl::Create(
+ C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
+ FunctionTy, nullptr, SC_Static, false, false);
setBlockHelperAttributesVisibility(blockInfo.CapturesNonExternalType, Fn, FI,
CGM);
- StartFunction(FD, C.VoidTy, Fn, FI, args);
+ StartFunction(FD, ReturnTy, Fn, FI, args);
ApplyDebugLocation NL{*this, blockInfo.getBlockExpr()->getBeginLoc()};
llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
@@ -2102,7 +2146,7 @@ getBlockFieldFlagsForObjCObjectPointer(const BlockDecl::Capture &CI,
static std::pair<BlockCaptureEntityKind, BlockFieldFlags>
computeDestroyInfoForBlockCapture(const BlockDecl::Capture &CI, QualType T,
const LangOptions &LangOpts) {
- if (CI.isByRef()) {
+ if (CI.isEscapingByref()) {
BlockFieldFlags Flags = BLOCK_FIELD_IS_BYREF;
if (T.isObjCGCWeak())
Flags |= BLOCK_FIELD_IS_WEAK;
@@ -2157,13 +2201,14 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
ASTContext &C = getContext();
+ QualType ReturnTy = C.VoidTy;
+
FunctionArgList args;
- ImplicitParamDecl SrcDecl(getContext(), C.VoidPtrTy,
- ImplicitParamDecl::Other);
+ ImplicitParamDecl SrcDecl(C, C.VoidPtrTy, ImplicitParamDecl::Other);
args.push_back(&SrcDecl);
const CGFunctionInfo &FI =
- CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, args);
+ CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, args);
// FIXME: We'd like to put these into a mergable by content, with
// internal linkage.
@@ -2173,18 +2218,19 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
llvm::Function::Create(LTy, llvm::GlobalValue::LinkOnceODRLinkage,
FuncName, &CGM.getModule());
- IdentifierInfo *II
- = &CGM.getContext().Idents.get(FuncName);
+ IdentifierInfo *II = &C.Idents.get(FuncName);
- FunctionDecl *FD = FunctionDecl::Create(C, C.getTranslationUnitDecl(),
- SourceLocation(),
- SourceLocation(), II, C.VoidTy,
- nullptr, SC_Static,
- false, false);
+ SmallVector<QualType, 1> ArgTys;
+ ArgTys.push_back(C.VoidPtrTy);
+ QualType FunctionTy = C.getFunctionType(ReturnTy, ArgTys, {});
+
+ FunctionDecl *FD = FunctionDecl::Create(
+ C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
+ FunctionTy, nullptr, SC_Static, false, false);
setBlockHelperAttributesVisibility(blockInfo.CapturesNonExternalType, Fn, FI,
CGM);
- StartFunction(FD, C.VoidTy, Fn, FI, args);
+ StartFunction(FD, ReturnTy, Fn, FI, args);
markAsIgnoreThreadCheckingAtRuntime(Fn);
ApplyDebugLocation NL{*this, blockInfo.getBlockExpr()->getBeginLoc()};
@@ -2403,19 +2449,17 @@ generateByrefCopyHelper(CodeGenFunction &CGF, const BlockByrefInfo &byrefInfo,
BlockByrefHelpers &generator) {
ASTContext &Context = CGF.getContext();
- QualType R = Context.VoidTy;
+ QualType ReturnTy = Context.VoidTy;
FunctionArgList args;
- ImplicitParamDecl Dst(CGF.getContext(), Context.VoidPtrTy,
- ImplicitParamDecl::Other);
+ ImplicitParamDecl Dst(Context, Context.VoidPtrTy, ImplicitParamDecl::Other);
args.push_back(&Dst);
- ImplicitParamDecl Src(CGF.getContext(), Context.VoidPtrTy,
- ImplicitParamDecl::Other);
+ ImplicitParamDecl Src(Context, Context.VoidPtrTy, ImplicitParamDecl::Other);
args.push_back(&Src);
const CGFunctionInfo &FI =
- CGF.CGM.getTypes().arrangeBuiltinFunctionDeclaration(R, args);
+ CGF.CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, args);
llvm::FunctionType *LTy = CGF.CGM.getTypes().GetFunctionType(FI);
@@ -2428,16 +2472,18 @@ generateByrefCopyHelper(CodeGenFunction &CGF, const BlockByrefInfo &byrefInfo,
IdentifierInfo *II
= &Context.Idents.get("__Block_byref_object_copy_");
- FunctionDecl *FD = FunctionDecl::Create(Context,
- Context.getTranslationUnitDecl(),
- SourceLocation(),
- SourceLocation(), II, R, nullptr,
- SC_Static,
- false, false);
+ SmallVector<QualType, 2> ArgTys;
+ ArgTys.push_back(Context.VoidPtrTy);
+ ArgTys.push_back(Context.VoidPtrTy);
+ QualType FunctionTy = Context.getFunctionType(ReturnTy, ArgTys, {});
+
+ FunctionDecl *FD = FunctionDecl::Create(
+ Context, Context.getTranslationUnitDecl(), SourceLocation(),
+ SourceLocation(), II, FunctionTy, nullptr, SC_Static, false, false);
CGF.CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);
- CGF.StartFunction(FD, R, Fn, FI, args);
+ CGF.StartFunction(FD, ReturnTy, Fn, FI, args);
if (generator.needsCopy()) {
llvm::Type *byrefPtrType = byrefInfo.Type->getPointerTo(0);
@@ -2502,12 +2548,13 @@ generateByrefDisposeHelper(CodeGenFunction &CGF,
IdentifierInfo *II
= &Context.Idents.get("__Block_byref_object_dispose_");
- FunctionDecl *FD = FunctionDecl::Create(Context,
- Context.getTranslationUnitDecl(),
- SourceLocation(),
- SourceLocation(), II, R, nullptr,
- SC_Static,
- false, false);
+ SmallVector<QualType, 1> ArgTys;
+ ArgTys.push_back(Context.VoidPtrTy);
+ QualType FunctionTy = Context.getFunctionType(R, ArgTys, {});
+
+ FunctionDecl *FD = FunctionDecl::Create(
+ Context, Context.getTranslationUnitDecl(), SourceLocation(),
+ SourceLocation(), II, FunctionTy, nullptr, SC_Static, false, false);
CGF.CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);
@@ -2564,6 +2611,9 @@ BlockByrefHelpers *
CodeGenFunction::buildByrefHelpers(llvm::StructType &byrefType,
const AutoVarEmission &emission) {
const VarDecl &var = *emission.Variable;
+ assert(var.isEscapingByref() &&
+ "only escaping __block variables need byref helpers");
+
QualType type = var.getType();
auto &byrefInfo = getBlockByrefInfo(&var);
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index df71dbb4b4..eea9207a34 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -21,10 +21,11 @@
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
-#include "clang/Analysis/Analyses/OSLog.h"
+#include "clang/AST/OSLog.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
@@ -93,11 +94,11 @@ static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
return V;
}
-/// Utility to insert an atomic instruction based on Instrinsic::ID
+/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
-static Value *MakeBinaryAtomicValue(CodeGenFunction &CGF,
- llvm::AtomicRMWInst::BinOp Kind,
- const CallExpr *E) {
+static Value *MakeBinaryAtomicValue(
+ CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E,
+ AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
QualType T = E->getType();
assert(E->getArg(0)->getType()->isPointerType());
assert(CGF.getContext().hasSameUnqualifiedType(T,
@@ -119,7 +120,7 @@ static Value *MakeBinaryAtomicValue(CodeGenFunction &CGF,
Args[1] = EmitToInt(CGF, Args[1], T, IntType);
llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
- Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent);
+ Kind, Args[0], Args[1], Ordering);
return EmitFromInt(CGF, Result, T, ValueType);
}
@@ -151,7 +152,7 @@ static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E));
}
-/// Utility to insert an atomic instruction based Instrinsic::ID and
+/// Utility to insert an atomic instruction based on Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
@@ -200,6 +201,9 @@ static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
/// cmpxchg result or the old value.
///
/// @returns result of cmpxchg, according to ReturnBool
+///
+/// Note: In order to lower Microsoft's _InterlockedCompareExchange* intrinsics,
+/// invoke the function EmitAtomicCmpXchgForMSIntrin.
static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
bool ReturnBool) {
QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
@@ -230,6 +234,72 @@ static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
ValueType);
}
+/// This function should be invoked to emit atomic cmpxchg for Microsoft's
+/// _InterlockedCompareExchange* intrinsics which have the following signature:
+/// T _InterlockedCompareExchange(T volatile *Destination,
+/// T Exchange,
+/// T Comparand);
+///
+/// Whereas the llvm 'cmpxchg' instruction has the following syntax:
+/// cmpxchg *Destination, Comparand, Exchange.
+/// So we need to swap Comparand and Exchange when invoking
+/// CreateAtomicCmpXchg. That is the reason we could not use the above utility
+/// function MakeAtomicCmpXchgValue since it expects the arguments to be
+/// already swapped.
+
+static
+Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E,
+ AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) {
+ assert(E->getArg(0)->getType()->isPointerType());
+ assert(CGF.getContext().hasSameUnqualifiedType(
+ E->getType(), E->getArg(0)->getType()->getPointeeType()));
+ assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
+ E->getArg(1)->getType()));
+ assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
+ E->getArg(2)->getType()));
+
+ auto *Destination = CGF.EmitScalarExpr(E->getArg(0));
+ auto *Comparand = CGF.EmitScalarExpr(E->getArg(2));
+ auto *Exchange = CGF.EmitScalarExpr(E->getArg(1));
+
+ // For Release ordering, the failure ordering should be Monotonic.
+ auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ?
+ AtomicOrdering::Monotonic :
+ SuccessOrdering;
+
+ auto *Result = CGF.Builder.CreateAtomicCmpXchg(
+ Destination, Comparand, Exchange,
+ SuccessOrdering, FailureOrdering);
+ Result->setVolatile(true);
+ return CGF.Builder.CreateExtractValue(Result, 0);
+}
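A minimal sketch of the contract this helper encodes, based only on the comment above: the MSVC-style intrinsic takes (Destination, Exchange, Comparand) and returns the prior value, while LLVM's cmpxchg compares against its second operand, hence the swap before CreateAtomicCmpXchg. The names below are illustrative and not part of the patch.

    // Illustrative, non-atomic C++ model of the semantics (the real lowering
    // is a single volatile cmpxchg instruction, not a library call).
    template <typename T>
    T InterlockedCompareExchangeModel(T volatile *Destination, T Exchange,
                                      T Comparand) {
      T Old = *Destination;        // the value extracted from the cmpxchg result
      if (Old == Comparand)
        *Destination = Exchange;   // store happens only on a successful compare
      return Old;
    }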
+
+static Value *EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E,
+ AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
+ assert(E->getArg(0)->getType()->isPointerType());
+
+ auto *IntTy = CGF.ConvertType(E->getType());
+ auto *Result = CGF.Builder.CreateAtomicRMW(
+ AtomicRMWInst::Add,
+ CGF.EmitScalarExpr(E->getArg(0)),
+ ConstantInt::get(IntTy, 1),
+ Ordering);
+ return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1));
+}
+
+static Value *EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E,
+ AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
+ assert(E->getArg(0)->getType()->isPointerType());
+
+ auto *IntTy = CGF.ConvertType(E->getType());
+ auto *Result = CGF.Builder.CreateAtomicRMW(
+ AtomicRMWInst::Sub,
+ CGF.EmitScalarExpr(E->getArg(0)),
+ ConstantInt::get(IntTy, 1),
+ Ordering);
+ return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1));
+}
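As a sketch of why both helpers re-apply the +1/-1: atomicrmw yields the value before the update, while the documented MSVC behavior (assumed here) is that _InterlockedIncrement/_InterlockedDecrement return the value after it.

    // Illustrative only; assumes an MSVC-style environment providing the
    // intrinsic declarations.
    #include <intrin.h>
    void increment_example() {
      long Counter = 0;
      long NewValue = _InterlockedIncrement(&Counter);
      // NewValue == 1 and Counter == 1: the intrinsic returns the *updated*
      // value, which is why the emitted IR adds 1 to the atomicrmw result.
      (void)NewValue;
    }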
+
// Emit a simple mangled intrinsic that has 1 argument and a return type
// matching the argument type.
static Value *emitUnaryBuiltin(CodeGenFunction &CGF,
@@ -316,7 +386,7 @@ static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) {
static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD,
const CallExpr *E, llvm::Constant *calleeValue) {
- CGCallee callee = CGCallee::forDirect(calleeValue, FD);
+ CGCallee callee = CGCallee::forDirect(calleeValue, GlobalDecl(FD));
return CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot());
}
@@ -485,7 +555,7 @@ CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
}
namespace {
-/// A struct to generically desribe a bit test intrinsic.
+/// A struct to generically describe a bit test intrinsic.
struct BitTest {
enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set };
enum InterlockingKind : uint8_t {
@@ -711,8 +781,11 @@ static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind,
} else {
Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex";
Arg1Ty = CGF.Int8PtrTy;
- Arg1 = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(Intrinsic::frameaddress),
- llvm::ConstantInt::get(CGF.Int32Ty, 0));
+ if (CGF.getTarget().getTriple().getArch() == llvm::Triple::aarch64) {
+ Arg1 = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(Intrinsic::sponentry));
+ } else
+ Arg1 = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(Intrinsic::frameaddress),
+ llvm::ConstantInt::get(CGF.Int32Ty, 0));
}
// Mark the call site and declaration with ReturnsTwice.
@@ -745,6 +818,30 @@ enum class CodeGenFunction::MSVCIntrin {
_InterlockedIncrement,
_InterlockedOr,
_InterlockedXor,
+ _InterlockedExchangeAdd_acq,
+ _InterlockedExchangeAdd_rel,
+ _InterlockedExchangeAdd_nf,
+ _InterlockedExchange_acq,
+ _InterlockedExchange_rel,
+ _InterlockedExchange_nf,
+ _InterlockedCompareExchange_acq,
+ _InterlockedCompareExchange_rel,
+ _InterlockedCompareExchange_nf,
+ _InterlockedOr_acq,
+ _InterlockedOr_rel,
+ _InterlockedOr_nf,
+ _InterlockedXor_acq,
+ _InterlockedXor_rel,
+ _InterlockedXor_nf,
+ _InterlockedAnd_acq,
+ _InterlockedAnd_rel,
+ _InterlockedAnd_nf,
+ _InterlockedIncrement_acq,
+ _InterlockedIncrement_rel,
+ _InterlockedIncrement_nf,
+ _InterlockedDecrement_acq,
+ _InterlockedDecrement_rel,
+ _InterlockedDecrement_nf,
__fastfail,
};
@@ -811,25 +908,74 @@ Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E);
case MSVCIntrin::_InterlockedXor:
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E);
-
- case MSVCIntrin::_InterlockedDecrement: {
- llvm::Type *IntTy = ConvertType(E->getType());
- AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
- AtomicRMWInst::Sub,
- EmitScalarExpr(E->getArg(0)),
- ConstantInt::get(IntTy, 1),
- llvm::AtomicOrdering::SequentiallyConsistent);
- return Builder.CreateSub(RMWI, ConstantInt::get(IntTy, 1));
- }
- case MSVCIntrin::_InterlockedIncrement: {
- llvm::Type *IntTy = ConvertType(E->getType());
- AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
- AtomicRMWInst::Add,
- EmitScalarExpr(E->getArg(0)),
- ConstantInt::get(IntTy, 1),
- llvm::AtomicOrdering::SequentiallyConsistent);
- return Builder.CreateAdd(RMWI, ConstantInt::get(IntTy, 1));
- }
+ case MSVCIntrin::_InterlockedExchangeAdd_acq:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
+ AtomicOrdering::Acquire);
+ case MSVCIntrin::_InterlockedExchangeAdd_rel:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
+ AtomicOrdering::Release);
+ case MSVCIntrin::_InterlockedExchangeAdd_nf:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
+ AtomicOrdering::Monotonic);
+ case MSVCIntrin::_InterlockedExchange_acq:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
+ AtomicOrdering::Acquire);
+ case MSVCIntrin::_InterlockedExchange_rel:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
+ AtomicOrdering::Release);
+ case MSVCIntrin::_InterlockedExchange_nf:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
+ AtomicOrdering::Monotonic);
+ case MSVCIntrin::_InterlockedCompareExchange_acq:
+ return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Acquire);
+ case MSVCIntrin::_InterlockedCompareExchange_rel:
+ return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release);
+ case MSVCIntrin::_InterlockedCompareExchange_nf:
+ return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic);
+ case MSVCIntrin::_InterlockedOr_acq:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
+ AtomicOrdering::Acquire);
+ case MSVCIntrin::_InterlockedOr_rel:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
+ AtomicOrdering::Release);
+ case MSVCIntrin::_InterlockedOr_nf:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
+ AtomicOrdering::Monotonic);
+ case MSVCIntrin::_InterlockedXor_acq:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
+ AtomicOrdering::Acquire);
+ case MSVCIntrin::_InterlockedXor_rel:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
+ AtomicOrdering::Release);
+ case MSVCIntrin::_InterlockedXor_nf:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
+ AtomicOrdering::Monotonic);
+ case MSVCIntrin::_InterlockedAnd_acq:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
+ AtomicOrdering::Acquire);
+ case MSVCIntrin::_InterlockedAnd_rel:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
+ AtomicOrdering::Release);
+ case MSVCIntrin::_InterlockedAnd_nf:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
+ AtomicOrdering::Monotonic);
+ case MSVCIntrin::_InterlockedIncrement_acq:
+ return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Acquire);
+ case MSVCIntrin::_InterlockedIncrement_rel:
+ return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Release);
+ case MSVCIntrin::_InterlockedIncrement_nf:
+ return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Monotonic);
+ case MSVCIntrin::_InterlockedDecrement_acq:
+ return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Acquire);
+ case MSVCIntrin::_InterlockedDecrement_rel:
+ return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Release);
+ case MSVCIntrin::_InterlockedDecrement_nf:
+ return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Monotonic);
+
+ case MSVCIntrin::_InterlockedDecrement:
+ return EmitAtomicDecrementValue(*this, E);
+ case MSVCIntrin::_InterlockedIncrement:
+ return EmitAtomicIncrementValue(*this, E);
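A short summary of the suffix convention the cases above implement, read directly off the mappings in this switch: _acq selects Acquire, _rel selects Release, _nf ("no fence") selects Monotonic, and the unsuffixed forms stay sequentially consistent. For instance:

    // Illustrative; _InterlockedOr_acq is the ARM/ARM64 MSVC intrinsic, and
    // the cases above emit it as an acquire-ordered atomicrmw, roughly
    // "atomicrmw or i32* %p, i32 1 acquire".
    #include <intrin.h>
    void or_acq_example() {
      long Flags = 0;
      long Prev = _InterlockedOr_acq(&Flags, 0x1);  // returns the prior value
      (void)Prev;
    }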
case MSVCIntrin::__fastfail: {
// Request immediate process termination from the kernel. The instruction
@@ -923,35 +1069,42 @@ llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
if (llvm::Function *F = CGM.getModule().getFunction(Name))
return F;
+ llvm::SmallVector<QualType, 4> ArgTys;
llvm::SmallVector<ImplicitParamDecl, 4> Params;
Params.emplace_back(Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"),
Ctx.VoidPtrTy, ImplicitParamDecl::Other);
+ ArgTys.emplace_back(Ctx.VoidPtrTy);
for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) {
char Size = Layout.Items[I].getSizeByte();
if (!Size)
continue;
+ QualType ArgTy = getOSLogArgType(Ctx, Size);
Params.emplace_back(
Ctx, nullptr, SourceLocation(),
- &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)),
- getOSLogArgType(Ctx, Size), ImplicitParamDecl::Other);
+ &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), ArgTy,
+ ImplicitParamDecl::Other);
+ ArgTys.emplace_back(ArgTy);
}
FunctionArgList Args;
for (auto &P : Params)
Args.push_back(&P);
+ QualType ReturnTy = Ctx.VoidTy;
+ QualType FuncionTy = Ctx.getFunctionType(ReturnTy, ArgTys, {});
+
// The helper function has linkonce_odr linkage to enable the linker to merge
// identical functions. To ensure the merging always happens, 'noinline' is
// attached to the function when compiling with -Oz.
const CGFunctionInfo &FI =
- CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
+ CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, Args);
llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI);
llvm::Function *Fn = llvm::Function::Create(
FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule());
Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
- CGM.SetLLVMFunctionAttributes(nullptr, FI, Fn);
+ CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn);
CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn);
// Attach 'noinline' at -Oz.
@@ -962,9 +1115,9 @@ llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
IdentifierInfo *II = &Ctx.Idents.get(Name);
FunctionDecl *FD = FunctionDecl::Create(
Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
- Ctx.VoidTy, nullptr, SC_PrivateExtern, false, false);
+ FuncionTy, nullptr, SC_PrivateExtern, false, false);
- StartFunction(FD, Ctx.VoidTy, Fn, FI, Args);
+ StartFunction(FD, ReturnTy, Fn, FI, Args);
// Create a scope with an artificial location for the body of this function.
auto AL = ApplyDebugLocation::CreateArtificial(*this);
@@ -1024,7 +1177,12 @@ RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
llvm::Value *ArgVal;
- if (const Expr *TheExpr = Item.getExpr()) {
+ if (Item.getKind() == analyze_os_log::OSLogBufferItem::MaskKind) {
+ uint64_t Val = 0;
+ for (unsigned I = 0, E = Item.getMaskType().size(); I < E; ++I)
+ Val |= ((uint64_t)Item.getMaskType()[I]) << I * 8;
+ ArgVal = llvm::Constant::getIntegerValue(Int64Ty, llvm::APInt(64, Val));
+ } else if (const Expr *TheExpr = Item.getExpr()) {
ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false);
// Check if this is a retainable type.
@@ -1252,6 +1410,42 @@ static llvm::Value *dumpRecord(CodeGenFunction &CGF, QualType RType,
return Res;
}
+static bool
+TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty,
+ llvm::SmallPtrSetImpl<const Decl *> &Seen) {
+ if (const auto *Arr = Ctx.getAsArrayType(Ty))
+ Ty = Ctx.getBaseElementType(Arr);
+
+ const auto *Record = Ty->getAsCXXRecordDecl();
+ if (!Record)
+ return false;
+
+ // We've already checked this type, or are in the process of checking it.
+ if (!Seen.insert(Record).second)
+ return false;
+
+ assert(Record->hasDefinition() &&
+ "Incomplete types should already be diagnosed");
+
+ if (Record->isDynamicClass())
+ return true;
+
+ for (FieldDecl *F : Record->fields()) {
+ if (TypeRequiresBuiltinLaunderImp(Ctx, F->getType(), Seen))
+ return true;
+ }
+ return false;
+}
+
+/// Determine if the specified type requires laundering by checking if it is a
+/// dynamic class type or contains a subobject which is a dynamic class type.
+static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty) {
+ if (!CGM.getCodeGenOpts().StrictVTablePointers)
+ return false;
+ llvm::SmallPtrSet<const Decl *, 16> Seen;
+ return TypeRequiresBuiltinLaunderImp(CGM.getContext(), Ty, Seen);
+}
+
RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) {
llvm::Value *Src = EmitScalarExpr(E->getArg(0));
llvm::Value *ShiftAmt = EmitScalarExpr(E->getArg(1));
@@ -1267,9 +1461,10 @@ RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) {
return RValue::get(Builder.CreateCall(F, { Src, Src, ShiftAmt }));
}
-RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
- unsigned BuiltinID, const CallExpr *E,
+RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
+ const CallExpr *E,
ReturnValueSlot ReturnValue) {
+ const FunctionDecl *FD = GD.getDecl()->getAsFunction();
// See if we can constant fold this builtin. If so, don't emit it at all.
Expr::EvalResult Result;
if (E->EvaluateAsRValue(Result, CGM.getContext()) &&
@@ -1644,6 +1839,21 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
"cast");
return RValue::get(Result);
}
+ case Builtin::BI__lzcnt16:
+ case Builtin::BI__lzcnt:
+ case Builtin::BI__lzcnt64: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
+
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()});
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+ "cast");
+ return RValue::get(Result);
+ }
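A brief illustration of the new __lzcnt handling, assuming a toolchain that provides the MSVC-style __lzcnt builtins; note the second operand passed to ctlz above is Builder.getFalse(), so a zero input is well defined.

    #include <intrin.h>
    unsigned lzcnt_examples() {
      unsigned r = __lzcnt(0x00010000u);  // llvm.ctlz.i32(x, false) -> 15
      unsigned z = __lzcnt(0u);           // defined even for zero: yields 32
      return r + z;
    }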
case Builtin::BI__popcnt16:
case Builtin::BI__popcnt:
case Builtin::BI__popcnt64:
@@ -1662,46 +1872,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
"cast");
return RValue::get(Result);
}
- case Builtin::BI_rotr8:
- case Builtin::BI_rotr16:
- case Builtin::BI_rotr:
- case Builtin::BI_lrotr:
- case Builtin::BI_rotr64: {
- Value *Val = EmitScalarExpr(E->getArg(0));
- Value *Shift = EmitScalarExpr(E->getArg(1));
-
- llvm::Type *ArgType = Val->getType();
- Shift = Builder.CreateIntCast(Shift, ArgType, false);
- unsigned ArgWidth = ArgType->getIntegerBitWidth();
- Value *Mask = llvm::ConstantInt::get(ArgType, ArgWidth - 1);
-
- Value *RightShiftAmt = Builder.CreateAnd(Shift, Mask);
- Value *RightShifted = Builder.CreateLShr(Val, RightShiftAmt);
- Value *LeftShiftAmt = Builder.CreateAnd(Builder.CreateNeg(Shift), Mask);
- Value *LeftShifted = Builder.CreateShl(Val, LeftShiftAmt);
- Value *Result = Builder.CreateOr(LeftShifted, RightShifted);
- return RValue::get(Result);
- }
- case Builtin::BI_rotl8:
- case Builtin::BI_rotl16:
- case Builtin::BI_rotl:
- case Builtin::BI_lrotl:
- case Builtin::BI_rotl64: {
- Value *Val = EmitScalarExpr(E->getArg(0));
- Value *Shift = EmitScalarExpr(E->getArg(1));
-
- llvm::Type *ArgType = Val->getType();
- Shift = Builder.CreateIntCast(Shift, ArgType, false);
- unsigned ArgWidth = ArgType->getIntegerBitWidth();
- Value *Mask = llvm::ConstantInt::get(ArgType, ArgWidth - 1);
-
- Value *LeftShiftAmt = Builder.CreateAnd(Shift, Mask);
- Value *LeftShifted = Builder.CreateShl(Val, LeftShiftAmt);
- Value *RightShiftAmt = Builder.CreateAnd(Builder.CreateNeg(Shift), Mask);
- Value *RightShifted = Builder.CreateLShr(Val, RightShiftAmt);
- Value *Result = Builder.CreateOr(LeftShifted, RightShifted);
- return RValue::get(Result);
- }
case Builtin::BI__builtin_unpredictable: {
// Always return the argument of __builtin_unpredictable. LLVM does not
// handle this builtin. Metadata for this builtin should be added directly
@@ -1760,14 +1930,44 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_rotateleft16:
case Builtin::BI__builtin_rotateleft32:
case Builtin::BI__builtin_rotateleft64:
+ case Builtin::BI_rotl8: // Microsoft variants of rotate left
+ case Builtin::BI_rotl16:
+ case Builtin::BI_rotl:
+ case Builtin::BI_lrotl:
+ case Builtin::BI_rotl64:
return emitRotate(E, false);
case Builtin::BI__builtin_rotateright8:
case Builtin::BI__builtin_rotateright16:
case Builtin::BI__builtin_rotateright32:
case Builtin::BI__builtin_rotateright64:
+ case Builtin::BI_rotr8: // Microsoft variants of rotate right
+ case Builtin::BI_rotr16:
+ case Builtin::BI_rotr:
+ case Builtin::BI_lrotr:
+ case Builtin::BI_rotr64:
return emitRotate(E, true);
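With this change the Microsoft rotate spellings reuse the generic funnel-shift lowering in emitRotate instead of the removed open-coded shift/or sequence. A small illustrative example (_rotl is the MSVC spelling, normally declared in <stdlib.h>/<intrin.h>; the values are worked out by hand, not taken from the patch):

    unsigned rotate_examples() {
      unsigned a = _rotl(0x80000001u, 4);                   // Microsoft spelling
      unsigned b = __builtin_rotateleft32(0x80000001u, 4);  // builtin spelling
      // Both now lower through emitRotate to a funnel shift (llvm.fshl.i32);
      // each yields 0x00000018.
      return a ^ b;  // 0
    }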
+ case Builtin::BI__builtin_constant_p: {
+ llvm::Type *ResultType = ConvertType(E->getType());
+ if (CGM.getCodeGenOpts().OptimizationLevel == 0)
+ // At -O0, we don't perform inlining, so we don't need to delay the
+ // processing.
+ return RValue::get(ConstantInt::get(ResultType, 0));
+
+ const Expr *Arg = E->getArg(0);
+ QualType ArgType = Arg->getType();
+ if (!hasScalarEvaluationKind(ArgType) || ArgType->isFunctionType())
+ // We can only reason about scalar types.
+ return RValue::get(ConstantInt::get(ResultType, 0));
+
+ Value *ArgValue = EmitScalarExpr(Arg);
+ Value *F = CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType));
+ Value *Result = Builder.CreateCall(F, ArgValue);
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/false);
+ return RValue::get(Result);
+ }
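A brief sketch of the behavior this new case gives, per the code above: at -O0 anything not already folded returns 0, and scalar arguments are otherwise handed to the optimizer as llvm.is_constant.

    void constant_p_example() {
      int n = 4;
      int a = __builtin_constant_p(42);  // constant-folds to 1 up front
      int b = __builtin_constant_p(n);   // emitted as llvm.is_constant.i32(n) so
                                         // later inlining/propagation can resolve
                                         // it; at -O0 this case simply returns 0
      (void)a; (void)b;
    }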
case Builtin::BI__builtin_object_size: {
unsigned Type =
E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
@@ -2032,10 +2232,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin___memcpy_chk: {
// fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
- llvm::APSInt Size, DstSize;
- if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
- !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
+ Expr::EvalResult SizeResult, DstSizeResult;
+ if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
+ !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
break;
+ llvm::APSInt Size = SizeResult.Val.getInt();
+ llvm::APSInt DstSize = DstSizeResult.Val.getInt();
if (Size.ugt(DstSize))
break;
Address Dest = EmitPointerWithAlignment(E->getArg(0));
@@ -2056,10 +2258,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin___memmove_chk: {
// fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
- llvm::APSInt Size, DstSize;
- if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
- !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
+ Expr::EvalResult SizeResult, DstSizeResult;
+ if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
+ !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
break;
+ llvm::APSInt Size = SizeResult.Val.getInt();
+ llvm::APSInt DstSize = DstSizeResult.Val.getInt();
if (Size.ugt(DstSize))
break;
Address Dest = EmitPointerWithAlignment(E->getArg(0));
@@ -2094,10 +2298,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
}
case Builtin::BI__builtin___memset_chk: {
// fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
- llvm::APSInt Size, DstSize;
- if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
- !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
+ Expr::EvalResult SizeResult, DstSizeResult;
+ if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
+ !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
break;
+ llvm::APSInt Size = SizeResult.Val.getInt();
+ llvm::APSInt DstSize = DstSizeResult.Val.getInt();
if (Size.ugt(DstSize))
break;
Address Dest = EmitPointerWithAlignment(E->getArg(0));
@@ -2305,6 +2511,15 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(nullptr);
}
+ case Builtin::BI__builtin_launder: {
+ const Expr *Arg = E->getArg(0);
+ QualType ArgTy = Arg->getType()->getPointeeType();
+ Value *Ptr = EmitScalarExpr(Arg);
+ if (TypeRequiresBuiltinLaunder(CGM, ArgTy))
+ Ptr = Builder.CreateLaunderInvariantGroup(Ptr);
+
+ return RValue::get(Ptr);
+ }
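A hedged example of the distinction TypeRequiresBuiltinLaunder draws (the types are illustrative; per the check above, the launder.invariant.group call is only emitted under -fstrict-vtable-pointers):

    struct Plain   { int x; };               // no vptr anywhere: laundering is a no-op
    struct Dynamic { virtual ~Dynamic(); };  // dynamic class: carries a vptr

    void launder_examples(Plain *p, Dynamic *d) {
      Plain   *lp = __builtin_launder(p);  // pointer returned unchanged
      Dynamic *ld = __builtin_launder(d);  // becomes llvm.launder.invariant.group
      (void)lp; (void)ld;
    }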
case Builtin::BI__sync_fetch_and_add:
case Builtin::BI__sync_fetch_and_sub:
case Builtin::BI__sync_fetch_and_or:
@@ -2999,7 +3214,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI_InterlockedExchangePointer:
return RValue::get(
EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E));
- case Builtin::BI_InterlockedCompareExchangePointer: {
+ case Builtin::BI_InterlockedCompareExchangePointer:
+ case Builtin::BI_InterlockedCompareExchangePointer_nf: {
llvm::Type *RTy;
llvm::IntegerType *IntType =
IntegerType::get(getLLVMContext(),
@@ -3016,10 +3232,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Value *Comparand =
Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType);
- auto Result =
- Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
- AtomicOrdering::SequentiallyConsistent,
- AtomicOrdering::SequentiallyConsistent);
+ auto Ordering =
+ BuiltinID == Builtin::BI_InterlockedCompareExchangePointer_nf ?
+ AtomicOrdering::Monotonic : AtomicOrdering::SequentiallyConsistent;
+
+ auto Result = Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
+ Ordering, Ordering);
Result->setVolatile(true);
return RValue::get(Builder.CreateIntToPtr(Builder.CreateExtractValue(Result,
@@ -3029,16 +3247,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI_InterlockedCompareExchange8:
case Builtin::BI_InterlockedCompareExchange16:
case Builtin::BI_InterlockedCompareExchange:
- case Builtin::BI_InterlockedCompareExchange64: {
- AtomicCmpXchgInst *CXI = Builder.CreateAtomicCmpXchg(
- EmitScalarExpr(E->getArg(0)),
- EmitScalarExpr(E->getArg(2)),
- EmitScalarExpr(E->getArg(1)),
- AtomicOrdering::SequentiallyConsistent,
- AtomicOrdering::SequentiallyConsistent);
- CXI->setVolatile(true);
- return RValue::get(Builder.CreateExtractValue(CXI, 0));
- }
+ case Builtin::BI_InterlockedCompareExchange64:
+ return RValue::get(EmitAtomicCmpXchgForMSIntrin(*this, E));
case Builtin::BI_InterlockedIncrement16:
case Builtin::BI_InterlockedIncrement:
return RValue::get(
@@ -3457,7 +3667,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Value *ClkEvent = EmitScalarExpr(E->getArg(5));
// Convert to generic address space.
EventList = Builder.CreatePointerCast(EventList, EventPtrTy);
- ClkEvent = Builder.CreatePointerCast(ClkEvent, EventPtrTy);
+ ClkEvent = ClkEvent->getType()->isIntegerTy()
+ ? Builder.CreateBitOrPointerCast(ClkEvent, EventPtrTy)
+ : Builder.CreatePointerCast(ClkEvent, EventPtrTy);
auto Info =
CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6));
llvm::Value *Kernel =
@@ -3591,13 +3803,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_os_log_format:
return emitBuiltinOSLogFormat(*E);
- case Builtin::BI__builtin_os_log_format_buffer_size: {
- analyze_os_log::OSLogBufferLayout Layout;
- analyze_os_log::computeOSLogBufferLayout(CGM.getContext(), E, Layout);
- return RValue::get(ConstantInt::get(ConvertType(E->getType()),
- Layout.size().getQuantity()));
- }
-
case Builtin::BI__xray_customevent: {
if (!ShouldXRayInstrumentFunction())
return RValue::getIgnored();
@@ -4365,6 +4570,14 @@ static const NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP0(vextq_v),
NEONMAP0(vfma_v),
NEONMAP0(vfmaq_v),
+ NEONMAP1(vfmlal_high_v, aarch64_neon_fmlal2, 0),
+ NEONMAP1(vfmlal_low_v, aarch64_neon_fmlal, 0),
+ NEONMAP1(vfmlalq_high_v, aarch64_neon_fmlal2, 0),
+ NEONMAP1(vfmlalq_low_v, aarch64_neon_fmlal, 0),
+ NEONMAP1(vfmlsl_high_v, aarch64_neon_fmlsl2, 0),
+ NEONMAP1(vfmlsl_low_v, aarch64_neon_fmlsl, 0),
+ NEONMAP1(vfmlslq_high_v, aarch64_neon_fmlsl2, 0),
+ NEONMAP1(vfmlslq_low_v, aarch64_neon_fmlsl, 0),
NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
@@ -5338,6 +5551,34 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vdot");
}
+ case NEON::BI__builtin_neon_vfmlal_low_v:
+ case NEON::BI__builtin_neon_vfmlalq_low_v: {
+ llvm::Type *InputTy =
+ llvm::VectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
+ llvm::Type *Tys[2] = { Ty, InputTy };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_low");
+ }
+ case NEON::BI__builtin_neon_vfmlsl_low_v:
+ case NEON::BI__builtin_neon_vfmlslq_low_v: {
+ llvm::Type *InputTy =
+ llvm::VectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
+ llvm::Type *Tys[2] = { Ty, InputTy };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_low");
+ }
+ case NEON::BI__builtin_neon_vfmlal_high_v:
+ case NEON::BI__builtin_neon_vfmlalq_high_v: {
+ llvm::Type *InputTy =
+ llvm::VectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
+ llvm::Type *Tys[2] = { Ty, InputTy };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_high");
+ }
+ case NEON::BI__builtin_neon_vfmlsl_high_v:
+ case NEON::BI__builtin_neon_vfmlslq_high_v: {
+ llvm::Type *InputTy =
+ llvm::VectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
+ llvm::Type *Tys[2] = { Ty, InputTy };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_high");
+ }
}
assert(Int && "Expected valid intrinsic number");
@@ -5585,10 +5826,11 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
llvm::FunctionType *FTy =
llvm::FunctionType::get(VoidTy, /*Variadic=*/false);
- APSInt Value;
- if (!E->getArg(0)->EvaluateAsInt(Value, CGM.getContext()))
+ Expr::EvalResult Result;
+ if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
llvm_unreachable("Sema will ensure that the parameter is constant");
+ llvm::APSInt Value = Result.Val.getInt();
uint64_t ZExtValue = Value.zextOrTrunc(IsThumb ? 16 : 32).getZExtValue();
llvm::InlineAsm *Emit =
@@ -6070,6 +6312,120 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
case ARM::BI_InterlockedIncrement64:
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
+ case ARM::BI_InterlockedExchangeAdd8_acq:
+ case ARM::BI_InterlockedExchangeAdd16_acq:
+ case ARM::BI_InterlockedExchangeAdd_acq:
+ case ARM::BI_InterlockedExchangeAdd64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_acq, E);
+ case ARM::BI_InterlockedExchangeAdd8_rel:
+ case ARM::BI_InterlockedExchangeAdd16_rel:
+ case ARM::BI_InterlockedExchangeAdd_rel:
+ case ARM::BI_InterlockedExchangeAdd64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_rel, E);
+ case ARM::BI_InterlockedExchangeAdd8_nf:
+ case ARM::BI_InterlockedExchangeAdd16_nf:
+ case ARM::BI_InterlockedExchangeAdd_nf:
+ case ARM::BI_InterlockedExchangeAdd64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_nf, E);
+ case ARM::BI_InterlockedExchange8_acq:
+ case ARM::BI_InterlockedExchange16_acq:
+ case ARM::BI_InterlockedExchange_acq:
+ case ARM::BI_InterlockedExchange64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_acq, E);
+ case ARM::BI_InterlockedExchange8_rel:
+ case ARM::BI_InterlockedExchange16_rel:
+ case ARM::BI_InterlockedExchange_rel:
+ case ARM::BI_InterlockedExchange64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_rel, E);
+ case ARM::BI_InterlockedExchange8_nf:
+ case ARM::BI_InterlockedExchange16_nf:
+ case ARM::BI_InterlockedExchange_nf:
+ case ARM::BI_InterlockedExchange64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_nf, E);
+ case ARM::BI_InterlockedCompareExchange8_acq:
+ case ARM::BI_InterlockedCompareExchange16_acq:
+ case ARM::BI_InterlockedCompareExchange_acq:
+ case ARM::BI_InterlockedCompareExchange64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_acq, E);
+ case ARM::BI_InterlockedCompareExchange8_rel:
+ case ARM::BI_InterlockedCompareExchange16_rel:
+ case ARM::BI_InterlockedCompareExchange_rel:
+ case ARM::BI_InterlockedCompareExchange64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_rel, E);
+ case ARM::BI_InterlockedCompareExchange8_nf:
+ case ARM::BI_InterlockedCompareExchange16_nf:
+ case ARM::BI_InterlockedCompareExchange_nf:
+ case ARM::BI_InterlockedCompareExchange64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_nf, E);
+ case ARM::BI_InterlockedOr8_acq:
+ case ARM::BI_InterlockedOr16_acq:
+ case ARM::BI_InterlockedOr_acq:
+ case ARM::BI_InterlockedOr64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_acq, E);
+ case ARM::BI_InterlockedOr8_rel:
+ case ARM::BI_InterlockedOr16_rel:
+ case ARM::BI_InterlockedOr_rel:
+ case ARM::BI_InterlockedOr64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_rel, E);
+ case ARM::BI_InterlockedOr8_nf:
+ case ARM::BI_InterlockedOr16_nf:
+ case ARM::BI_InterlockedOr_nf:
+ case ARM::BI_InterlockedOr64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_nf, E);
+ case ARM::BI_InterlockedXor8_acq:
+ case ARM::BI_InterlockedXor16_acq:
+ case ARM::BI_InterlockedXor_acq:
+ case ARM::BI_InterlockedXor64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_acq, E);
+ case ARM::BI_InterlockedXor8_rel:
+ case ARM::BI_InterlockedXor16_rel:
+ case ARM::BI_InterlockedXor_rel:
+ case ARM::BI_InterlockedXor64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_rel, E);
+ case ARM::BI_InterlockedXor8_nf:
+ case ARM::BI_InterlockedXor16_nf:
+ case ARM::BI_InterlockedXor_nf:
+ case ARM::BI_InterlockedXor64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_nf, E);
+ case ARM::BI_InterlockedAnd8_acq:
+ case ARM::BI_InterlockedAnd16_acq:
+ case ARM::BI_InterlockedAnd_acq:
+ case ARM::BI_InterlockedAnd64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_acq, E);
+ case ARM::BI_InterlockedAnd8_rel:
+ case ARM::BI_InterlockedAnd16_rel:
+ case ARM::BI_InterlockedAnd_rel:
+ case ARM::BI_InterlockedAnd64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_rel, E);
+ case ARM::BI_InterlockedAnd8_nf:
+ case ARM::BI_InterlockedAnd16_nf:
+ case ARM::BI_InterlockedAnd_nf:
+ case ARM::BI_InterlockedAnd64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_nf, E);
+ case ARM::BI_InterlockedIncrement16_acq:
+ case ARM::BI_InterlockedIncrement_acq:
+ case ARM::BI_InterlockedIncrement64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_acq, E);
+ case ARM::BI_InterlockedIncrement16_rel:
+ case ARM::BI_InterlockedIncrement_rel:
+ case ARM::BI_InterlockedIncrement64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_rel, E);
+ case ARM::BI_InterlockedIncrement16_nf:
+ case ARM::BI_InterlockedIncrement_nf:
+ case ARM::BI_InterlockedIncrement64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_nf, E);
+ case ARM::BI_InterlockedDecrement16_acq:
+ case ARM::BI_InterlockedDecrement_acq:
+ case ARM::BI_InterlockedDecrement64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_acq, E);
+ case ARM::BI_InterlockedDecrement16_rel:
+ case ARM::BI_InterlockedDecrement_rel:
+ case ARM::BI_InterlockedDecrement64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_rel, E);
+ case ARM::BI_InterlockedDecrement16_nf:
+ case ARM::BI_InterlockedDecrement_nf:
+ case ARM::BI_InterlockedDecrement64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_nf, E);
}
// Get the last argument, which specifies the vector type.
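The new _acq/_rel/_nf cases above reuse the same EmitMSVCBuiltinExpr helpers as the plain Interlocked builtins; only the memory ordering attached to the resulting atomic operation differs. A minimal portable model of the three orderings (an illustrative sketch with made-up helper names, not the actual CodeGen path):

#include <atomic>

// Same arithmetic, different ordering: acquire, release, or "no fence"
// (relaxed), mirroring the _acq/_rel/_nf suffixes.
long ExchangeAdd_acq(std::atomic<long> &Target, long Value) {
  return Target.fetch_add(Value, std::memory_order_acquire);
}
long ExchangeAdd_rel(std::atomic<long> &Target, long Value) {
  return Target.fetch_add(Value, std::memory_order_release);
}
long ExchangeAdd_nf(std::atomic<long> &Target, long Value) {
  return Target.fetch_add(Value, std::memory_order_relaxed);
}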
@@ -6576,11 +6932,33 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr");
}
+ if (BuiltinID == AArch64::BI__getReg) {
+ Expr::EvalResult Result;
+ if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
+ llvm_unreachable("Sema will ensure that the parameter is constant");
+
+ llvm::APSInt Value = Result.Val.getInt();
+ LLVMContext &Context = CGM.getLLVMContext();
+ std::string Reg = Value == 31 ? "sp" : "x" + Value.toString(10);
+
+ llvm::Metadata *Ops[] = {llvm::MDString::get(Context, Reg)};
+ llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
+ llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
+
+ llvm::Value *F =
+ CGM.getIntrinsic(llvm::Intrinsic::read_register, {Int64Ty});
+ return Builder.CreateCall(F, Metadata);
+ }
+
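__getReg takes a general-purpose register number and is lowered to llvm.read_register with an MDString naming the register, "x<N>" for 0-30 and "sp" for 31. A hypothetical call site (the exact prototype of the MSVC intrinsic is assumed here):

// Sketch of how the builtin handled above might be used.
unsigned long long ReadFramePointer() {
  return __getReg(29);   // becomes llvm.read_register with metadata "x29"
}
unsigned long long ReadStackPointer() {
  return __getReg(31);   // register number 31 is mapped to "sp"
}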
if (BuiltinID == AArch64::BI__builtin_arm_clrex) {
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex);
return Builder.CreateCall(F);
}
+ if (BuiltinID == AArch64::BI_ReadWriteBarrier)
+ return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
+ llvm::SyncScope::SingleThread);
+
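_ReadWriteBarrier only has to constrain the compiler, not the hardware, which is why it is emitted as a sequentially consistent fence with single-thread scope instead of a dmb. Roughly the same effect in standard C++ (a sketch, not the generated IR):

#include <atomic>

// Orders memory accesses with respect to the current thread only;
// no hardware barrier instruction is implied.
inline void CompilerOnlyBarrier() {
  std::atomic_signal_fence(std::memory_order_seq_cst);
}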
// CRC32
Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
switch (BuiltinID) {
@@ -6643,6 +7021,48 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType, IsRead);
}
+ if (BuiltinID == AArch64::BI_ReadStatusReg ||
+ BuiltinID == AArch64::BI_WriteStatusReg) {
+ LLVMContext &Context = CGM.getLLVMContext();
+
+ unsigned SysReg =
+ E->getArg(0)->EvaluateKnownConstInt(getContext()).getZExtValue();
+
+ std::string SysRegStr;
+ llvm::raw_string_ostream(SysRegStr) <<
+ ((1 << 1) | ((SysReg >> 14) & 1)) << ":" <<
+ ((SysReg >> 11) & 7) << ":" <<
+ ((SysReg >> 7) & 15) << ":" <<
+ ((SysReg >> 3) & 15) << ":" <<
+ ( SysReg & 7);
+
+ llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysRegStr) };
+ llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
+ llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
+
+ llvm::Type *RegisterType = Int64Ty;
+ llvm::Type *ValueType = Int32Ty;
+ llvm::Type *Types[] = { RegisterType };
+
+ if (BuiltinID == AArch64::BI_ReadStatusReg) {
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
+ llvm::Value *Call = Builder.CreateCall(F, Metadata);
+
+ return Builder.CreateTrunc(Call, ValueType);
+ }
+
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
+ llvm::Value *ArgValue = EmitScalarExpr(E->getArg(1));
+ ArgValue = Builder.CreateZExt(ArgValue, RegisterType);
+
+ return Builder.CreateCall(F, { Metadata, ArgValue });
+ }
+
+ if (BuiltinID == AArch64::BI_AddressOfReturnAddress) {
+ llvm::Value *F = CGM.getIntrinsic(Intrinsic::addressofreturnaddress);
+ return Builder.CreateCall(F);
+ }
+
// Find out if any arguments are required to be integer constant
// expressions.
unsigned ICEArguments = 0;
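The immediate passed to _ReadStatusReg/_WriteStatusReg packs the MRS/MSR operand fields, and the code above unpacks it into the "op0:op1:CRn:CRm:op2" string expected as register-name metadata. A stand-alone sketch of the same unpacking (assuming the bit layout used above):

#include <cstdio>

// Decode a _ReadStatusReg/_WriteStatusReg immediate into the system
// register name string.
void PrintSysRegName(unsigned SysReg) {
  unsigned Op0 = (1u << 1) | ((SysReg >> 14) & 1);  // always 2 or 3
  unsigned Op1 = (SysReg >> 11) & 7;
  unsigned CRn = (SysReg >> 7) & 15;
  unsigned CRm = (SysReg >> 3) & 15;
  unsigned Op2 = SysReg & 7;
  std::printf("%u:%u:%u:%u:%u\n", Op0, Op1, CRn, CRm, Op2);
}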
@@ -6738,7 +7158,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vcvth_f16_u32:
case NEON::BI__builtin_neon_vcvth_f16_u64:
usgn = true;
- // FALL THROUGH
+ LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vcvth_f16_s16:
case NEON::BI__builtin_neon_vcvth_f16_s32:
case NEON::BI__builtin_neon_vcvth_f16_s64: {
@@ -6758,7 +7178,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vcvth_u16_f16:
usgn = true;
- // FALL THROUGH
+ LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vcvth_s16_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
@@ -6768,7 +7188,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vcvth_u32_f16:
usgn = true;
- // FALL THROUGH
+ LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vcvth_s32_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
@@ -6778,7 +7198,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vcvth_u64_f16:
usgn = true;
- // FALL THROUGH
+ LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vcvth_s64_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
@@ -8493,6 +8913,129 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
case AArch64::BI_InterlockedIncrement64:
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
+ case AArch64::BI_InterlockedExchangeAdd8_acq:
+ case AArch64::BI_InterlockedExchangeAdd16_acq:
+ case AArch64::BI_InterlockedExchangeAdd_acq:
+ case AArch64::BI_InterlockedExchangeAdd64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_acq, E);
+ case AArch64::BI_InterlockedExchangeAdd8_rel:
+ case AArch64::BI_InterlockedExchangeAdd16_rel:
+ case AArch64::BI_InterlockedExchangeAdd_rel:
+ case AArch64::BI_InterlockedExchangeAdd64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_rel, E);
+ case AArch64::BI_InterlockedExchangeAdd8_nf:
+ case AArch64::BI_InterlockedExchangeAdd16_nf:
+ case AArch64::BI_InterlockedExchangeAdd_nf:
+ case AArch64::BI_InterlockedExchangeAdd64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_nf, E);
+ case AArch64::BI_InterlockedExchange8_acq:
+ case AArch64::BI_InterlockedExchange16_acq:
+ case AArch64::BI_InterlockedExchange_acq:
+ case AArch64::BI_InterlockedExchange64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_acq, E);
+ case AArch64::BI_InterlockedExchange8_rel:
+ case AArch64::BI_InterlockedExchange16_rel:
+ case AArch64::BI_InterlockedExchange_rel:
+ case AArch64::BI_InterlockedExchange64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_rel, E);
+ case AArch64::BI_InterlockedExchange8_nf:
+ case AArch64::BI_InterlockedExchange16_nf:
+ case AArch64::BI_InterlockedExchange_nf:
+ case AArch64::BI_InterlockedExchange64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_nf, E);
+ case AArch64::BI_InterlockedCompareExchange8_acq:
+ case AArch64::BI_InterlockedCompareExchange16_acq:
+ case AArch64::BI_InterlockedCompareExchange_acq:
+ case AArch64::BI_InterlockedCompareExchange64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_acq, E);
+ case AArch64::BI_InterlockedCompareExchange8_rel:
+ case AArch64::BI_InterlockedCompareExchange16_rel:
+ case AArch64::BI_InterlockedCompareExchange_rel:
+ case AArch64::BI_InterlockedCompareExchange64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_rel, E);
+ case AArch64::BI_InterlockedCompareExchange8_nf:
+ case AArch64::BI_InterlockedCompareExchange16_nf:
+ case AArch64::BI_InterlockedCompareExchange_nf:
+ case AArch64::BI_InterlockedCompareExchange64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_nf, E);
+ case AArch64::BI_InterlockedOr8_acq:
+ case AArch64::BI_InterlockedOr16_acq:
+ case AArch64::BI_InterlockedOr_acq:
+ case AArch64::BI_InterlockedOr64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_acq, E);
+ case AArch64::BI_InterlockedOr8_rel:
+ case AArch64::BI_InterlockedOr16_rel:
+ case AArch64::BI_InterlockedOr_rel:
+ case AArch64::BI_InterlockedOr64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_rel, E);
+ case AArch64::BI_InterlockedOr8_nf:
+ case AArch64::BI_InterlockedOr16_nf:
+ case AArch64::BI_InterlockedOr_nf:
+ case AArch64::BI_InterlockedOr64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_nf, E);
+ case AArch64::BI_InterlockedXor8_acq:
+ case AArch64::BI_InterlockedXor16_acq:
+ case AArch64::BI_InterlockedXor_acq:
+ case AArch64::BI_InterlockedXor64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_acq, E);
+ case AArch64::BI_InterlockedXor8_rel:
+ case AArch64::BI_InterlockedXor16_rel:
+ case AArch64::BI_InterlockedXor_rel:
+ case AArch64::BI_InterlockedXor64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_rel, E);
+ case AArch64::BI_InterlockedXor8_nf:
+ case AArch64::BI_InterlockedXor16_nf:
+ case AArch64::BI_InterlockedXor_nf:
+ case AArch64::BI_InterlockedXor64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_nf, E);
+ case AArch64::BI_InterlockedAnd8_acq:
+ case AArch64::BI_InterlockedAnd16_acq:
+ case AArch64::BI_InterlockedAnd_acq:
+ case AArch64::BI_InterlockedAnd64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_acq, E);
+ case AArch64::BI_InterlockedAnd8_rel:
+ case AArch64::BI_InterlockedAnd16_rel:
+ case AArch64::BI_InterlockedAnd_rel:
+ case AArch64::BI_InterlockedAnd64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_rel, E);
+ case AArch64::BI_InterlockedAnd8_nf:
+ case AArch64::BI_InterlockedAnd16_nf:
+ case AArch64::BI_InterlockedAnd_nf:
+ case AArch64::BI_InterlockedAnd64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_nf, E);
+ case AArch64::BI_InterlockedIncrement16_acq:
+ case AArch64::BI_InterlockedIncrement_acq:
+ case AArch64::BI_InterlockedIncrement64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_acq, E);
+ case AArch64::BI_InterlockedIncrement16_rel:
+ case AArch64::BI_InterlockedIncrement_rel:
+ case AArch64::BI_InterlockedIncrement64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_rel, E);
+ case AArch64::BI_InterlockedIncrement16_nf:
+ case AArch64::BI_InterlockedIncrement_nf:
+ case AArch64::BI_InterlockedIncrement64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_nf, E);
+ case AArch64::BI_InterlockedDecrement16_acq:
+ case AArch64::BI_InterlockedDecrement_acq:
+ case AArch64::BI_InterlockedDecrement64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_acq, E);
+ case AArch64::BI_InterlockedDecrement16_rel:
+ case AArch64::BI_InterlockedDecrement_rel:
+ case AArch64::BI_InterlockedDecrement64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_rel, E);
+ case AArch64::BI_InterlockedDecrement16_nf:
+ case AArch64::BI_InterlockedDecrement_nf:
+ case AArch64::BI_InterlockedDecrement64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_nf, E);
+
+ case AArch64::BI_InterlockedAdd: {
+ Value *Arg0 = EmitScalarExpr(E->getArg(0));
+ Value *Arg1 = EmitScalarExpr(E->getArg(1));
+ AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
+ AtomicRMWInst::Add, Arg0, Arg1,
+ llvm::AtomicOrdering::SequentiallyConsistent);
+ return Builder.CreateAdd(RMWI, Arg1);
+ }
}
}
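Unlike the other Interlocked cases above, _InterlockedAdd returns the updated value, while an atomicrmw add yields the value the location held before the operation; the trailing CreateAdd bridges that gap. The same relationship in portable C++ (illustrative model only):

#include <atomic>

long InterlockedAdd_model(std::atomic<long> &Target, long Value) {
  long Old = Target.fetch_add(Value, std::memory_order_seq_cst); // atomicrmw add
  return Old + Value;                                            // new value
}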
@@ -8947,7 +9490,7 @@ static Value *EmitX86AddSubSatExpr(CodeGenFunction &CGF, const CallExpr *E,
Value *Res;
if (IsAddition) {
// ADDUS: a > (a+b) ? ~0 : (a+b)
- // If Ops[0] > Add, overflow occured.
+ // If Ops[0] > Add, overflow occurred.
Value *Add = CGF.Builder.CreateAdd(Ops[0], Ops[1]);
Value *ICmp = CGF.Builder.CreateICmp(ICmpInst::ICMP_UGT, Ops[0], Add);
Value *Max = llvm::Constant::getAllOnesValue(ResultType);
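The unsigned saturating add uses the fact that an unsigned addition wrapped exactly when the result is smaller than either operand. A scalar worked example of the identity from the comment above:

#include <cstdint>

// ADDUS on one byte: a > (a + b) ? 0xFF : (a + b)
uint8_t addus_u8(uint8_t A, uint8_t B) {
  uint8_t Sum = (uint8_t)(A + B);  // wraps modulo 256 on overflow
  return A > Sum ? 0xFF : Sum;     // wrapped, so clamp to all-ones
}
// e.g. addus_u8(200, 100) == 255, addus_u8(10, 20) == 30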
@@ -9018,17 +9561,17 @@ Value *CodeGenFunction::EmitX86CpuSupports(const CallExpr *E) {
return EmitX86CpuSupports(FeatureStr);
}
-uint32_t
+uint64_t
CodeGenFunction::GetX86CpuSupportsMask(ArrayRef<StringRef> FeatureStrs) {
// Processor features and mapping to processor feature value.
- uint32_t FeaturesMask = 0;
+ uint64_t FeaturesMask = 0;
for (const StringRef &FeatureStr : FeatureStrs) {
unsigned Feature =
StringSwitch<unsigned>(FeatureStr)
#define X86_FEATURE_COMPAT(VAL, ENUM, STR) .Case(STR, VAL)
#include "llvm/Support/X86TargetParser.def"
;
- FeaturesMask |= (1U << Feature);
+ FeaturesMask |= (1ULL << Feature);
}
return FeaturesMask;
}
@@ -9037,31 +9580,54 @@ Value *CodeGenFunction::EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs) {
return EmitX86CpuSupports(GetX86CpuSupportsMask(FeatureStrs));
}
-llvm::Value *CodeGenFunction::EmitX86CpuSupports(uint32_t FeaturesMask) {
- // Matching the struct layout from the compiler-rt/libgcc structure that is
- // filled in:
- // unsigned int __cpu_vendor;
- // unsigned int __cpu_type;
- // unsigned int __cpu_subtype;
- // unsigned int __cpu_features[1];
- llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
- llvm::ArrayType::get(Int32Ty, 1));
-
- // Grab the global __cpu_model.
- llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
+llvm::Value *CodeGenFunction::EmitX86CpuSupports(uint64_t FeaturesMask) {
+ uint32_t Features1 = Lo_32(FeaturesMask);
+ uint32_t Features2 = Hi_32(FeaturesMask);
+
+ Value *Result = Builder.getTrue();
+
+ if (Features1 != 0) {
+ // Matching the struct layout from the compiler-rt/libgcc structure that is
+ // filled in:
+ // unsigned int __cpu_vendor;
+ // unsigned int __cpu_type;
+ // unsigned int __cpu_subtype;
+ // unsigned int __cpu_features[1];
+ llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
+ llvm::ArrayType::get(Int32Ty, 1));
+
+ // Grab the global __cpu_model.
+ llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
+
+ // Grab the first (0th) element from the field __cpu_features off of the
+ // global in the struct STy.
+ Value *Idxs[] = {Builder.getInt32(0), Builder.getInt32(3),
+ Builder.getInt32(0)};
+ Value *CpuFeatures = Builder.CreateGEP(STy, CpuModel, Idxs);
+ Value *Features =
+ Builder.CreateAlignedLoad(CpuFeatures, CharUnits::fromQuantity(4));
+
+ // Check the value of the bit corresponding to the feature requested.
+ Value *Mask = Builder.getInt32(Features1);
+ Value *Bitset = Builder.CreateAnd(Features, Mask);
+ Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
+ Result = Builder.CreateAnd(Result, Cmp);
+ }
+
+ if (Features2 != 0) {
+ llvm::Constant *CpuFeatures2 = CGM.CreateRuntimeVariable(Int32Ty,
+ "__cpu_features2");
+ Value *Features =
+ Builder.CreateAlignedLoad(CpuFeatures2, CharUnits::fromQuantity(4));
+
+ // Check the value of the bit corresponding to the feature requested.
+ Value *Mask = Builder.getInt32(Features2);
+ Value *Bitset = Builder.CreateAnd(Features, Mask);
+ Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
+ Result = Builder.CreateAnd(Result, Cmp);
+ }
- // Grab the first (0th) element from the field __cpu_features off of the
- // global in the struct STy.
- Value *Idxs[] = {ConstantInt::get(Int32Ty, 0), ConstantInt::get(Int32Ty, 3),
- ConstantInt::get(Int32Ty, 0)};
- Value *CpuFeatures = Builder.CreateGEP(STy, CpuModel, Idxs);
- Value *Features =
- Builder.CreateAlignedLoad(CpuFeatures, CharUnits::fromQuantity(4));
-
- // Check the value of the bit corresponding to the feature requested.
- Value *Bitset = Builder.CreateAnd(
- Features, llvm::ConstantInt::get(Int32Ty, FeaturesMask));
- return Builder.CreateICmpNE(Bitset, llvm::ConstantInt::get(Int32Ty, 0));
+ return Result;
}
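With the mask widened to 64 bits, features 0-31 are still tested against __cpu_model.__cpu_features[0] and features 32-63 against the separate __cpu_features2 word. A sketch of the equivalent runtime test (the struct layout follows the compiler-rt/libgcc description in the comment above; the exact declarations are assumptions):

struct ProcessorModel {                 // mirrors the layout described above
  unsigned int CpuVendor;
  unsigned int CpuType;
  unsigned int CpuSubtype;
  unsigned int CpuFeatures[1];
};
extern ProcessorModel __cpu_model;      // provided by compiler-rt/libgcc
extern unsigned int __cpu_features2;    // holds the high 32 feature bits

bool CpuSupports(unsigned long long FeaturesMask) {
  unsigned int Lo = (unsigned int)FeaturesMask;
  unsigned int Hi = (unsigned int)(FeaturesMask >> 32);
  bool Ok = true;
  if (Lo) Ok &= (__cpu_model.CpuFeatures[0] & Lo) == Lo;
  if (Hi) Ok &= (__cpu_features2 & Hi) == Hi;
  return Ok;
}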
Value *CodeGenFunction::EmitX86CpuInit() {
@@ -10425,30 +10991,22 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
case X86::BI__builtin_ia32_addcarryx_u32:
case X86::BI__builtin_ia32_addcarryx_u64:
- case X86::BI__builtin_ia32_addcarry_u32:
- case X86::BI__builtin_ia32_addcarry_u64:
case X86::BI__builtin_ia32_subborrow_u32:
case X86::BI__builtin_ia32_subborrow_u64: {
Intrinsic::ID IID;
switch (BuiltinID) {
default: llvm_unreachable("Unsupported intrinsic!");
case X86::BI__builtin_ia32_addcarryx_u32:
- IID = Intrinsic::x86_addcarryx_u32;
+ IID = Intrinsic::x86_addcarry_32;
break;
case X86::BI__builtin_ia32_addcarryx_u64:
- IID = Intrinsic::x86_addcarryx_u64;
- break;
- case X86::BI__builtin_ia32_addcarry_u32:
- IID = Intrinsic::x86_addcarry_u32;
- break;
- case X86::BI__builtin_ia32_addcarry_u64:
- IID = Intrinsic::x86_addcarry_u64;
+ IID = Intrinsic::x86_addcarry_64;
break;
case X86::BI__builtin_ia32_subborrow_u32:
- IID = Intrinsic::x86_subborrow_u32;
+ IID = Intrinsic::x86_subborrow_32;
break;
case X86::BI__builtin_ia32_subborrow_u64:
- IID = Intrinsic::x86_subborrow_u64;
+ IID = Intrinsic::x86_subborrow_64;
break;
}
@@ -11314,12 +11872,16 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
case AMDGPU::BI__builtin_amdgcn_ds_swizzle:
return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_ds_swizzle);
- case AMDGPU::BI__builtin_amdgcn_mov_dpp: {
- llvm::SmallVector<llvm::Value *, 5> Args;
- for (unsigned I = 0; I != 5; ++I)
+ case AMDGPU::BI__builtin_amdgcn_mov_dpp:
+ case AMDGPU::BI__builtin_amdgcn_update_dpp: {
+ llvm::SmallVector<llvm::Value *, 6> Args;
+ for (unsigned I = 0; I != E->getNumArgs(); ++I)
Args.push_back(EmitScalarExpr(E->getArg(I)));
- Value *F = CGM.getIntrinsic(Intrinsic::amdgcn_mov_dpp,
- Args[0]->getType());
+ assert(Args.size() == 5 || Args.size() == 6);
+ if (Args.size() == 5)
+ Args.insert(Args.begin(), llvm::UndefValue::get(Args[0]->getType()));
+ Value *F =
+ CGM.getIntrinsic(Intrinsic::amdgcn_update_dpp, Args[0]->getType());
return Builder.CreateCall(F, Args);
}
case AMDGPU::BI__builtin_amdgcn_div_fixup:
@@ -11685,7 +12247,7 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, {X, Y, M4Value});
}
- // Vector intrisincs that output the post-instruction CC value.
+ // Vector intrinsics that output the post-instruction CC value.
#define INTRINSIC_WITH_CC(NAME) \
case SystemZ::BI__builtin_##NAME: \
@@ -12145,7 +12707,7 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
bool isColMajor = isColMajorArg.getSExtValue();
unsigned IID;
unsigned NumResults = 8;
- // PTX Instructions (and LLVM instrinsics) are defined for slice _d_, yet
+ // PTX Instructions (and LLVM intrinsics) are defined for slice _d_, yet
// for some reason nvcc builtins use _c_.
switch (BuiltinID) {
case NVPTX::BI__hmma_m16n16k16_st_c_f16:
@@ -12423,6 +12985,191 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_notify);
return Builder.CreateCall(Callee, {Addr, Count});
}
+ case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f32:
+ case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f64:
+ case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f32:
+ case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f64:
+ case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32x4_f32x4:
+ case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64x2_f64x2: {
+ Value *Src = EmitScalarExpr(E->getArg(0));
+ llvm::Type *ResT = ConvertType(E->getType());
+ Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_signed,
+ {ResT, Src->getType()});
+ return Builder.CreateCall(Callee, {Src});
+ }
+ case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f32:
+ case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f64:
+ case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f32:
+ case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f64:
+ case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32x4_f32x4:
+ case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64x2_f64x2: {
+ Value *Src = EmitScalarExpr(E->getArg(0));
+ llvm::Type *ResT = ConvertType(E->getType());
+ Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_unsigned,
+ {ResT, Src->getType()});
+ return Builder.CreateCall(Callee, {Src});
+ }
+ case WebAssembly::BI__builtin_wasm_min_f32:
+ case WebAssembly::BI__builtin_wasm_min_f64:
+ case WebAssembly::BI__builtin_wasm_min_f32x4:
+ case WebAssembly::BI__builtin_wasm_min_f64x2: {
+ Value *LHS = EmitScalarExpr(E->getArg(0));
+ Value *RHS = EmitScalarExpr(E->getArg(1));
+ Value *Callee = CGM.getIntrinsic(Intrinsic::minimum,
+ ConvertType(E->getType()));
+ return Builder.CreateCall(Callee, {LHS, RHS});
+ }
+ case WebAssembly::BI__builtin_wasm_max_f32:
+ case WebAssembly::BI__builtin_wasm_max_f64:
+ case WebAssembly::BI__builtin_wasm_max_f32x4:
+ case WebAssembly::BI__builtin_wasm_max_f64x2: {
+ Value *LHS = EmitScalarExpr(E->getArg(0));
+ Value *RHS = EmitScalarExpr(E->getArg(1));
+ Value *Callee = CGM.getIntrinsic(Intrinsic::maximum,
+ ConvertType(E->getType()));
+ return Builder.CreateCall(Callee, {LHS, RHS});
+ }
+ case WebAssembly::BI__builtin_wasm_extract_lane_s_i8x16:
+ case WebAssembly::BI__builtin_wasm_extract_lane_u_i8x16:
+ case WebAssembly::BI__builtin_wasm_extract_lane_s_i16x8:
+ case WebAssembly::BI__builtin_wasm_extract_lane_u_i16x8:
+ case WebAssembly::BI__builtin_wasm_extract_lane_i32x4:
+ case WebAssembly::BI__builtin_wasm_extract_lane_i64x2:
+ case WebAssembly::BI__builtin_wasm_extract_lane_f32x4:
+ case WebAssembly::BI__builtin_wasm_extract_lane_f64x2: {
+ llvm::APSInt LaneConst;
+ if (!E->getArg(1)->isIntegerConstantExpr(LaneConst, getContext()))
+ llvm_unreachable("Constant arg isn't actually constant?");
+ Value *Vec = EmitScalarExpr(E->getArg(0));
+ Value *Lane = llvm::ConstantInt::get(getLLVMContext(), LaneConst);
+ Value *Extract = Builder.CreateExtractElement(Vec, Lane);
+ switch (BuiltinID) {
+ case WebAssembly::BI__builtin_wasm_extract_lane_s_i8x16:
+ case WebAssembly::BI__builtin_wasm_extract_lane_s_i16x8:
+ return Builder.CreateSExt(Extract, ConvertType(E->getType()));
+ case WebAssembly::BI__builtin_wasm_extract_lane_u_i8x16:
+ case WebAssembly::BI__builtin_wasm_extract_lane_u_i16x8:
+ return Builder.CreateZExt(Extract, ConvertType(E->getType()));
+ case WebAssembly::BI__builtin_wasm_extract_lane_i32x4:
+ case WebAssembly::BI__builtin_wasm_extract_lane_i64x2:
+ case WebAssembly::BI__builtin_wasm_extract_lane_f32x4:
+ case WebAssembly::BI__builtin_wasm_extract_lane_f64x2:
+ return Extract;
+ default:
+ llvm_unreachable("unexpected builtin ID");
+ }
+ }
+ case WebAssembly::BI__builtin_wasm_replace_lane_i8x16:
+ case WebAssembly::BI__builtin_wasm_replace_lane_i16x8:
+ case WebAssembly::BI__builtin_wasm_replace_lane_i32x4:
+ case WebAssembly::BI__builtin_wasm_replace_lane_i64x2:
+ case WebAssembly::BI__builtin_wasm_replace_lane_f32x4:
+ case WebAssembly::BI__builtin_wasm_replace_lane_f64x2: {
+ llvm::APSInt LaneConst;
+ if (!E->getArg(1)->isIntegerConstantExpr(LaneConst, getContext()))
+ llvm_unreachable("Constant arg isn't actually constant?");
+ Value *Vec = EmitScalarExpr(E->getArg(0));
+ Value *Lane = llvm::ConstantInt::get(getLLVMContext(), LaneConst);
+ Value *Val = EmitScalarExpr(E->getArg(2));
+ switch (BuiltinID) {
+ case WebAssembly::BI__builtin_wasm_replace_lane_i8x16:
+ case WebAssembly::BI__builtin_wasm_replace_lane_i16x8: {
+ llvm::Type *ElemType = ConvertType(E->getType())->getVectorElementType();
+ Value *Trunc = Builder.CreateTrunc(Val, ElemType);
+ return Builder.CreateInsertElement(Vec, Trunc, Lane);
+ }
+ case WebAssembly::BI__builtin_wasm_replace_lane_i32x4:
+ case WebAssembly::BI__builtin_wasm_replace_lane_i64x2:
+ case WebAssembly::BI__builtin_wasm_replace_lane_f32x4:
+ case WebAssembly::BI__builtin_wasm_replace_lane_f64x2:
+ return Builder.CreateInsertElement(Vec, Val, Lane);
+ default:
+ llvm_unreachable("unexpected builtin ID");
+ }
+ }
+ case WebAssembly::BI__builtin_wasm_add_saturate_s_i8x16:
+ case WebAssembly::BI__builtin_wasm_add_saturate_u_i8x16:
+ case WebAssembly::BI__builtin_wasm_add_saturate_s_i16x8:
+ case WebAssembly::BI__builtin_wasm_add_saturate_u_i16x8:
+ case WebAssembly::BI__builtin_wasm_sub_saturate_s_i8x16:
+ case WebAssembly::BI__builtin_wasm_sub_saturate_u_i8x16:
+ case WebAssembly::BI__builtin_wasm_sub_saturate_s_i16x8:
+ case WebAssembly::BI__builtin_wasm_sub_saturate_u_i16x8: {
+ unsigned IntNo;
+ switch (BuiltinID) {
+ case WebAssembly::BI__builtin_wasm_add_saturate_s_i8x16:
+ case WebAssembly::BI__builtin_wasm_add_saturate_s_i16x8:
+ IntNo = Intrinsic::sadd_sat;
+ break;
+ case WebAssembly::BI__builtin_wasm_add_saturate_u_i8x16:
+ case WebAssembly::BI__builtin_wasm_add_saturate_u_i16x8:
+ IntNo = Intrinsic::uadd_sat;
+ break;
+ case WebAssembly::BI__builtin_wasm_sub_saturate_s_i8x16:
+ case WebAssembly::BI__builtin_wasm_sub_saturate_s_i16x8:
+ IntNo = Intrinsic::wasm_sub_saturate_signed;
+ break;
+ case WebAssembly::BI__builtin_wasm_sub_saturate_u_i8x16:
+ case WebAssembly::BI__builtin_wasm_sub_saturate_u_i16x8:
+ IntNo = Intrinsic::wasm_sub_saturate_unsigned;
+ break;
+ default:
+ llvm_unreachable("unexpected builtin ID");
+ }
+ Value *LHS = EmitScalarExpr(E->getArg(0));
+ Value *RHS = EmitScalarExpr(E->getArg(1));
+ Value *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
+ return Builder.CreateCall(Callee, {LHS, RHS});
+ }
+ case WebAssembly::BI__builtin_wasm_bitselect: {
+ Value *V1 = EmitScalarExpr(E->getArg(0));
+ Value *V2 = EmitScalarExpr(E->getArg(1));
+ Value *C = EmitScalarExpr(E->getArg(2));
+ Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_bitselect,
+ ConvertType(E->getType()));
+ return Builder.CreateCall(Callee, {V1, V2, C});
+ }
+ case WebAssembly::BI__builtin_wasm_any_true_i8x16:
+ case WebAssembly::BI__builtin_wasm_any_true_i16x8:
+ case WebAssembly::BI__builtin_wasm_any_true_i32x4:
+ case WebAssembly::BI__builtin_wasm_any_true_i64x2:
+ case WebAssembly::BI__builtin_wasm_all_true_i8x16:
+ case WebAssembly::BI__builtin_wasm_all_true_i16x8:
+ case WebAssembly::BI__builtin_wasm_all_true_i32x4:
+ case WebAssembly::BI__builtin_wasm_all_true_i64x2: {
+ unsigned IntNo;
+ switch (BuiltinID) {
+ case WebAssembly::BI__builtin_wasm_any_true_i8x16:
+ case WebAssembly::BI__builtin_wasm_any_true_i16x8:
+ case WebAssembly::BI__builtin_wasm_any_true_i32x4:
+ case WebAssembly::BI__builtin_wasm_any_true_i64x2:
+ IntNo = Intrinsic::wasm_anytrue;
+ break;
+ case WebAssembly::BI__builtin_wasm_all_true_i8x16:
+ case WebAssembly::BI__builtin_wasm_all_true_i16x8:
+ case WebAssembly::BI__builtin_wasm_all_true_i32x4:
+ case WebAssembly::BI__builtin_wasm_all_true_i64x2:
+ IntNo = Intrinsic::wasm_alltrue;
+ break;
+ default:
+ llvm_unreachable("unexpected builtin ID");
+ }
+ Value *Vec = EmitScalarExpr(E->getArg(0));
+ Value *Callee = CGM.getIntrinsic(IntNo, Vec->getType());
+ return Builder.CreateCall(Callee, {Vec});
+ }
+ case WebAssembly::BI__builtin_wasm_abs_f32x4:
+ case WebAssembly::BI__builtin_wasm_abs_f64x2: {
+ Value *Vec = EmitScalarExpr(E->getArg(0));
+ Value *Callee = CGM.getIntrinsic(Intrinsic::fabs, Vec->getType());
+ return Builder.CreateCall(Callee, {Vec});
+ }
+ case WebAssembly::BI__builtin_wasm_sqrt_f32x4:
+ case WebAssembly::BI__builtin_wasm_sqrt_f64x2: {
+ Value *Vec = EmitScalarExpr(E->getArg(0));
+ Value *Callee = CGM.getIntrinsic(Intrinsic::sqrt, Vec->getType());
+ return Builder.CreateCall(Callee, {Vec});
+ }
default:
return nullptr;
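Among the SIMD cases above, the _s and _u extract_lane forms differ only in how the sub-word lane is widened after the extractelement: sign- versus zero-extension. A scalar model for the i8x16 case (hypothetical helper names; the vector is shown as a plain array):

#include <cstdint>

int32_t extract_lane_s_i8x16(const int8_t Lanes[16], int Lane) {
  return (int32_t)Lanes[Lane];           // sext i8 -> i32
}
int32_t extract_lane_u_i8x16(const int8_t Lanes[16], int Lane) {
  return (int32_t)(uint8_t)Lanes[Lane];  // zext i8 -> i32
}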
diff --git a/lib/CodeGen/CGCUDANV.cpp b/lib/CodeGen/CGCUDANV.cpp
index 3f3f2c5e43..1c578bd151 100644
--- a/lib/CodeGen/CGCUDANV.cpp
+++ b/lib/CodeGen/CGCUDANV.cpp
@@ -137,7 +137,7 @@ CGNVCUDARuntime::addUnderscoredPrefixToName(StringRef FuncName) const {
CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM)
: CGCUDARuntime(CGM), Context(CGM.getLLVMContext()),
TheModule(CGM.getModule()),
- RelocatableDeviceCode(CGM.getLangOpts().CUDARelocatableDeviceCode) {
+ RelocatableDeviceCode(CGM.getLangOpts().GPURelocatableDeviceCode) {
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
@@ -353,8 +353,8 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
// global variable and save a reference in GpuBinaryHandle to be cleaned up
// in destructor on exit. Then associate all known kernels with the GPU binary
// handle so CUDA runtime can figure out what to call on the GPU side.
- std::unique_ptr<llvm::MemoryBuffer> CudaGpuBinary;
- if (!IsHIP) {
+ std::unique_ptr<llvm::MemoryBuffer> CudaGpuBinary = nullptr;
+ if (!CudaGpuBinaryFileName.empty()) {
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> CudaGpuBinaryOrErr =
llvm::MemoryBuffer::getFileOrSTDIN(CudaGpuBinaryFileName);
if (std::error_code EC = CudaGpuBinaryOrErr.getError()) {
@@ -388,15 +388,23 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
ModuleIDSectionName = "__hip_module_id";
ModuleIDPrefix = "__hip_";
- // For HIP, create an external symbol __hip_fatbin in section .hip_fatbin.
- // The external symbol is supposed to contain the fat binary but will be
- // populated somewhere else, e.g. by lld through link script.
- FatBinStr = new llvm::GlobalVariable(
+ if (CudaGpuBinary) {
+ // If fatbin is available from early finalization, create a string
+ // literal containing the fat binary loaded from the given file.
+ FatBinStr = makeConstantString(CudaGpuBinary->getBuffer(), "",
+ FatbinConstantName, 8);
+ } else {
+ // If fatbin is not available, create an external symbol
+ // __hip_fatbin in section .hip_fatbin. The external symbol is supposed
+ // to contain the fat binary but will be populated somewhere else,
+ // e.g. by lld through a linker script.
+ FatBinStr = new llvm::GlobalVariable(
CGM.getModule(), CGM.Int8Ty,
/*isConstant=*/true, llvm::GlobalValue::ExternalLinkage, nullptr,
"__hip_fatbin", nullptr,
llvm::GlobalVariable::NotThreadLocal);
- cast<llvm::GlobalVariable>(FatBinStr)->setSection(FatbinConstantName);
+ cast<llvm::GlobalVariable>(FatBinStr)->setSection(FatbinConstantName);
+ }
FatMagic = HIPFatMagic;
} else {
@@ -447,6 +455,8 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
// thread safety of the loaded program. Therefore we can assume sequential
// execution of constructor functions here.
if (IsHIP) {
+ auto Linkage = CudaGpuBinary ? llvm::GlobalValue::InternalLinkage :
+ llvm::GlobalValue::LinkOnceAnyLinkage;
llvm::BasicBlock *IfBlock =
llvm::BasicBlock::Create(Context, "if", ModuleCtorFunc);
llvm::BasicBlock *ExitBlock =
@@ -455,12 +465,13 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
// of HIP ABI.
GpuBinaryHandle = new llvm::GlobalVariable(
TheModule, VoidPtrPtrTy, /*isConstant=*/false,
- llvm::GlobalValue::LinkOnceAnyLinkage,
+ Linkage,
/*Initializer=*/llvm::ConstantPointerNull::get(VoidPtrPtrTy),
"__hip_gpubin_handle");
GpuBinaryHandle->setAlignment(CGM.getPointerAlign().getQuantity());
// Prevent the weak symbol in different shared libraries from being merged.
- GpuBinaryHandle->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ if (Linkage != llvm::GlobalValue::InternalLinkage)
+ GpuBinaryHandle->setVisibility(llvm::GlobalValue::HiddenVisibility);
Address GpuBinaryAddr(
GpuBinaryHandle,
CharUnits::fromQuantity(GpuBinaryHandle->getAlignment()));
@@ -509,7 +520,7 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
// Generate a unique module ID.
SmallString<64> ModuleID;
llvm::raw_svector_ostream OS(ModuleID);
- OS << ModuleIDPrefix << llvm::format("%x", FatbinWrapper->getGUID());
+ OS << ModuleIDPrefix << llvm::format("%" PRIx64, FatbinWrapper->getGUID());
llvm::Constant *ModuleIDConstant =
makeConstantString(ModuleID.str(), "", ModuleIDSectionName, 32);
diff --git a/lib/CodeGen/CGCXX.cpp b/lib/CodeGen/CGCXX.cpp
index d5945be434..8b0733fbec 100644
--- a/lib/CodeGen/CGCXX.cpp
+++ b/lib/CodeGen/CGCXX.cpp
@@ -23,7 +23,7 @@
#include "clang/AST/Mangle.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtCXX.h"
-#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
using namespace clang;
using namespace CodeGen;
@@ -276,7 +276,7 @@ static CGCallee BuildAppleKextVirtualCall(CodeGenFunction &CGF,
CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfnkxt");
llvm::Value *VFunc =
CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.PointerAlignInBytes);
- CGCallee Callee(GD.getDecl()->getCanonicalDecl(), VFunc);
+ CGCallee Callee(GD, VFunc);
return Callee;
}
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index 8857ffdde4..64e18e171e 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -23,11 +23,11 @@
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
@@ -59,6 +59,7 @@ unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
case CC_X86Pascal: return llvm::CallingConv::C;
// TODO: Add support for __vectorcall to LLVM.
case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
+ case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
@@ -67,11 +68,13 @@ unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
}
}
-/// Derives the 'this' type for codegen purposes, i.e. ignoring method
+/// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
/// qualification.
-/// FIXME: address space qualification?
-static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
+static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD,
+ const CXXMethodDecl *MD) {
QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
+ if (MD)
+ RecTy = Context.getAddrSpaceQualType(RecTy, MD->getType().getAddressSpace());
return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}
@@ -214,6 +217,9 @@ static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
if (PcsAttr *PCS = D->getAttr<PcsAttr>())
return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
+ if (D->hasAttr<AArch64VectorPcsAttr>())
+ return CC_AArch64VectorCall;
+
if (D->hasAttr<IntelOclBiccAttr>())
return CC_IntelOclBicc;
@@ -246,7 +252,7 @@ CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
// Add the 'this' pointer.
if (RD)
- argTypes.push_back(GetThisType(Context, RD));
+ argTypes.push_back(GetThisType(Context, RD, MD));
else
argTypes.push_back(Context.VoidPtrTy);
@@ -302,7 +308,7 @@ CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
SmallVector<CanQualType, 16> argTypes;
SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
- argTypes.push_back(GetThisType(Context, MD->getParent()));
+ argTypes.push_back(GetThisType(Context, MD->getParent(), MD));
bool PassParams = true;
@@ -529,7 +535,7 @@ const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
assert(MD->isVirtual() && "only methods have thunks");
CanQual<FunctionProtoType> FTP = GetFormalType(MD);
- CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
+ CanQualType ArgTys[] = { GetThisType(Context, MD->getParent(), MD) };
return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
/*chainCall=*/false, ArgTys,
FTP->getExtInfo(), {}, RequiredArgs(1));
@@ -543,7 +549,7 @@ CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
CanQual<FunctionProtoType> FTP = GetFormalType(CD);
SmallVector<CanQualType, 2> ArgTys;
const CXXRecordDecl *RD = CD->getParent();
- ArgTys.push_back(GetThisType(Context, RD));
+ ArgTys.push_back(GetThisType(Context, RD, CD));
if (CT == Ctor_CopyingClosure)
ArgTys.push_back(*FTP->param_type_begin());
if (RD->getNumVBases() > 0)
@@ -741,8 +747,8 @@ CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
FunctionType::ExtInfo info,
ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
RequiredArgs required) {
- assert(std::all_of(argTypes.begin(), argTypes.end(),
- [](CanQualType T) { return T.isCanonicalAsParam(); }));
+ assert(llvm::all_of(argTypes,
+ [](CanQualType T) { return T.isCanonicalAsParam(); }));
// Lookup or create unique function info.
llvm::FoldingSetNodeID ID;
@@ -1253,8 +1259,8 @@ static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
// Otherwise do coercion through memory. This is stupid, but simple.
Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
- Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.AllocaInt8PtrTy);
- Address SrcCasted = CGF.Builder.CreateBitCast(Src, CGF.AllocaInt8PtrTy);
+ Address Casted = CGF.Builder.CreateElementBitCast(Tmp, CGF.Int8Ty);
+ Address SrcCasted = CGF.Builder.CreateElementBitCast(Src, CGF.Int8Ty);
CGF.Builder.CreateMemCpy(Casted, SrcCasted,
llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
false);
@@ -1335,8 +1341,8 @@ static void CreateCoercedStore(llvm::Value *Src,
// to that information.
Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
CGF.Builder.CreateStore(Src, Tmp);
- Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.AllocaInt8PtrTy);
- Address DstCasted = CGF.Builder.CreateBitCast(Dst, CGF.AllocaInt8PtrTy);
+ Address Casted = CGF.Builder.CreateElementBitCast(Tmp, CGF.Int8Ty);
+ Address DstCasted = CGF.Builder.CreateElementBitCast(Dst, CGF.Int8Ty);
CGF.Builder.CreateMemCpy(DstCasted, Casted,
llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
false);
@@ -1709,6 +1715,8 @@ void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
if (CodeGenOpts.DisableRedZone)
FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
+ if (CodeGenOpts.IndirectTlsSegRefs)
+ FuncAttrs.addAttribute("indirect-tls-seg-refs");
if (CodeGenOpts.NoImplicitFloat)
FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
@@ -1785,6 +1793,8 @@ void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
if (CodeGenOpts.Backchain)
FuncAttrs.addAttribute("backchain");
+ // FIXME: The interaction of this attribute with the SLH command line flag
+ // has not been determined.
if (CodeGenOpts.SpeculativeLoadHardening)
FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
}
@@ -1831,7 +1841,7 @@ void CodeGenModule::ConstructAttributeList(
AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
CalleeInfo.getCalleeFunctionProtoType());
- const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
+ const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();
bool HasOptnone = false;
// FIXME: handle sseregparm someday...
@@ -1848,6 +1858,8 @@ void CodeGenModule::ConstructAttributeList(
FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
if (TargetDecl->hasAttr<ConvergentAttr>())
FuncAttrs.addAttribute(llvm::Attribute::Convergent);
+ if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
+ FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
AddAttributesFromFunctionProtoType(
@@ -1939,7 +1951,7 @@ void CodeGenModule::ConstructAttributeList(
FuncAttrs.addAttribute("disable-tail-calls",
llvm::toStringRef(DisableTailCalls));
- GetCPUAndFeaturesAttributes(TargetDecl, FuncAttrs);
+ GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
}
ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
@@ -3066,8 +3078,9 @@ void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
QualType type = param->getType();
- assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&
- "cannot emit delegate call arguments for inalloca arguments!");
+ if (isInAllocaArgument(CGM.getCXXABI(), type)) {
+ CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter");
+ }
// GetAddrOfLocalVar returns a pointer-to-pointer for references,
// but the argument needs to be the original pointer.
@@ -3948,15 +3961,28 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
} else if (I->hasLValue()) {
auto LV = I->getKnownLValue();
auto AS = LV.getAddressSpace();
+
if ((!ArgInfo.getIndirectByVal() &&
(LV.getAlignment() >=
- getContext().getTypeAlignInChars(I->Ty))) ||
- (ArgInfo.getIndirectByVal() &&
- ((AS != LangAS::Default && AS != LangAS::opencl_private &&
- AS != CGM.getASTAllocaAddressSpace())))) {
+ getContext().getTypeAlignInChars(I->Ty)))) {
+ NeedCopy = true;
+ }
+ if (!getLangOpts().OpenCL) {
+ if ((ArgInfo.getIndirectByVal() &&
+ (AS != LangAS::Default &&
+ AS != CGM.getASTAllocaAddressSpace()))) {
+ NeedCopy = true;
+ }
+ }
+ // For OpenCL, even if RV is located in the default or alloca address
+ // space, we don't want to perform an address space cast for it.
+ else if ((ArgInfo.getIndirectByVal() &&
+ Addr.getType()->getAddressSpace() != IRFuncTy->
+ getParamType(FirstIRArg)->getPointerAddressSpace())) {
NeedCopy = true;
}
}
+
if (NeedCopy) {
// Create an aligned temporary, and copy to it.
Address AI = CreateMemTempWithoutCast(
@@ -4238,6 +4264,13 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
#endif
+ // Update the largest vector width if any arguments have vector types.
+ for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
+ if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType()))
+ LargestVectorWidth = std::max(LargestVectorWidth,
+ VT->getPrimitiveSizeInBits());
+ }
+
// Compute the calling convention and attributes.
unsigned CallingConv;
llvm::AttributeList Attrs;
@@ -4251,8 +4284,11 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// Apply always_inline to all calls within flatten functions.
// FIXME: should this really take priority over __try, below?
if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
- !(Callee.getAbstractInfo().getCalleeDecl() &&
- Callee.getAbstractInfo().getCalleeDecl()->hasAttr<NoInlineAttr>())) {
+ !(Callee.getAbstractInfo().getCalleeDecl().getDecl() &&
+ Callee.getAbstractInfo()
+ .getCalleeDecl()
+ .getDecl()
+ ->hasAttr<NoInlineAttr>())) {
Attrs =
Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
llvm::Attribute::AlwaysInline);
@@ -4318,6 +4354,11 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (!CI->getType()->isVoidTy())
CI->setName("call");
+ // Update largest vector width from the return type.
+ if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType()))
+ LargestVectorWidth = std::max(LargestVectorWidth,
+ VT->getPrimitiveSizeInBits());
+
// Insert instrumentation or attach profile metadata at indirect call sites.
// For more details, see the comment before the definition of
// IPVK_IndirectCallTarget in InstrProfData.inc.
@@ -4332,7 +4373,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// Suppress tail calls if requested.
if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
- const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl();
+ const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
}
@@ -4479,7 +4520,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
} ();
// Emit the assume_aligned check on the return value.
- const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl();
+ const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
if (Ret.isScalar() && TargetDecl) {
if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
llvm::Value *OffsetValue = nullptr;
diff --git a/lib/CodeGen/CGCall.h b/lib/CodeGen/CGCall.h
index 99a36e4e12..c300808bea 100644
--- a/lib/CodeGen/CGCall.h
+++ b/lib/CodeGen/CGCall.h
@@ -46,21 +46,21 @@ class CGCalleeInfo {
/// The function prototype of the callee.
const FunctionProtoType *CalleeProtoTy;
/// The function declaration of the callee.
- const Decl *CalleeDecl;
+ GlobalDecl CalleeDecl;
public:
- explicit CGCalleeInfo() : CalleeProtoTy(nullptr), CalleeDecl(nullptr) {}
- CGCalleeInfo(const FunctionProtoType *calleeProtoTy, const Decl *calleeDecl)
+ explicit CGCalleeInfo() : CalleeProtoTy(nullptr), CalleeDecl() {}
+ CGCalleeInfo(const FunctionProtoType *calleeProtoTy, GlobalDecl calleeDecl)
: CalleeProtoTy(calleeProtoTy), CalleeDecl(calleeDecl) {}
CGCalleeInfo(const FunctionProtoType *calleeProtoTy)
- : CalleeProtoTy(calleeProtoTy), CalleeDecl(nullptr) {}
- CGCalleeInfo(const Decl *calleeDecl)
+ : CalleeProtoTy(calleeProtoTy), CalleeDecl() {}
+ CGCalleeInfo(GlobalDecl calleeDecl)
: CalleeProtoTy(nullptr), CalleeDecl(calleeDecl) {}
const FunctionProtoType *getCalleeFunctionProtoType() const {
return CalleeProtoTy;
}
- const Decl *getCalleeDecl() const { return CalleeDecl; }
+ const GlobalDecl getCalleeDecl() const { return CalleeDecl; }
};
/// All available information about a concrete callee.
@@ -171,7 +171,7 @@ public:
}
CGCalleeInfo getAbstractInfo() const {
if (isVirtual())
- return VirtualInfo.MD.getDecl();
+ return VirtualInfo.MD;
assert(isOrdinary());
return AbstractInfo;
}
diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp
index 468d81cbbb..cfc912cc9a 100644
--- a/lib/CodeGen/CGClass.cpp
+++ b/lib/CodeGen/CGClass.cpp
@@ -16,14 +16,15 @@
#include "CGDebugInfo.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
+#include "TargetInfo.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtCXX.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/CodeGen/CGFunctionInfo.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Transforms/Utils/SanitizerStats.h"
@@ -2012,8 +2013,19 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
bool NewPointerIsChecked) {
CallArgList Args;
+ LangAS SlotAS = E->getType().getAddressSpace();
+ QualType ThisType = D->getThisType(getContext());
+ LangAS ThisAS = ThisType.getTypePtr()->getPointeeType().getAddressSpace();
+ llvm::Value *ThisPtr = This.getPointer();
+ if (SlotAS != ThisAS) {
+ unsigned TargetThisAS = getContext().getTargetAddressSpace(ThisAS);
+ llvm::Type *NewType =
+ ThisPtr->getType()->getPointerElementType()->getPointerTo(TargetThisAS);
+ ThisPtr = getTargetHooks().performAddrSpaceCast(*this, This.getPointer(),
+ ThisAS, SlotAS, NewType);
+ }
// Push the this ptr.
- Args.add(RValue::get(This.getPointer()), D->getThisType(getContext()));
+ Args.add(RValue::get(ThisPtr), D->getThisType(getContext()));
// If this is a trivial constructor, emit a memcpy now before we lose
// the alignment information on the argument.
@@ -2122,7 +2134,7 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
CGM.getAddrOfCXXStructor(D, getFromCtorType(Type));
const CGFunctionInfo &Info = CGM.getTypes().arrangeCXXConstructorCall(
Args, D, Type, ExtraArgs.Prefix, ExtraArgs.Suffix, PassPrototypeArgs);
- CGCallee Callee = CGCallee::forDirect(CalleePtr, D);
+ CGCallee Callee = CGCallee::forDirect(CalleePtr, GlobalDecl(D, Type));
EmitCall(Info, Callee, ReturnValueSlot(), Args);
// Generate vtable assumptions if we're constructing a complete object
@@ -2808,7 +2820,7 @@ void CodeGenFunction::EmitForwardingCallToLambda(
// variadic arguments.
// Now emit our call.
- auto callee = CGCallee::forDirect(calleePtr, callOperator);
+ auto callee = CGCallee::forDirect(calleePtr, GlobalDecl(callOperator));
RValue RV = EmitCall(calleeFnInfo, callee, returnSlot, callArgs);
// If necessary, copy the returned value into the slot.
@@ -2839,7 +2851,7 @@ void CodeGenFunction::EmitLambdaBlockInvokeBody() {
CallArgList CallArgs;
QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda));
- Address ThisPtr = GetAddrOfBlockDecl(variable, false);
+ Address ThisPtr = GetAddrOfBlockDecl(variable);
CallArgs.add(RValue::get(ThisPtr.getPointer()), ThisType);
// Add the rest of the parameters.
diff --git a/lib/CodeGen/CGCleanup.cpp b/lib/CodeGen/CGCleanup.cpp
index 0a766d1762..3743d24f11 100644
--- a/lib/CodeGen/CGCleanup.cpp
+++ b/lib/CodeGen/CGCleanup.cpp
@@ -366,7 +366,7 @@ static llvm::SwitchInst *TransitionToCleanupSwitch(CodeGenFunction &CGF,
llvm::BasicBlock *Block) {
// If it's a branch, turn it into a switch whose default
// destination is its original target.
- llvm::TerminatorInst *Term = Block->getTerminator();
+ llvm::Instruction *Term = Block->getTerminator();
assert(Term && "can't transition block without terminator");
if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
@@ -589,7 +589,7 @@ static void ForwardPrebranchedFallthrough(llvm::BasicBlock *Exit,
llvm::BasicBlock *To) {
// Exit is the exit block of a cleanup, so it always terminates in
// an unconditional branch or a switch.
- llvm::TerminatorInst *Term = Exit->getTerminator();
+ llvm::Instruction *Term = Exit->getTerminator();
if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
assert(Br->isUnconditional() && Br->getSuccessor(0) == From);
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
index 7d6eb83f12..f3a07a30eb 100644
--- a/lib/CodeGen/CGDebugInfo.cpp
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -25,10 +25,10 @@
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Version.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "clang/Frontend/FrontendOptions.h"
#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Lex/ModuleMap.h"
@@ -41,6 +41,7 @@
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MD5.h"
@@ -180,8 +181,7 @@ void CGDebugInfo::setLocation(SourceLocation Loc) {
SourceManager &SM = CGM.getContext().getSourceManager();
auto *Scope = cast<llvm::DIScope>(LexicalBlockStack.back());
PresumedLoc PCLoc = SM.getPresumedLoc(CurLoc);
-
- if (PCLoc.isInvalid() || Scope->getFilename() == PCLoc.getFilename())
+ if (PCLoc.isInvalid() || Scope->getFile() == getOrCreateFile(CurLoc))
return;
if (auto *LBF = dyn_cast<llvm::DILexicalBlockFile>(Scope)) {
@@ -220,7 +220,7 @@ llvm::DIScope *CGDebugInfo::getContextDescriptor(const Decl *Context,
if (const auto *RDecl = dyn_cast<RecordDecl>(Context))
if (!RDecl->isDependentType())
return getOrCreateType(CGM.getContext().getTypeDeclType(RDecl),
- getOrCreateMainFile());
+ TheCU->getFile());
return Default;
}
@@ -234,6 +234,9 @@ PrintingPolicy CGDebugInfo::getPrintingPolicy() const {
if (CGM.getCodeGenOpts().EmitCodeView)
PP.MSVCFormatting = true;
+ // Apply -fdebug-prefix-map.
+ PP.RemapFilePaths = true;
+ PP.remapPath = [this](StringRef Path) { return remapDIPath(Path); };
return PP;
}
@@ -401,19 +404,18 @@ Optional<StringRef> CGDebugInfo::getSource(const SourceManager &SM,
llvm::DIFile *CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
if (!Loc.isValid())
// If Location is not valid then use main input file.
- return getOrCreateMainFile();
+ return TheCU->getFile();
SourceManager &SM = CGM.getContext().getSourceManager();
PresumedLoc PLoc = SM.getPresumedLoc(Loc);
- if (PLoc.isInvalid() || StringRef(PLoc.getFilename()).empty())
+ StringRef FileName = PLoc.getFilename();
+ if (PLoc.isInvalid() || FileName.empty())
// If the location is not valid then use main input file.
- return getOrCreateMainFile();
+ return TheCU->getFile();
// Cache the results.
- const char *fname = PLoc.getFilename();
- auto It = DIFileCache.find(fname);
-
+ auto It = DIFileCache.find(FileName.data());
if (It != DIFileCache.end()) {
// Verify that the information still exists.
if (llvm::Metadata *V = It->second)
@@ -426,22 +428,48 @@ llvm::DIFile *CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
Optional<llvm::DIFile::ChecksumInfo<StringRef>> CSInfo;
if (CSKind)
CSInfo.emplace(*CSKind, Checksum);
-
- llvm::DIFile *F = DBuilder.createFile(
- remapDIPath(PLoc.getFilename()), remapDIPath(getCurrentDirname()), CSInfo,
- getSource(SM, SM.getFileID(Loc)));
-
- DIFileCache[fname].reset(F);
+ return createFile(FileName, CSInfo, getSource(SM, SM.getFileID(Loc)));
+}
+
+llvm::DIFile *
+CGDebugInfo::createFile(StringRef FileName,
+ Optional<llvm::DIFile::ChecksumInfo<StringRef>> CSInfo,
+ Optional<StringRef> Source) {
+ StringRef Dir;
+ StringRef File;
+ std::string RemappedFile = remapDIPath(FileName);
+ std::string CurDir = remapDIPath(getCurrentDirname());
+ SmallString<128> DirBuf;
+ SmallString<128> FileBuf;
+ if (llvm::sys::path::is_absolute(RemappedFile)) {
+ // Strip the common prefix (if it is more than just "/") from current
+ // directory and FileName for a more space-efficient encoding.
+ auto FileIt = llvm::sys::path::begin(RemappedFile);
+ auto FileE = llvm::sys::path::end(RemappedFile);
+ auto CurDirIt = llvm::sys::path::begin(CurDir);
+ auto CurDirE = llvm::sys::path::end(CurDir);
+ for (; CurDirIt != CurDirE && *CurDirIt == *FileIt; ++CurDirIt, ++FileIt)
+ llvm::sys::path::append(DirBuf, *CurDirIt);
+ if (std::distance(llvm::sys::path::begin(CurDir), CurDirIt) == 1) {
+ // The common prefix is only the root; stripping it would cause
+ // LLVM diagnostic locations to be more confusing.
+ Dir = {};
+ File = RemappedFile;
+ } else {
+ for (; FileIt != FileE; ++FileIt)
+ llvm::sys::path::append(FileBuf, *FileIt);
+ Dir = DirBuf;
+ File = FileBuf;
+ }
+ } else {
+ Dir = CurDir;
+ File = RemappedFile;
+ }
+ llvm::DIFile *F = DBuilder.createFile(File, Dir, CSInfo, Source);
+ DIFileCache[FileName.data()].reset(F);
return F;
}
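The component-wise split above is easiest to see on a concrete pair of paths. A self-contained sketch using the same llvm::sys::path iteration (the example values are hypothetical):

#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Path.h"

// e.g. CurDir = "/home/user/build", File = "/home/user/src/foo.c"
//   -> Dir = "/home/user", Rest = "src/foo.c"
// If only the root "/" is shared, the caller above keeps the full path.
void splitAgainstCompDir(llvm::StringRef CurDir, llvm::StringRef File,
                         llvm::SmallVectorImpl<char> &Dir,
                         llvm::SmallVectorImpl<char> &Rest) {
  auto FI = llvm::sys::path::begin(File), FE = llvm::sys::path::end(File);
  auto DI = llvm::sys::path::begin(CurDir), DE = llvm::sys::path::end(CurDir);
  for (; DI != DE && FI != FE && *DI == *FI; ++DI, ++FI)
    llvm::sys::path::append(Dir, *DI);   // shared leading components
  for (; FI != FE; ++FI)
    llvm::sys::path::append(Rest, *FI);  // remainder relative to Dir
}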
-llvm::DIFile *CGDebugInfo::getOrCreateMainFile() {
- return DBuilder.createFile(
- remapDIPath(TheCU->getFilename()), remapDIPath(TheCU->getDirectory()),
- TheCU->getFile()->getChecksum(),
- CGM.getCodeGenOpts().EmbedSource ? TheCU->getSource() : None);
-}
-
std::string CGDebugInfo::remapDIPath(StringRef Path) const {
for (const auto &Entry : DebugPrefixMap)
if (Path.startswith(Entry.first))
@@ -527,11 +555,11 @@ void CGDebugInfo::CreateCompileUnit() {
llvm::dwarf::SourceLanguage LangTag;
const LangOptions &LO = CGM.getLangOpts();
if (LO.CPlusPlus) {
- if (LO.ObjC1)
+ if (LO.ObjC)
LangTag = llvm::dwarf::DW_LANG_ObjC_plus_plus;
else
LangTag = llvm::dwarf::DW_LANG_C_plus_plus;
- } else if (LO.ObjC1) {
+ } else if (LO.ObjC) {
LangTag = llvm::dwarf::DW_LANG_ObjC;
} else if (LO.RenderScript) {
LangTag = llvm::dwarf::DW_LANG_GOOGLE_RenderScript;
@@ -545,7 +573,7 @@ void CGDebugInfo::CreateCompileUnit() {
// Figure out which version of the ObjC runtime we have.
unsigned RuntimeVers = 0;
- if (LO.ObjC1)
+ if (LO.ObjC)
RuntimeVers = LO.ObjCRuntime.isNonFragile() ? 2 : 1;
llvm::DICompileUnit::DebugEmissionKind EmissionKind;
@@ -566,26 +594,33 @@ void CGDebugInfo::CreateCompileUnit() {
break;
}
+ uint64_t DwoId = 0;
+ auto &CGOpts = CGM.getCodeGenOpts();
+ // The DIFile used by the CU is distinct from the main source
+ // file. Its directory part specifies what becomes the
+ // DW_AT_comp_dir (the compilation directory), even if the source
+ // file was specified with an absolute path.
if (CSKind)
CSInfo.emplace(*CSKind, Checksum);
+ llvm::DIFile *CUFile = DBuilder.createFile(
+ remapDIPath(MainFileName), remapDIPath(getCurrentDirname()), CSInfo,
+ getSource(SM, SM.getMainFileID()));
// Create new compile unit.
- // FIXME - Eliminate TheCU.
- auto &CGOpts = CGM.getCodeGenOpts();
TheCU = DBuilder.createCompileUnit(
- LangTag,
- DBuilder.createFile(remapDIPath(MainFileName),
- remapDIPath(getCurrentDirname()), CSInfo,
- getSource(SM, SM.getMainFileID())),
- CGOpts.EmitVersionIdentMetadata ? Producer : "",
+ LangTag, CUFile, CGOpts.EmitVersionIdentMetadata ? Producer : "",
LO.Optimize || CGOpts.PrepareForLTO || CGOpts.PrepareForThinLTO,
CGOpts.DwarfDebugFlags, RuntimeVers,
- CGOpts.EnableSplitDwarf ? "" : CGOpts.SplitDwarfFile, EmissionKind,
- 0 /* DWOid */, CGOpts.SplitDwarfInlining, CGOpts.DebugInfoForProfiling,
+ (CGOpts.getSplitDwarfMode() != CodeGenOptions::NoFission)
+ ? ""
+ : CGOpts.SplitDwarfFile,
+ EmissionKind, DwoId, CGOpts.SplitDwarfInlining,
+ CGOpts.DebugInfoForProfiling,
CGM.getTarget().getTriple().isNVPTX()
? llvm::DICompileUnit::DebugNameTableKind::None
: static_cast<llvm::DICompileUnit::DebugNameTableKind>(
- CGOpts.DebugNameTable));
+ CGOpts.DebugNameTable),
+ CGOpts.DebugRangesBaseAddress);
}
llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
@@ -603,9 +638,9 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
return nullptr;
case BuiltinType::ObjCClass:
if (!ClassTy)
- ClassTy = DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
- "objc_class", TheCU,
- getOrCreateMainFile(), 0);
+ ClassTy =
+ DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
+ "objc_class", TheCU, TheCU->getFile(), 0);
return ClassTy;
case BuiltinType::ObjCId: {
// typedef struct objc_class *Class;
@@ -617,21 +652,21 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
return ObjTy;
if (!ClassTy)
- ClassTy = DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
- "objc_class", TheCU,
- getOrCreateMainFile(), 0);
+ ClassTy =
+ DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
+ "objc_class", TheCU, TheCU->getFile(), 0);
unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
auto *ISATy = DBuilder.createPointerType(ClassTy, Size);
- ObjTy = DBuilder.createStructType(
- TheCU, "objc_object", getOrCreateMainFile(), 0, 0, 0,
- llvm::DINode::FlagZero, nullptr, llvm::DINodeArray());
+ ObjTy = DBuilder.createStructType(TheCU, "objc_object", TheCU->getFile(), 0,
+ 0, 0, llvm::DINode::FlagZero, nullptr,
+ llvm::DINodeArray());
DBuilder.replaceArrays(
ObjTy, DBuilder.getOrCreateArray(&*DBuilder.createMemberType(
- ObjTy, "isa", getOrCreateMainFile(), 0, Size, 0, 0,
+ ObjTy, "isa", TheCU->getFile(), 0, Size, 0, 0,
llvm::DINode::FlagZero, ISATy)));
return ObjTy;
}
@@ -639,7 +674,7 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
if (!SelTy)
SelTy = DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
"objc_selector", TheCU,
- getOrCreateMainFile(), 0);
+ TheCU->getFile(), 0);
return SelTy;
}
@@ -658,6 +693,10 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
return getOrCreateStructPtrType("opencl_queue_t", OCLQueueDITy);
case BuiltinType::OCLReserveID:
return getOrCreateStructPtrType("opencl_reserve_id_t", OCLReserveIDDITy);
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id: \
+ return getOrCreateStructPtrType("opencl_" #ExtType, Id##Ty);
+#include "clang/Basic/OpenCLExtensionTypes.def"
case BuiltinType::UChar:
case BuiltinType::Char_U:
@@ -956,7 +995,7 @@ llvm::DIType *CGDebugInfo::getOrCreateStructPtrType(StringRef Name,
if (Cache)
return Cache;
Cache = DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type, Name,
- TheCU, getOrCreateMainFile(), 0);
+ TheCU, TheCU->getFile(), 0);
unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
Cache = DBuilder.createPointerType(Cache, Size);
return Cache;
@@ -1093,6 +1132,7 @@ static unsigned getDwarfCC(CallingConv CC) {
case CC_X86_64SysV:
return llvm::dwarf::DW_CC_LLVM_X86_64SysV;
case CC_AAPCS:
+ case CC_AArch64VectorCall:
return llvm::dwarf::DW_CC_LLVM_AAPCS;
case CC_AAPCS_VFP:
return llvm::dwarf::DW_CC_LLVM_AAPCS_VFP;
@@ -1485,16 +1525,16 @@ llvm::DISubprogram *CGDebugInfo::CreateCXXMemberFunction(
// Collect virtual method info.
llvm::DIType *ContainingType = nullptr;
- unsigned Virtuality = 0;
unsigned VIndex = 0;
llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
+ llvm::DISubprogram::DISPFlags SPFlags = llvm::DISubprogram::SPFlagZero;
int ThisAdjustment = 0;
if (Method->isVirtual()) {
if (Method->isPure())
- Virtuality = llvm::dwarf::DW_VIRTUALITY_pure_virtual;
+ SPFlags |= llvm::DISubprogram::SPFlagPureVirtual;
else
- Virtuality = llvm::dwarf::DW_VIRTUALITY_virtual;
+ SPFlags |= llvm::DISubprogram::SPFlagVirtual;
if (CGM.getTarget().getCXXABI().isItaniumFamily()) {
// It doesn't make sense to give a virtual destructor a vtable index,
@@ -1546,12 +1586,13 @@ llvm::DISubprogram *CGDebugInfo::CreateCXXMemberFunction(
Flags |= llvm::DINode::FlagLValueReference;
if (Method->getRefQualifier() == RQ_RValue)
Flags |= llvm::DINode::FlagRValueReference;
+ if (CGM.getLangOpts().Optimize)
+ SPFlags |= llvm::DISubprogram::SPFlagOptimized;
llvm::DINodeArray TParamsArray = CollectFunctionTemplateParams(Method, Unit);
llvm::DISubprogram *SP = DBuilder.createMethod(
RecordTy, MethodName, MethodLinkageName, MethodDefUnit, MethodLine,
- MethodTy, /*isLocalToUnit=*/false, /*isDefinition=*/false, Virtuality,
- VIndex, ThisAdjustment, ContainingType, Flags, CGM.getLangOpts().Optimize,
+ MethodTy, VIndex, ThisAdjustment, ContainingType, Flags, SPFlags,
TParamsArray.get());
SPCache[Method->getCanonicalDecl()].reset(SP);
@@ -1776,6 +1817,29 @@ CGDebugInfo::CollectFunctionTemplateParams(const FunctionDecl *FD,
return llvm::DINodeArray();
}
+llvm::DINodeArray CGDebugInfo::CollectVarTemplateParams(const VarDecl *VL,
+ llvm::DIFile *Unit) {
+ if (auto *TS = dyn_cast<VarTemplateSpecializationDecl>(VL)) {
+ auto T = TS->getSpecializedTemplateOrPartial();
+ auto TA = TS->getTemplateArgs().asArray();
+ // Collect parameters for a partial specialization
+ if (T.is<VarTemplatePartialSpecializationDecl *>()) {
+ const TemplateParameterList *TList =
+ T.get<VarTemplatePartialSpecializationDecl *>()
+ ->getTemplateParameters();
+ return CollectTemplateParams(TList, TA, Unit);
+ }
+
+ // Collect parameters for an explicit specialization
+ if (T.is<VarTemplateDecl *>()) {
+ const TemplateParameterList *TList = T.get<VarTemplateDecl *>()
+ ->getTemplateParameters();
+ return CollectTemplateParams(TList, TA, Unit);
+ }
+ }
+ return llvm::DINodeArray();
+}
+
llvm::DINodeArray CGDebugInfo::CollectCXXTemplateParams(
const ClassTemplateSpecializationDecl *TSpecial, llvm::DIFile *Unit) {
// Always get the full list of parameters, not just the ones from
@@ -1931,8 +1995,17 @@ static bool isDefinedInClangModule(const RecordDecl *RD) {
if (auto *CXXDecl = dyn_cast<CXXRecordDecl>(RD)) {
if (!CXXDecl->isCompleteDefinition())
return false;
+ // Check whether RD is a template.
auto TemplateKind = CXXDecl->getTemplateSpecializationKind();
if (TemplateKind != TSK_Undeclared) {
+ // Unfortunately getOwningModule() isn't accurate enough to find the
+ // owning module of a ClassTemplateSpecializationDecl that is inside a
+ // namespace spanning multiple modules.
+ bool Explicit = false;
+ if (auto *TD = dyn_cast<ClassTemplateSpecializationDecl>(CXXDecl))
+ Explicit = TD->isExplicitInstantiationOrSpecialization();
+ if (!Explicit && CXXDecl->getEnclosingNamespaceContext())
+ return false;
// This is a template, check the origin of the first member.
if (CXXDecl->field_begin() == CXXDecl->field_end())
return TemplateKind == TSK_ExplicitInstantiationDeclaration;
@@ -2480,9 +2553,9 @@ llvm::DIType *CGDebugInfo::CreateType(const ArrayType *Ty, llvm::DIFile *Unit) {
Count = CAT->getSize().getZExtValue();
else if (const auto *VAT = dyn_cast<VariableArrayType>(Ty)) {
if (Expr *Size = VAT->getSizeExpr()) {
- llvm::APSInt V;
- if (Size->EvaluateAsInt(V, CGM.getContext()))
- Count = V.getExtValue();
+ Expr::EvalResult Result;
+ if (Size->EvaluateAsInt(Result, CGM.getContext()))
+ Count = Result.Val.getInt().getExtValue();
}
}
@@ -2548,9 +2621,9 @@ llvm::DIType *CGDebugInfo::CreateType(const MemberPointerType *Ty,
const FunctionProtoType *FPT =
Ty->getPointeeType()->getAs<FunctionProtoType>();
return DBuilder.createMemberPointerType(
- getOrCreateInstanceMethodType(CGM.getContext().getPointerType(QualType(
- Ty->getClass(), FPT->getTypeQuals())),
- FPT, U),
+ getOrCreateInstanceMethodType(
+ CXXMethodDecl::getThisType(FPT, Ty->getMostRecentCXXRecordDecl()),
+ FPT, U),
ClassType, Size, /*Align=*/0, Flags);
}
@@ -3070,6 +3143,7 @@ void CGDebugInfo::collectFunctionDeclProps(GlobalDecl GD, llvm::DIFile *Unit,
void CGDebugInfo::collectVarDeclProps(const VarDecl *VD, llvm::DIFile *&Unit,
unsigned &LineNo, QualType &T,
StringRef &Name, StringRef &LinkageName,
+ llvm::MDTuple *&TemplateParameters,
llvm::DIScope *&VDContext) {
Unit = getOrCreateFile(VD->getLocation());
LineNo = getLineNumber(VD->getLocation());
@@ -3093,6 +3167,13 @@ void CGDebugInfo::collectVarDeclProps(const VarDecl *VD, llvm::DIFile *&Unit,
if (LinkageName == Name)
LinkageName = StringRef();
+ if (isa<VarTemplateSpecializationDecl>(VD)) {
+ llvm::DINodeArray parameterNodes = CollectVarTemplateParams(VD, &*Unit);
+ TemplateParameters = parameterNodes.get();
+ } else {
+ TemplateParameters = nullptr;
+ }
+
// Since we emit declarations (DW_AT_members) for static members, place the
// definition of those static members in the namespace they were declared in
// in the source code (the lexical decl context).
@@ -3119,6 +3200,7 @@ llvm::DISubprogram *CGDebugInfo::getFunctionFwdDeclOrStub(GlobalDecl GD,
llvm::DINodeArray TParamsArray;
StringRef Name, LinkageName;
llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
+ llvm::DISubprogram::DISPFlags SPFlags = llvm::DISubprogram::SPFlagZero;
SourceLocation Loc = GD.getDecl()->getLocation();
llvm::DIFile *Unit = getOrCreateFile(Loc);
llvm::DIScope *DContext = Unit;
@@ -3135,20 +3217,23 @@ llvm::DISubprogram *CGDebugInfo::getFunctionFwdDeclOrStub(GlobalDecl GD,
CallingConv CC = FD->getType()->castAs<FunctionType>()->getCallConv();
QualType FnType = CGM.getContext().getFunctionType(
FD->getReturnType(), ArgTypes, FunctionProtoType::ExtProtoInfo(CC));
+ if (!FD->isExternallyVisible())
+ SPFlags |= llvm::DISubprogram::SPFlagLocalToUnit;
+ if (CGM.getLangOpts().Optimize)
+ SPFlags |= llvm::DISubprogram::SPFlagOptimized;
+
if (Stub) {
+ Flags |= getCallSiteRelatedAttrs();
+ SPFlags |= llvm::DISubprogram::SPFlagDefinition;
return DBuilder.createFunction(
DContext, Name, LinkageName, Unit, Line,
- getOrCreateFunctionType(GD.getDecl(), FnType, Unit),
- !FD->isExternallyVisible(),
- /* isDefinition = */ true, 0, Flags, CGM.getLangOpts().Optimize,
+ getOrCreateFunctionType(GD.getDecl(), FnType, Unit), 0, Flags, SPFlags,
TParamsArray.get(), getFunctionDeclaration(FD));
}
llvm::DISubprogram *SP = DBuilder.createTempFunctionFwdDecl(
DContext, Name, LinkageName, Unit, Line,
- getOrCreateFunctionType(GD.getDecl(), FnType, Unit),
- !FD->isExternallyVisible(),
- /* isDefinition = */ false, 0, Flags, CGM.getLangOpts().Optimize,
+ getOrCreateFunctionType(GD.getDecl(), FnType, Unit), 0, Flags, SPFlags,
TParamsArray.get(), getFunctionDeclaration(FD));
const FunctionDecl *CanonDecl = FD->getCanonicalDecl();
FwdDeclReplaceMap.emplace_back(std::piecewise_construct,
@@ -3173,12 +3258,14 @@ CGDebugInfo::getGlobalVariableForwardDeclaration(const VarDecl *VD) {
llvm::DIFile *Unit = getOrCreateFile(Loc);
llvm::DIScope *DContext = Unit;
unsigned Line = getLineNumber(Loc);
+ llvm::MDTuple *TemplateParameters = nullptr;
- collectVarDeclProps(VD, Unit, Line, T, Name, LinkageName, DContext);
+ collectVarDeclProps(VD, Unit, Line, T, Name, LinkageName, TemplateParameters,
+ DContext);
auto Align = getDeclAlignIfRequired(VD, CGM.getContext());
auto *GV = DBuilder.createTempGlobalVariableFwdDecl(
DContext, Name, LinkageName, Unit, Line, getOrCreateType(T, Unit),
- !VD->isExternallyVisible(), nullptr, Align);
+ !VD->isExternallyVisible(), nullptr, TemplateParameters, Align);
FwdDeclReplaceMap.emplace_back(
std::piecewise_construct,
std::make_tuple(cast<VarDecl>(VD->getCanonicalDecl())),
@@ -3334,6 +3421,7 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, SourceLocation Loc,
bool HasDecl = (D != nullptr);
llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
+ llvm::DISubprogram::DISPFlags SPFlags = llvm::DISubprogram::SPFlagZero;
llvm::DIFile *Unit = getOrCreateFile(Loc);
llvm::DIScope *FDContext = Unit;
llvm::DINodeArray TParamsArray;
@@ -3373,6 +3461,15 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, SourceLocation Loc,
if (CurFuncIsThunk)
Flags |= llvm::DINode::FlagThunk;
+ if (Fn->hasLocalLinkage())
+ SPFlags |= llvm::DISubprogram::SPFlagLocalToUnit;
+ if (CGM.getLangOpts().Optimize)
+ SPFlags |= llvm::DISubprogram::SPFlagOptimized;
+
+ llvm::DINode::DIFlags FlagsForDef = Flags | getCallSiteRelatedAttrs();
+ llvm::DISubprogram::DISPFlags SPFlagsForDef =
+ SPFlags | llvm::DISubprogram::SPFlagDefinition;
+
unsigned LineNo = getLineNumber(Loc);
unsigned ScopeLine = getLineNumber(ScopeLoc);
@@ -3383,9 +3480,8 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, SourceLocation Loc,
// are emitted as CU level entities by the backend.
llvm::DISubprogram *SP = DBuilder.createFunction(
FDContext, Name, LinkageName, Unit, LineNo,
- getOrCreateFunctionType(D, FnType, Unit), Fn->hasLocalLinkage(),
- true /*definition*/, ScopeLine, Flags, CGM.getLangOpts().Optimize,
- TParamsArray.get(), getFunctionDeclaration(D));
+ getOrCreateFunctionType(D, FnType, Unit), ScopeLine, FlagsForDef,
+ SPFlagsForDef, TParamsArray.get(), getFunctionDeclaration(D));
Fn->setSubprogram(SP);
// We might get here with a VarDecl in the case we're generating
// code for the initialization of globals. Do not record these decls
@@ -3405,8 +3501,7 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, SourceLocation Loc,
cast<llvm::DICompositeType>(It->second);
llvm::DISubprogram *FD = DBuilder.createFunction(
InterfaceDecl, Name, LinkageName, Unit, LineNo,
- getOrCreateFunctionType(D, FnType, Unit), Fn->hasLocalLinkage(),
- false /*definition*/, ScopeLine, Flags, CGM.getLangOpts().Optimize,
+ getOrCreateFunctionType(D, FnType, Unit), ScopeLine, Flags, SPFlags,
TParamsArray.get());
DBuilder.finalizeSubprogram(FD);
ObjCMethodCache[ID].push_back(FD);
@@ -3455,11 +3550,13 @@ void CGDebugInfo::EmitFunctionDecl(GlobalDecl GD, SourceLocation Loc,
}
unsigned LineNo = getLineNumber(Loc);
unsigned ScopeLine = 0;
+ llvm::DISubprogram::DISPFlags SPFlags = llvm::DISubprogram::SPFlagZero;
+ if (CGM.getLangOpts().Optimize)
+ SPFlags |= llvm::DISubprogram::SPFlagOptimized;
DBuilder.retainType(DBuilder.createFunction(
FDContext, Name, LinkageName, Unit, LineNo,
- getOrCreateFunctionType(D, FnType, Unit), false /*internalLinkage*/,
- false /*definition*/, ScopeLine, Flags, CGM.getLangOpts().Optimize,
+ getOrCreateFunctionType(D, FnType, Unit), ScopeLine, Flags, SPFlags,
TParamsArray.get(), getFunctionDeclaration(D)));
}
@@ -3488,7 +3585,7 @@ void CGDebugInfo::EmitLocation(CGBuilderTy &Builder, SourceLocation Loc) {
// Update our current location
setLocation(Loc);
- if (CurLoc.isInvalid() || CurLoc.isMacroID())
+ if (CurLoc.isInvalid() || CurLoc.isMacroID() || LexicalBlockStack.empty())
return;
llvm::MDNode *Scope = LexicalBlockStack.back();
@@ -4089,7 +4186,9 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
unsigned LineNo;
StringRef DeclName, LinkageName;
QualType T;
- collectVarDeclProps(D, Unit, LineNo, T, DeclName, LinkageName, DContext);
+ llvm::MDTuple *TemplateParameters = nullptr;
+ collectVarDeclProps(D, Unit, LineNo, T, DeclName, LinkageName,
+ TemplateParameters, DContext);
// Attempt to store one global variable for the declaration - even if we
// emit a lot of fields.
@@ -4115,7 +4214,8 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
DContext, DeclName, LinkageName, Unit, LineNo, getOrCreateType(T, Unit),
Var->hasLocalLinkage(),
Expr.empty() ? nullptr : DBuilder.createExpression(Expr),
- getOrCreateStaticDataMemberDeclarationOrNull(D), Align);
+ getOrCreateStaticDataMemberDeclarationOrNull(D), TemplateParameters,
+ Align);
Var->addDebugInfo(GVE);
}
DeclCache[D->getCanonicalDecl()].reset(GVE);
@@ -4172,10 +4272,19 @@ void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD, const APValue &Init) {
InitExpr = DBuilder.createConstantValueExpression(
Init.getFloat().bitcastToAPInt().getZExtValue());
}
+
+ llvm::MDTuple *TemplateParameters = nullptr;
+
+ if (isa<VarTemplateSpecializationDecl>(VD))
+ if (VarD) {
+ llvm::DINodeArray parameterNodes = CollectVarTemplateParams(VarD, &*Unit);
+ TemplateParameters = parameterNodes.get();
+ }
+
GV.reset(DBuilder.createGlobalVariableExpression(
DContext, Name, StringRef(), Unit, getLineNumber(VD->getLocation()), Ty,
true, InitExpr, getOrCreateStaticDataMemberDeclarationOrNull(VarD),
- Align));
+ TemplateParameters, Align));
}
llvm::DIScope *CGDebugInfo::getCurrentContextDescriptor(const Decl *D) {
@@ -4364,7 +4473,7 @@ void CGDebugInfo::EmitExplicitCastType(QualType Ty) {
if (CGM.getCodeGenOpts().getDebugInfo() < codegenoptions::LimitedDebugInfo)
return;
- if (auto *DieTy = getOrCreateType(Ty, getOrCreateMainFile()))
+ if (auto *DieTy = getOrCreateType(Ty, TheCU->getFile()))
// Don't ignore in case of explicit cast where it is referenced indirectly.
DBuilder.retainType(DieTy);
}
@@ -4376,3 +4485,22 @@ llvm::DebugLoc CGDebugInfo::SourceLocToDebugLoc(SourceLocation Loc) {
llvm::MDNode *Scope = LexicalBlockStack.back();
return llvm::DebugLoc::get(getLineNumber(Loc), getColumnNumber(Loc), Scope);
}
+
+llvm::DINode::DIFlags CGDebugInfo::getCallSiteRelatedAttrs() const {
+ // Call site-related attributes are only useful in optimized programs, and
+ // when there's a possibility of debugging backtraces.
+ if (!CGM.getLangOpts().Optimize || DebugKind == codegenoptions::NoDebugInfo ||
+ DebugKind == codegenoptions::LocTrackingOnly)
+ return llvm::DINode::FlagZero;
+
+ // Call site-related attributes are available in DWARF v5. Some debuggers,
+ // while not fully DWARF v5-compliant, may accept these attributes as if they
+ // were part of DWARF v4.
+ bool SupportsDWARFv4Ext =
+ CGM.getCodeGenOpts().DwarfVersion == 4 &&
+ CGM.getCodeGenOpts().getDebuggerTuning() == llvm::DebuggerKind::LLDB;
+ if (!SupportsDWARFv4Ext && CGM.getCodeGenOpts().DwarfVersion < 5)
+ return llvm::DINode::FlagZero;
+
+ return llvm::DINode::FlagAllCallsDescribed;
+}
diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h
index 8641c2d896..031e40b9dd 100644
--- a/lib/CodeGen/CGDebugInfo.h
+++ b/lib/CodeGen/CGDebugInfo.h
@@ -20,8 +20,8 @@
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeOrdering.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/SourceLocation.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Optional.h"
@@ -76,6 +76,9 @@ class CGDebugInfo {
llvm::DIType *OCLQueueDITy = nullptr;
llvm::DIType *OCLNDRangeDITy = nullptr;
llvm::DIType *OCLReserveIDDITy = nullptr;
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ llvm::DIType *Id##Ty = nullptr;
+#include "clang/Basic/OpenCLExtensionTypes.def"
/// Cache of previously constructed Types.
llvm::DenseMap<const void *, llvm::TrackingMDRef> TypeCache;
@@ -248,6 +251,11 @@ class CGDebugInfo {
llvm::DINodeArray CollectFunctionTemplateParams(const FunctionDecl *FD,
llvm::DIFile *Unit);
+ /// A helper function to collect debug info for variable template
+ /// parameters.
+ llvm::DINodeArray CollectVarTemplateParams(const VarDecl *VD,
+ llvm::DIFile *Unit);
+
/// A helper function to collect debug info for template
/// parameters.
llvm::DINodeArray
@@ -333,6 +341,9 @@ public:
void finalize();
+ /// Remap a given path with the current debug prefix map
+ std::string remapDIPath(StringRef) const;
+
/// Register VLA size expression debug node with the qualified type.
void registerVLASizeExpression(QualType Ty, llvm::Metadata *SizeExpr) {
SizeExprCache[Ty] = SizeExpr;
@@ -520,9 +531,6 @@ private:
/// Create new compile unit.
void CreateCompileUnit();
- /// Remap a given path with the current debug prefix map
- std::string remapDIPath(StringRef) const;
-
/// Compute the file checksum debug info for input file ID.
Optional<llvm::DIFile::ChecksumKind>
computeChecksum(FileID FID, SmallString<32> &Checksum) const;
@@ -530,11 +538,15 @@ private:
/// Get the source of the given file ID.
Optional<StringRef> getSource(const SourceManager &SM, FileID FID);
- /// Get the file debug info descriptor for the input location.
+ /// Convenience function to get the file debug info descriptor for the input
+ /// location.
llvm::DIFile *getOrCreateFile(SourceLocation Loc);
- /// Get the file info for main compile unit.
- llvm::DIFile *getOrCreateMainFile();
+ /// Create a file debug info descriptor for a source file.
+ llvm::DIFile *
+ createFile(StringRef FileName,
+ Optional<llvm::DIFile::ChecksumInfo<StringRef>> CSInfo,
+ Optional<StringRef> Source);
/// Get the type from the cache or create a new type if necessary.
llvm::DIType *getOrCreateType(QualType Ty, llvm::DIFile *Fg);
@@ -603,6 +615,11 @@ private:
unsigned LineNo, StringRef LinkageName,
llvm::GlobalVariable *Var, llvm::DIScope *DContext);
+
+ /// Return flags which enable debug info emission for call sites, provided
+ /// that it is supported and enabled.
+ llvm::DINode::DIFlags getCallSiteRelatedAttrs() const;
+
/// Get the printing policy for producing names for debug info.
PrintingPolicy getPrintingPolicy() const;
@@ -645,7 +662,9 @@ private:
/// Collect various properties of a VarDecl.
void collectVarDeclProps(const VarDecl *VD, llvm::DIFile *&Unit,
unsigned &LineNo, QualType &T, StringRef &Name,
- StringRef &LinkageName, llvm::DIScope *&VDContext);
+ StringRef &LinkageName,
+ llvm::MDTuple *&TemplateParameters,
+ llvm::DIScope *&VDContext);
/// Allocate a copy of \p A using the DebugInfoNames allocator
/// and return a reference to it. If multiple arguments are given the strings
@@ -725,7 +744,7 @@ public:
/// function \p InlinedFn. The current debug location becomes the inlined call
/// site of the inlined function.
ApplyInlineDebugLocation(CodeGenFunction &CGF, GlobalDecl InlinedFn);
- /// Restore everything back to the orginial state.
+ /// Restore everything back to the original state.
~ApplyInlineDebugLocation();
};
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index e74066ef43..f4fef45a12 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -26,10 +26,10 @@
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclOpenMP.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalVariable.h"
@@ -754,9 +754,9 @@ void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
// If we're emitting a value with lifetime, we have to do the
// initialization *before* we leave the cleanup scopes.
- if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(init)) {
- enterFullExpression(ewc);
- init = ewc->getSubExpr();
+ if (const FullExpr *fe = dyn_cast<FullExpr>(init)) {
+ enterFullExpression(fe);
+ init = fe->getSubExpr();
}
CodeGenFunction::RunCleanupsScope Scope(*this);
@@ -963,6 +963,49 @@ static llvm::Value *shouldUseMemSetToInitialize(llvm::Constant *Init,
return llvm::isBytewiseValue(Init);
}
+static Address createUnnamedGlobalFrom(CodeGenModule &CGM, const VarDecl &D,
+ CGBuilderTy &Builder,
+ llvm::Constant *Constant,
+ CharUnits Align) {
+ auto FunctionName = [&](const DeclContext *DC) -> std::string {
+ if (const auto *FD = dyn_cast<FunctionDecl>(DC)) {
+ if (const auto *CC = dyn_cast<CXXConstructorDecl>(FD))
+ return CC->getNameAsString();
+ if (const auto *CD = dyn_cast<CXXDestructorDecl>(FD))
+ return CD->getNameAsString();
+ return CGM.getMangledName(FD);
+ } else if (const auto *OM = dyn_cast<ObjCMethodDecl>(DC)) {
+ return OM->getNameAsString();
+ } else if (isa<BlockDecl>(DC)) {
+ return "<block>";
+ } else if (isa<CapturedDecl>(DC)) {
+ return "<captured>";
+ } else {
+ llvm::llvm_unreachable_internal("expected a function or method");
+ }
+ };
+
+ auto *Ty = Constant->getType();
+ bool isConstant = true;
+ llvm::GlobalVariable *InsertBefore = nullptr;
+ unsigned AS = CGM.getContext().getTargetAddressSpace(
+ CGM.getStringLiteralAddressSpace());
+ llvm::GlobalVariable *GV = new llvm::GlobalVariable(
+ CGM.getModule(), Ty, isConstant, llvm::GlobalValue::PrivateLinkage,
+ Constant,
+ "__const." + FunctionName(D.getParentFunctionOrMethod()) + "." +
+ D.getName(),
+ InsertBefore, llvm::GlobalValue::NotThreadLocal, AS);
+ GV->setAlignment(Align.getQuantity());
+ GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
+
+ Address SrcPtr = Address(GV, Align);
+ llvm::Type *BP = llvm::PointerType::getInt8PtrTy(CGM.getLLVMContext(), AS);
+ if (SrcPtr.getType() != BP)
+ SrcPtr = Builder.CreateBitCast(SrcPtr, BP);
+ return SrcPtr;
+}
+
static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
Address Loc, bool isVolatile,
CGBuilderTy &Builder,
@@ -1002,25 +1045,10 @@ static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
return;
}
- // Otherwise, create a temporary global with the initializer then memcpy from
- // the global to the alloca.
- std::string Name = getStaticDeclName(CGM, D);
- unsigned AS = CGM.getContext().getTargetAddressSpace(
- CGM.getStringLiteralAddressSpace());
- llvm::Type *BP = llvm::PointerType::getInt8PtrTy(CGM.getLLVMContext(), AS);
-
- llvm::GlobalVariable *GV = new llvm::GlobalVariable(
- CGM.getModule(), constant->getType(), true,
- llvm::GlobalValue::PrivateLinkage, constant, Name, nullptr,
- llvm::GlobalValue::NotThreadLocal, AS);
- GV->setAlignment(Loc.getAlignment().getQuantity());
- GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
-
- Address SrcPtr = Address(GV, Loc.getAlignment());
- if (SrcPtr.getType() != BP)
- SrcPtr = Builder.CreateBitCast(SrcPtr, BP);
-
- Builder.CreateMemCpy(Loc, SrcPtr, SizeVal, isVolatile);
+ Builder.CreateMemCpy(
+ Loc,
+ createUnnamedGlobalFrom(CGM, D, Builder, constant, Loc.getAlignment()),
+ SizeVal, isVolatile);
}
/// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a
@@ -1066,6 +1094,7 @@ void CodeGenFunction::EmitAndRegisterVariableArrayDimensions(
// For each dimension stores its QualType and corresponding
// size-expression Value.
SmallVector<CodeGenFunction::VlaSizePair, 4> Dimensions;
+ SmallVector<IdentifierInfo *, 4> VLAExprNames;
// Break down the array into individual dimensions.
QualType Type1D = D.getType();
@@ -1074,8 +1103,14 @@ void CodeGenFunction::EmitAndRegisterVariableArrayDimensions(
if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
Dimensions.emplace_back(C, Type1D.getUnqualifiedType());
else {
- auto SizeExprAddr = CreateDefaultAlignTempAlloca(
- VlaSize.NumElts->getType(), "__vla_expr");
+ // Generate a locally unique name for the size expression.
+ Twine Name = Twine("__vla_expr") + Twine(VLAExprCounter++);
+ SmallString<12> Buffer;
+ StringRef NameRef = Name.toStringRef(Buffer);
+ auto &Ident = getContext().Idents.getOwn(NameRef);
+ VLAExprNames.push_back(&Ident);
+ auto SizeExprAddr =
+ CreateDefaultAlignTempAlloca(VlaSize.NumElts->getType(), NameRef);
Builder.CreateStore(VlaSize.NumElts, SizeExprAddr);
Dimensions.emplace_back(SizeExprAddr.getPointer(),
Type1D.getUnqualifiedType());
@@ -1089,20 +1124,20 @@ void CodeGenFunction::EmitAndRegisterVariableArrayDimensions(
// Register each dimension's size-expression with a DILocalVariable,
// so that it can be used by CGDebugInfo when instantiating a DISubrange
// to describe this array.
+ unsigned NameIdx = 0;
for (auto &VlaSize : Dimensions) {
llvm::Metadata *MD;
if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
MD = llvm::ConstantAsMetadata::get(C);
else {
// Create an artificial VarDecl to generate debug info for.
- IdentifierInfo &NameIdent = getContext().Idents.getOwn(
- cast<llvm::AllocaInst>(VlaSize.NumElts)->getName());
+ IdentifierInfo *NameIdent = VLAExprNames[NameIdx++];
auto VlaExprTy = VlaSize.NumElts->getType()->getPointerElementType();
auto QT = getContext().getIntTypeForBitwidth(
VlaExprTy->getScalarSizeInBits(), false);
auto *ArtificialDecl = VarDecl::Create(
getContext(), const_cast<DeclContext *>(D.getDeclContext()),
- D.getLocation(), D.getLocation(), &NameIdent, QT,
+ D.getLocation(), D.getLocation(), NameIdent, QT,
getContext().CreateTypeSourceInfo(QT), SC_Auto);
ArtificialDecl->setImplicit();
@@ -1125,8 +1160,8 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
AutoVarEmission emission(D);
- bool isByRef = D.hasAttr<BlocksAttr>();
- emission.IsByRef = isByRef;
+ bool isEscapingByRef = D.isEscapingByref();
+ emission.IsEscapingByRef = isEscapingByRef;
CharUnits alignment = getContext().getDeclAlign(&D);
@@ -1165,8 +1200,8 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// in OpenCL.
if ((!getLangOpts().OpenCL ||
Ty.getAddressSpace() == LangAS::opencl_constant) &&
- (CGM.getCodeGenOpts().MergeAllConstants && !NRVO && !isByRef &&
- CGM.isTypeConstant(Ty, true))) {
+ (CGM.getCodeGenOpts().MergeAllConstants && !NRVO &&
+ !isEscapingByRef && CGM.isTypeConstant(Ty, true))) {
EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
// Signal this condition to later callbacks.
@@ -1218,7 +1253,7 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
} else {
CharUnits allocaAlignment;
llvm::Type *allocaTy;
- if (isByRef) {
+ if (isEscapingByRef) {
auto &byrefInfo = getBlockByrefInfo(&D);
allocaTy = byrefInfo.Type;
allocaAlignment = byrefInfo.ByrefAlignment;
@@ -1418,7 +1453,7 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
}
// Initialize the structure of a __block variable.
- if (emission.IsByRef)
+ if (emission.IsEscapingByRef)
emitByrefStructureInit(emission);
// Initialize the variable here if it doesn't have an initializer and it is a
@@ -1428,7 +1463,7 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
type.isNonTrivialToPrimitiveDefaultInitialize() ==
QualType::PDIK_Struct) {
LValue Dst = MakeAddrLValue(emission.getAllocatedAddress(), type);
- if (emission.IsByRef)
+ if (emission.IsEscapingByRef)
drillIntoBlockVariable(*this, Dst, &D);
defaultInitNonTrivialCStructVar(Dst);
return;
@@ -1440,7 +1475,7 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
// Check whether this is a byref variable that's potentially
// captured and moved by its own initializer. If so, we'll need to
// emit the initializer first, then copy into the variable.
- bool capturedByInit = emission.IsByRef && isCapturedBy(D, Init);
+ bool capturedByInit = emission.IsEscapingByRef && isCapturedBy(D, Init);
Address Loc =
capturedByInit ? emission.Addr : emission.getObjectAddress(*this);
@@ -1634,7 +1669,8 @@ void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
// If this is a block variable, call _Block_object_destroy
// (on the unforwarded address). Don't enter this cleanup if we're in pure-GC
// mode.
- if (emission.IsByRef && CGM.getLangOpts().getGC() != LangOptions::GCOnly) {
+ if (emission.IsEscapingByRef &&
+ CGM.getLangOpts().getGC() != LangOptions::GCOnly) {
BlockFieldFlags Flags = BLOCK_FIELD_IS_BYREF;
if (emission.Variable->getType().isObjCGCWeak())
Flags |= BLOCK_FIELD_IS_WEAK;
@@ -2149,5 +2185,5 @@ void CodeGenModule::EmitOMPDeclareReduction(const OMPDeclareReductionDecl *D,
}
void CodeGenModule::EmitOMPRequiresDecl(const OMPRequiresDecl *D) {
- //Do nothing - here to avoid build errors
+ getOpenMPRuntime().checkArchForUnifiedAddressing(*this, D);
}
diff --git a/lib/CodeGen/CGDeclCXX.cpp b/lib/CodeGen/CGDeclCXX.cpp
index 3f96fea608..9aa31f181e 100644
--- a/lib/CodeGen/CGDeclCXX.cpp
+++ b/lib/CodeGen/CGDeclCXX.cpp
@@ -15,7 +15,7 @@
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
-#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
@@ -26,7 +26,10 @@ using namespace CodeGen;
static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
ConstantAddress DeclPtr) {
- assert(D.hasGlobalStorage() && "VarDecl must have global storage!");
+ assert(
+ (D.hasGlobalStorage() ||
+ (D.hasLocalStorage() && CGF.getContext().getLangOpts().OpenCLCPlusPlus)) &&
+ "VarDecl must have global or local (in the case of OpenCL) storage!");
assert(!D.getType()->isReferenceType() &&
"Should not call EmitDeclInit on a reference!");
@@ -63,15 +66,24 @@ static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
/// Emit code to cause the destruction of the given variable with
/// static storage duration.
static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
- ConstantAddress addr) {
+ ConstantAddress Addr) {
+ // Honor __attribute__((no_destroy)) and bail instead of attempting
+ // to emit a reference to a possibly nonexistent destructor, which
+ // in turn can cause a crash. This will result in a global constructor
+ // that isn't balanced out by a destructor call as intended by the
+ // attribute. This also checks for -fno-c++-static-destructors and
+ // bails even if the attribute is not present.
+ if (D.isNoDestroy(CGF.getContext()))
+ return;
+
CodeGenModule &CGM = CGF.CGM;
// FIXME: __attribute__((cleanup)) ?
- QualType type = D.getType();
- QualType::DestructionKind dtorKind = type.isDestructedType();
+ QualType Type = D.getType();
+ QualType::DestructionKind DtorKind = Type.isDestructedType();
- switch (dtorKind) {
+ switch (DtorKind) {
case QualType::DK_none:
return;
@@ -86,13 +98,14 @@ static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
return;
}
- llvm::Constant *function;
- llvm::Constant *argument;
+ llvm::Constant *Func;
+ llvm::Constant *Argument;
// Special-case non-array C++ destructors, if they have the right signature.
// Under some ABIs, destructors return this instead of void, and cannot be
- // passed directly to __cxa_atexit if the target does not allow this mismatch.
- const CXXRecordDecl *Record = type->getAsCXXRecordDecl();
+ // passed directly to __cxa_atexit if the target does not allow this
+ // mismatch.
+ const CXXRecordDecl *Record = Type->getAsCXXRecordDecl();
bool CanRegisterDestructor =
Record && (!CGM.getCXXABI().HasThisReturn(
GlobalDecl(Record->getDestructor(), Dtor_Complete)) ||
@@ -103,43 +116,47 @@ static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
bool UsingExternalHelper = !CGM.getCodeGenOpts().CXAAtExit;
if (Record && (CanRegisterDestructor || UsingExternalHelper)) {
assert(!Record->hasTrivialDestructor());
- CXXDestructorDecl *dtor = Record->getDestructor();
+ CXXDestructorDecl *Dtor = Record->getDestructor();
- function = CGM.getAddrOfCXXStructor(dtor, StructorType::Complete);
- argument = llvm::ConstantExpr::getBitCast(
- addr.getPointer(), CGF.getTypes().ConvertType(type)->getPointerTo());
+ Func = CGM.getAddrOfCXXStructor(Dtor, StructorType::Complete);
+ Argument = llvm::ConstantExpr::getBitCast(
+ Addr.getPointer(), CGF.getTypes().ConvertType(Type)->getPointerTo());
// Otherwise, the standard logic requires a helper function.
} else {
- function = CodeGenFunction(CGM)
- .generateDestroyHelper(addr, type, CGF.getDestroyer(dtorKind),
- CGF.needsEHCleanup(dtorKind), &D);
- argument = llvm::Constant::getNullValue(CGF.Int8PtrTy);
+ Func = CodeGenFunction(CGM)
+ .generateDestroyHelper(Addr, Type, CGF.getDestroyer(DtorKind),
+ CGF.needsEHCleanup(DtorKind), &D);
+ Argument = llvm::Constant::getNullValue(CGF.Int8PtrTy);
}
- CGM.getCXXABI().registerGlobalDtor(CGF, D, function, argument);
+ CGM.getCXXABI().registerGlobalDtor(CGF, D, Func, Argument);
}
/// Emit code to cause the variable at the given address to be considered as
/// constant from this point onwards.
static void EmitDeclInvariant(CodeGenFunction &CGF, const VarDecl &D,
llvm::Constant *Addr) {
+ return CGF.EmitInvariantStart(
+ Addr, CGF.getContext().getTypeSizeInChars(D.getType()));
+}
+
+void CodeGenFunction::EmitInvariantStart(llvm::Constant *Addr, CharUnits Size) {
// Do not emit the intrinsic if we're not optimizing.
- if (!CGF.CGM.getCodeGenOpts().OptimizationLevel)
+ if (!CGM.getCodeGenOpts().OptimizationLevel)
return;
// Grab the llvm.invariant.start intrinsic.
llvm::Intrinsic::ID InvStartID = llvm::Intrinsic::invariant_start;
// Overloaded address space type.
- llvm::Type *ObjectPtr[1] = {CGF.Int8PtrTy};
- llvm::Constant *InvariantStart = CGF.CGM.getIntrinsic(InvStartID, ObjectPtr);
+ llvm::Type *ObjectPtr[1] = {Int8PtrTy};
+ llvm::Constant *InvariantStart = CGM.getIntrinsic(InvStartID, ObjectPtr);
// Emit a call with the size in bytes of the object.
- CharUnits WidthChars = CGF.getContext().getTypeSizeInChars(D.getType());
- uint64_t Width = WidthChars.getQuantity();
- llvm::Value *Args[2] = { llvm::ConstantInt::getSigned(CGF.Int64Ty, Width),
- llvm::ConstantExpr::getBitCast(Addr, CGF.Int8PtrTy)};
- CGF.Builder.CreateCall(InvariantStart, Args);
+ uint64_t Width = Size.getQuantity();
+ llvm::Value *Args[2] = { llvm::ConstantInt::getSigned(Int64Ty, Width),
+ llvm::ConstantExpr::getBitCast(Addr, Int8PtrTy)};
+ Builder.CreateCall(InvariantStart, Args);
}
void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
@@ -360,11 +377,21 @@ llvm::Function *CodeGenModule::CreateGlobalInitOrDestructFunction(
Fn->addFnAttr(llvm::Attribute::ShadowCallStack);
auto RASignKind = getCodeGenOpts().getSignReturnAddress();
- if (RASignKind != CodeGenOptions::SignReturnAddressScope::None)
+ if (RASignKind != CodeGenOptions::SignReturnAddressScope::None) {
Fn->addFnAttr("sign-return-address",
RASignKind == CodeGenOptions::SignReturnAddressScope::All
? "all"
: "non-leaf");
+ auto RASignKey = getCodeGenOpts().getSignReturnAddressKey();
+ Fn->addFnAttr("sign-return-address-key",
+ RASignKey == CodeGenOptions::SignReturnAddressKeyValue::AKey
+ ? "a_key"
+ : "b_key");
+ }
+
+ if (getCodeGenOpts().BranchTargetEnforcement)
+ Fn->addFnAttr("branch-target-enforcement");
+
return Fn;
}
@@ -597,7 +624,7 @@ void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
void
CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
ArrayRef<llvm::Function *> Decls,
- Address Guard) {
+ ConstantAddress Guard) {
{
auto NL = ApplyDebugLocation::CreateEmpty(*this);
StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
@@ -621,6 +648,12 @@ CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
// initializers use previously-initialized thread_local vars, that's
// probably supposed to be OK, but the standard doesn't say.
Builder.CreateStore(llvm::ConstantInt::get(GuardVal->getType(),1), Guard);
+
+ // The guard variable can't ever change again.
+ EmitInvariantStart(
+ Guard.getPointer(),
+ CharUnits::fromQuantity(
+ CGM.getDataLayout().getTypeAllocSize(GuardVal->getType())));
}
RunCleanupsScope Scope(*this);
diff --git a/lib/CodeGen/CGException.cpp b/lib/CodeGen/CGException.cpp
index 4ee835259a..f1298d1201 100644
--- a/lib/CodeGen/CGException.cpp
+++ b/lib/CodeGen/CGException.cpp
@@ -66,7 +66,7 @@ llvm::Constant *CodeGenModule::getTerminateFn() {
name = "__std_terminate";
else
name = "?terminate@@YAXXZ";
- } else if (getLangOpts().ObjC1 &&
+ } else if (getLangOpts().ObjC &&
getLangOpts().ObjCRuntime.hasTerminate())
name = "objc_terminate";
else
@@ -224,7 +224,7 @@ const EHPersonality &EHPersonality::get(CodeGenModule &CGM,
if (FD && FD->usesSEHTry())
return getSEHPersonalityMSVC(T);
- if (L.ObjC1)
+ if (L.ObjC)
return L.CPlusPlus ? getObjCXXPersonality(Target, L)
: getObjCPersonality(Target, L);
return L.CPlusPlus ? getCXXPersonality(Target, L)
@@ -250,7 +250,11 @@ static llvm::Constant *getPersonalityFn(CodeGenModule &CGM,
static llvm::Constant *getOpaquePersonalityFn(CodeGenModule &CGM,
const EHPersonality &Personality) {
llvm::Constant *Fn = getPersonalityFn(CGM, Personality);
- return llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
+ llvm::PointerType* Int8PtrTy = llvm::PointerType::get(
+ llvm::Type::getInt8Ty(CGM.getLLVMContext()),
+ CGM.getDataLayout().getProgramAddressSpace());
+
+ return llvm::ConstantExpr::getBitCast(Fn, Int8PtrTy);
}
/// Check whether a landingpad instruction only uses C++ features.
@@ -315,7 +319,7 @@ static bool PersonalityHasOnlyCXXUses(llvm::Constant *Fn) {
/// when it really needs it.
void CodeGenModule::SimplifyPersonality() {
// If we're not in ObjC++ -fexceptions, there's nothing to do.
- if (!LangOpts.CPlusPlus || !LangOpts.ObjC1 || !LangOpts.Exceptions)
+ if (!LangOpts.CPlusPlus || !LangOpts.ObjC || !LangOpts.Exceptions)
return;
// Both the problem this endeavors to fix and the way the logic
@@ -1248,7 +1252,7 @@ void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
// we follow the false destination for each of the cond branches to reach
// the rethrow block.
llvm::BasicBlock *RethrowBlock = WasmCatchStartBlock;
- while (llvm::TerminatorInst *TI = RethrowBlock->getTerminator()) {
+ while (llvm::Instruction *TI = RethrowBlock->getTerminator()) {
auto *BI = cast<llvm::BranchInst>(TI);
assert(BI->isConditional());
RethrowBlock = BI->getSuccessor(1);
@@ -1874,7 +1878,7 @@ void CodeGenFunction::startOutlinedSEHHelper(CodeGenFunction &ParentCGF,
OutlinedStmt->getBeginLoc(), OutlinedStmt->getBeginLoc());
CurSEHParent = ParentCGF.CurSEHParent;
- CGM.SetLLVMFunctionAttributes(nullptr, FnInfo, CurFn);
+ CGM.SetLLVMFunctionAttributes(GlobalDecl(), FnInfo, CurFn);
EmitCapturedLocals(ParentCGF, OutlinedStmt, IsFilter);
}
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index b74937e9ca..6ef1091cc0 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -26,7 +26,7 @@
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/NSAPI.h"
-#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
@@ -1260,6 +1260,8 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
return EmitVAArgExprLValue(cast<VAArgExpr>(E));
case Expr::DeclRefExprClass:
return EmitDeclRefLValue(cast<DeclRefExpr>(E));
+ case Expr::ConstantExprClass:
+ return EmitLValue(cast<ConstantExpr>(E)->getSubExpr());
case Expr::ParenExprClass:
return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
case Expr::GenericSelectionExprClass:
@@ -1491,6 +1493,16 @@ CodeGenFunction::tryEmitAsConstant(const MemberExpr *ME) {
return ConstantEmission();
}
+llvm::Value *CodeGenFunction::emitScalarConstant(
+ const CodeGenFunction::ConstantEmission &Constant, Expr *E) {
+ assert(Constant && "not a constant");
+ if (Constant.isReference())
+ return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E),
+ E->getExprLoc())
+ .getScalarVal();
+ return Constant.getValue();
+}
+
llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
SourceLocation Loc) {
return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
@@ -2486,7 +2498,7 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
}
assert(isa<BlockDecl>(CurCodeDecl));
- Address addr = GetAddrOfBlockDecl(VD, VD->hasAttr<BlocksAttr>());
+ Address addr = GetAddrOfBlockDecl(VD);
return MakeAddrLValue(addr, T, AlignmentSource::Decl);
}
}
@@ -2538,7 +2550,7 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
}
// Drill into block byref variables.
- bool isBlockByref = VD->hasAttr<BlocksAttr>();
+ bool isBlockByref = VD->isEscapingByref();
if (isBlockByref) {
addr = emitBlockByrefAddress(addr, VD);
}
@@ -2601,7 +2613,7 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
// of a pointer to object; as in void foo (__weak id *param); *param = 0;
// But, we continue to generate __strong write barrier on indirect write
// into a pointer to object.
- if (getLangOpts().ObjC1 &&
+ if (getLangOpts().ObjC &&
getLangOpts().getGC() != LangOptions::NonGC &&
LV.isObjCWeak())
LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
@@ -2662,7 +2674,7 @@ LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
if (FnName.startswith("\01"))
FnName = FnName.substr(1);
StringRef NameItems[] = {
- PredefinedExpr::getIdentTypeName(E->getIdentType()), FnName};
+ PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName};
std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) {
std::string Name = SL->getString();
@@ -2867,6 +2879,11 @@ static void emitCheckHandlerCall(CodeGenFunction &CGF,
CheckRecoverableKind RecoverKind, bool IsFatal,
llvm::BasicBlock *ContBB) {
assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
+ Optional<ApplyDebugLocation> DL;
+ if (!CGF.Builder.getCurrentDebugLocation()) {
+ // Ensure that the call has at least an artificial debug location.
+ DL.emplace(CGF, SourceLocation());
+ }
bool NeedsAbortSuffix =
IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime;
@@ -3478,7 +3495,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo);
- if (getLangOpts().ObjC1 &&
+ if (getLangOpts().ObjC &&
getLangOpts().getGC() != LangOptions::NonGC) {
LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
setObjCGCLValueClass(getContext(), E, LV);
@@ -3931,7 +3948,7 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
LValue RefLVal = MakeAddrLValue(addr, FieldType, FieldBaseInfo,
FieldTBAAInfo);
if (RecordCVR & Qualifiers::Volatile)
- RefLVal.getQuals().setVolatile(true);
+ RefLVal.getQuals().addVolatile();
addr = EmitLoadOfReference(RefLVal, &FieldBaseInfo, &FieldTBAAInfo);
// Qualifiers on the struct don't apply to the referencee.
@@ -4151,8 +4168,9 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
case CK_ARCReclaimReturnedObject:
case CK_ARCExtendBlockObject:
case CK_CopyAndAutoreleaseBlockObject:
- case CK_AddressSpaceConversion:
case CK_IntToOCLSampler:
+ case CK_FixedPointCast:
+ case CK_FixedPointToBoolean:
return EmitUnsupportedLValue(E, "unexpected cast lvalue");
case CK_Dependent:
@@ -4246,6 +4264,15 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
CGM.getTBAAInfoForSubobject(LV, E->getType()));
}
+ case CK_AddressSpaceConversion: {
+ LValue LV = EmitLValue(E->getSubExpr());
+ QualType DestTy = getContext().getPointerType(E->getType());
+ llvm::Value *V = getTargetHooks().performAddrSpaceCast(
+ *this, LV.getPointer(), E->getSubExpr()->getType().getAddressSpace(),
+ E->getType().getAddressSpace(), ConvertType(DestTy));
+ return MakeAddrLValue(Address(V, LV.getAddress().getAlignment()),
+ E->getType(), LV.getBaseInfo(), LV.getTBAAInfo());
+ }
case CK_ObjCObjectLValueCast: {
LValue LV = EmitLValue(E->getSubExpr());
Address V = Builder.CreateElementBitCast(LV.getAddress(),
@@ -4253,10 +4280,8 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
CGM.getTBAAInfoForSubobject(LV, E->getType()));
}
- case CK_ZeroToOCLQueue:
- llvm_unreachable("NULL to OpenCL queue lvalue cast is not valid");
- case CK_ZeroToOCLEvent:
- llvm_unreachable("NULL to OpenCL event lvalue cast is not valid");
+ case CK_ZeroToOCLOpaqueType:
+ llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
}
llvm_unreachable("Unhandled lvalue cast kind?");
@@ -4363,7 +4388,7 @@ static CGCallee EmitDirectCallee(CodeGenFunction &CGF, const FunctionDecl *FD) {
}
llvm::Constant *calleePtr = EmitFunctionDeclPointer(CGF.CGM, FD);
- return CGCallee::forDirect(calleePtr, FD);
+ return CGCallee::forDirect(calleePtr, GlobalDecl(FD));
}
CGCallee CodeGenFunction::EmitCallee(const Expr *E) {
@@ -4407,8 +4432,13 @@ CGCallee CodeGenFunction::EmitCallee(const Expr *E) {
calleePtr = EmitLValue(E).getPointer();
}
assert(functionType->isFunctionType());
- CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(),
- E->getReferencedDeclOfCallee());
+
+ GlobalDecl GD;
+ if (const auto *VD =
+ dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee()))
+ GD = GlobalDecl(VD);
+
+ CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD);
CGCallee callee(calleeInfo, calleePtr);
return callee;
}
@@ -4593,7 +4623,8 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee
assert(CalleeType->isFunctionPointerType() &&
"Call must have function pointer type!");
- const Decl *TargetDecl = OrigCallee.getAbstractInfo().getCalleeDecl();
+ const Decl *TargetDecl =
+ OrigCallee.getAbstractInfo().getCalleeDecl().getDecl();
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
// We can only guarantee that a function is called from the correct
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index 6264110286..db49b3f28a 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -125,6 +125,10 @@ public:
return Visit(E->getReplacement());
}
+ void VisitConstantExpr(ConstantExpr *E) {
+ return Visit(E->getSubExpr());
+ }
+
// l-values.
void VisitDeclRefExpr(DeclRefExpr *E) { EmitAggLoadOfLValue(E); }
void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
@@ -847,10 +851,11 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
case CK_ARCExtendBlockObject:
case CK_CopyAndAutoreleaseBlockObject:
case CK_BuiltinFnToFnPtr:
- case CK_ZeroToOCLEvent:
- case CK_ZeroToOCLQueue:
+ case CK_ZeroToOCLOpaqueType:
case CK_AddressSpaceConversion:
case CK_IntToOCLSampler:
+ case CK_FixedPointCast:
+ case CK_FixedPointToBoolean:
llvm_unreachable("cast kind invalid for aggregate types");
}
}
diff --git a/lib/CodeGen/CGExprCXX.cpp b/lib/CodeGen/CGExprCXX.cpp
index 393a4aa787..2e0d4ca767 100644
--- a/lib/CodeGen/CGExprCXX.cpp
+++ b/lib/CodeGen/CGExprCXX.cpp
@@ -17,8 +17,8 @@
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "ConstantEmitter.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/CodeGen/CGFunctionInfo.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Intrinsics.h"
@@ -177,7 +177,8 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
if (MD->isStatic()) {
// The method is static, emit it as we would a regular call.
- CGCallee callee = CGCallee::forDirect(CGM.GetAddrOfFunction(MD), MD);
+ CGCallee callee =
+ CGCallee::forDirect(CGM.GetAddrOfFunction(MD), GlobalDecl(MD));
return EmitCall(getContext().getPointerType(MD->getType()), callee, CE,
ReturnValue);
}
@@ -353,13 +354,13 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
else if (!DevirtualizedMethod)
Callee = CGCallee::forDirect(
CGM.getAddrOfCXXStructor(Dtor, StructorType::Complete, FInfo, Ty),
- Dtor);
+ GlobalDecl(Dtor, Dtor_Complete));
else {
const CXXDestructorDecl *DDtor =
cast<CXXDestructorDecl>(DevirtualizedMethod);
Callee = CGCallee::forDirect(
- CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty),
- DDtor);
+ CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty),
+ GlobalDecl(DDtor, Dtor_Complete));
}
EmitCXXMemberOrOperatorCall(
CalleeDecl, Callee, ReturnValue, This.getPointer(),
@@ -371,8 +372,8 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
CGCallee Callee;
if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
Callee = CGCallee::forDirect(
- CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty),
- Ctor);
+ CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty),
+ GlobalDecl(Ctor, Ctor_Complete));
} else if (UseVirtualCall) {
Callee = CGCallee::forVirtual(CE, MD, This.getAddress(), Ty);
} else {
@@ -389,11 +390,12 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
else if (!DevirtualizedMethod)
- Callee = CGCallee::forDirect(CGM.GetAddrOfFunction(MD, Ty), MD);
+ Callee =
+ CGCallee::forDirect(CGM.GetAddrOfFunction(MD, Ty), GlobalDecl(MD));
else {
- Callee = CGCallee::forDirect(
- CGM.GetAddrOfFunction(DevirtualizedMethod, Ty),
- DevirtualizedMethod);
+ Callee =
+ CGCallee::forDirect(CGM.GetAddrOfFunction(DevirtualizedMethod, Ty),
+ GlobalDecl(DevirtualizedMethod));
}
}
@@ -1293,7 +1295,7 @@ static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
const CallArgList &Args) {
llvm::Instruction *CallOrInvoke;
llvm::Constant *CalleePtr = CGF.CGM.GetAddrOfFunction(CalleeDecl);
- CGCallee Callee = CGCallee::forDirect(CalleePtr, CalleeDecl);
+ CGCallee Callee = CGCallee::forDirect(CalleePtr, GlobalDecl(CalleeDecl));
RValue RV =
CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(
Args, CalleeType, /*chainCall=*/false),
@@ -2252,7 +2254,6 @@ llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
}
void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
- RunCleanupsScope Scope(*this);
LValue SlotLV = MakeAddrLValue(Slot.getAddress(), E->getType());
CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp
index fb176093a7..2db693b44c 100644
--- a/lib/CodeGen/CGExprComplex.cpp
+++ b/lib/CodeGen/CGExprComplex.cpp
@@ -101,6 +101,9 @@ public:
llvm_unreachable("Stmt can't have complex result type!");
}
ComplexPairTy VisitExpr(Expr *S);
+ ComplexPairTy VisitConstantExpr(ConstantExpr *E) {
+ return Visit(E->getSubExpr());
+ }
ComplexPairTy VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr());}
ComplexPairTy VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
return Visit(GE->getResultExpr());
@@ -505,10 +508,11 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastKind CK, Expr *Op,
case CK_ARCExtendBlockObject:
case CK_CopyAndAutoreleaseBlockObject:
case CK_BuiltinFnToFnPtr:
- case CK_ZeroToOCLEvent:
- case CK_ZeroToOCLQueue:
+ case CK_ZeroToOCLOpaqueType:
case CK_AddressSpaceConversion:
case CK_IntToOCLSampler:
+ case CK_FixedPointCast:
+ case CK_FixedPointToBoolean:
llvm_unreachable("invalid cast kind for complex value");
case CK_FloatingRealToComplex:
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
index 651b05a26f..c9475840ae 100644
--- a/lib/CodeGen/CGExprConstant.cpp
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -47,7 +47,7 @@ class ConstStructBuilder {
public:
static llvm::Constant *BuildStruct(ConstantEmitter &Emitter,
ConstExprEmitter *ExprEmitter,
- llvm::ConstantStruct *Base,
+ llvm::Constant *Base,
InitListExpr *Updater,
QualType ValTy);
static llvm::Constant *BuildStruct(ConstantEmitter &Emitter,
@@ -76,7 +76,7 @@ private:
void ConvertStructToPacked();
bool Build(InitListExpr *ILE);
- bool Build(ConstExprEmitter *Emitter, llvm::ConstantStruct *Base,
+ bool Build(ConstExprEmitter *Emitter, llvm::Constant *Base,
InitListExpr *Updater);
bool Build(const APValue &Val, const RecordDecl *RD, bool IsPrimaryBase,
const CXXRecordDecl *VTableClass, CharUnits BaseOffset);
@@ -566,7 +566,7 @@ llvm::Constant *ConstStructBuilder::Finalize(QualType Ty) {
llvm::Constant *ConstStructBuilder::BuildStruct(ConstantEmitter &Emitter,
ConstExprEmitter *ExprEmitter,
- llvm::ConstantStruct *Base,
+ llvm::Constant *Base,
InitListExpr *Updater,
QualType ValTy) {
ConstStructBuilder Builder(Emitter);
@@ -723,6 +723,10 @@ public:
return nullptr;
}
+ llvm::Constant *VisitConstantExpr(ConstantExpr *CE, QualType T) {
+ return Visit(CE->getSubExpr(), T);
+ }
+
llvm::Constant *VisitParenExpr(ParenExpr *PE, QualType T) {
return Visit(PE->getSubExpr(), T);
}
@@ -869,8 +873,9 @@ public:
case CK_FloatingToIntegral:
case CK_FloatingToBoolean:
case CK_FloatingCast:
- case CK_ZeroToOCLEvent:
- case CK_ZeroToOCLQueue:
+ case CK_FixedPointCast:
+ case CK_FixedPointToBoolean:
+ case CK_ZeroToOCLOpaqueType:
return nullptr;
}
llvm_unreachable("Invalid CastKind");
@@ -1026,8 +1031,8 @@ public:
}
if (destType->isRecordType())
- return ConstStructBuilder::BuildStruct(Emitter, this,
- dyn_cast<llvm::ConstantStruct>(Base), Updater, destType);
+ return ConstStructBuilder::BuildStruct(Emitter, this, Base, Updater,
+ destType);
return nullptr;
}
@@ -1102,7 +1107,7 @@ public:
} // end anonymous namespace.
bool ConstStructBuilder::Build(ConstExprEmitter *ExprEmitter,
- llvm::ConstantStruct *Base,
+ llvm::Constant *Base,
InitListExpr *Updater) {
assert(Base && "base expression should not be empty");
@@ -1110,7 +1115,7 @@ bool ConstStructBuilder::Build(ConstExprEmitter *ExprEmitter,
RecordDecl *RD = ExprType->getAs<RecordType>()->getDecl();
const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
const llvm::StructLayout *BaseLayout = CGM.getDataLayout().getStructLayout(
- Base->getType());
+ cast<llvm::StructType>(Base->getType()));
unsigned FieldNo = -1;
unsigned ElementNo = 0;
@@ -1131,7 +1136,7 @@ bool ConstStructBuilder::Build(ConstExprEmitter *ExprEmitter,
if (Field->isUnnamedBitfield())
continue;
- llvm::Constant *EltInit = Base->getOperand(ElementNo);
+ llvm::Constant *EltInit = Base->getAggregateElement(ElementNo);
// Bail out if the type of the ConstantStruct does not have the same layout
// as the type of the InitListExpr.
@@ -1450,6 +1455,7 @@ llvm::Constant *ConstantEmitter::tryEmitPrivateForVarInit(const VarDecl &D) {
if (CD->isTrivial() && CD->isDefaultConstructor())
return CGM.EmitNullConstant(D.getType());
}
+ InConstantContext = true;
}
QualType destType = D.getType();
@@ -1547,7 +1553,7 @@ llvm::Constant *ConstantEmitter::tryEmitPrivate(const Expr *E,
if (destType->isReferenceType())
Success = E->EvaluateAsLValue(Result, CGM.getContext());
else
- Success = E->EvaluateAsRValue(Result, CGM.getContext());
+ Success = E->EvaluateAsRValue(Result, CGM.getContext(), InConstantContext);
llvm::Constant *C;
if (Success && !Result.HasSideEffects)
@@ -1600,6 +1606,7 @@ private:
ConstantLValue tryEmitBase(const APValue::LValueBase &base);
ConstantLValue VisitStmt(const Stmt *S) { return nullptr; }
+ ConstantLValue VisitConstantExpr(const ConstantExpr *E);
ConstantLValue VisitCompoundLiteralExpr(const CompoundLiteralExpr *E);
ConstantLValue VisitStringLiteral(const StringLiteral *E);
ConstantLValue VisitObjCEncodeExpr(const ObjCEncodeExpr *E);
@@ -1755,6 +1762,11 @@ ConstantLValueEmitter::tryEmitBase(const APValue::LValueBase &base) {
}
ConstantLValue
+ConstantLValueEmitter::VisitConstantExpr(const ConstantExpr *E) {
+ return Visit(E->getSubExpr());
+}
+
+ConstantLValue
ConstantLValueEmitter::VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
return tryEmitGlobalCompoundLiteral(CGM, Emitter.CGF, E);
}
@@ -1782,7 +1794,7 @@ ConstantLValueEmitter::VisitPredefinedExpr(const PredefinedExpr *E) {
return cast<ConstantAddress>(Res.getAddress());
}
- auto kind = E->getIdentType();
+ auto kind = E->getIdentKind();
if (kind == PredefinedExpr::PrettyFunction) {
return CGM.GetAddrOfConstantCString("top level", ".tmp");
}
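The ConstStructBuilder signature changes above (llvm::ConstantStruct * widened to llvm::Constant *) together with the switch from getOperand() to getAggregateElement() let the builder accept bases that constant-fold to aggregates other than a ConstantStruct, for example a zeroinitializer. A minimal sketch of that API difference, assuming an LLVM development setup (header paths and library linkage are not part of this patch):

  #include "llvm/IR/Constants.h"
  #include "llvm/IR/DerivedTypes.h"
  #include "llvm/IR/LLVMContext.h"
  #include "llvm/Support/raw_ostream.h"

  int main() {
    llvm::LLVMContext Ctx;
    llvm::Type *I32 = llvm::Type::getInt32Ty(Ctx);
    llvm::StructType *ST = llvm::StructType::get(I32, I32);
    // A zeroinitializer for { i32, i32 } is a ConstantAggregateZero, not a
    // ConstantStruct, so dyn_cast<llvm::ConstantStruct>(Zero) would yield null.
    llvm::Constant *Zero = llvm::ConstantAggregateZero::get(ST);
    // getAggregateElement() still produces the element value (i32 0 here).
    llvm::Constant *Elt = Zero->getAggregateElement(0u);
    Elt->print(llvm::errs());
    llvm::errs() << "\n";
    return 0;
  }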
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index 4995db28ba..f53bb33e46 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -11,11 +11,11 @@
//
//===----------------------------------------------------------------------===//
-#include "CodeGenFunction.h"
-#include "CGCleanup.h"
#include "CGCXXABI.h"
+#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
+#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
@@ -23,8 +23,9 @@
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/CodeGenOptions.h"
+#include "clang/Basic/FixedPoint.h"
#include "clang/Basic/TargetInfo.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/Optional.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
@@ -302,7 +303,11 @@ public:
/// Known implicit conversion check kinds.
/// Keep in sync with the enum of the same name in ubsan_handlers.h
enum ImplicitConversionCheckKind : unsigned char {
- ICCK_IntegerTruncation = 0,
+ ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
+ ICCK_UnsignedIntegerTruncation = 1,
+ ICCK_SignedIntegerTruncation = 2,
+ ICCK_IntegerSignChange = 3,
+ ICCK_SignedIntegerTruncationOrSignChange = 4,
};
/// Emit a check that an [implicit] truncation of an integer does not
@@ -310,21 +315,39 @@ public:
void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
QualType DstType, SourceLocation Loc);
+ /// Emit a check that an [implicit] conversion of an integer does not change
+ /// the sign of the value. It is not UB, so we use the value after conversion.
+ /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
+ void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
+ QualType DstType, SourceLocation Loc);
+
/// Emit a conversion from the specified type to the specified destination
/// type, both of which are LLVM scalar types.
struct ScalarConversionOpts {
bool TreatBooleanAsSigned;
bool EmitImplicitIntegerTruncationChecks;
+ bool EmitImplicitIntegerSignChangeChecks;
ScalarConversionOpts()
: TreatBooleanAsSigned(false),
- EmitImplicitIntegerTruncationChecks(false) {}
+ EmitImplicitIntegerTruncationChecks(false),
+ EmitImplicitIntegerSignChangeChecks(false) {}
+
+ ScalarConversionOpts(clang::SanitizerSet SanOpts)
+ : TreatBooleanAsSigned(false),
+ EmitImplicitIntegerTruncationChecks(
+ SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
+ EmitImplicitIntegerSignChangeChecks(
+ SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
};
Value *
EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
SourceLocation Loc,
ScalarConversionOpts Opts = ScalarConversionOpts());
+ Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
+ SourceLocation Loc);
+
/// Emit a conversion from the specified complex type to the specified
/// destination type, where the destination type is an LLVM scalar type.
Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
@@ -382,6 +405,9 @@ public:
}
Value *VisitExpr(Expr *S);
+ Value *VisitConstantExpr(ConstantExpr *E) {
+ return Visit(E->getSubExpr());
+ }
Value *VisitParenExpr(ParenExpr *PE) {
return Visit(PE->getSubExpr());
}
@@ -450,19 +476,10 @@ public:
return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal();
}
- Value *emitConstant(const CodeGenFunction::ConstantEmission &Constant,
- Expr *E) {
- assert(Constant && "not a constant");
- if (Constant.isReference())
- return EmitLoadOfLValue(Constant.getReferenceLValue(CGF, E),
- E->getExprLoc());
- return Constant.getValue();
- }
-
// l-values.
Value *VisitDeclRefExpr(DeclRefExpr *E) {
if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
- return emitConstant(Constant, E);
+ return CGF.emitScalarConstant(Constant, E);
return EmitLoadOfLValue(E);
}
@@ -664,7 +681,7 @@ public:
case LangOptions::SOB_Undefined:
if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
- // Fall through.
+ LLVM_FALLTHROUGH;
case LangOptions::SOB_Trapping:
if (CanElideOverflowCheck(CGF.getContext(), Ops))
return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
@@ -941,48 +958,233 @@ void ScalarExprEmitter::EmitFloatConversionCheck(
SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
}
+// Should be called within CodeGenFunction::SanitizerScope RAII scope.
+// Returns 'i1 false' when the truncation Src -> Dst was lossy.
+static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
+ std::pair<llvm::Value *, SanitizerMask>>
+EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
+ QualType DstType, CGBuilderTy &Builder) {
+ llvm::Type *SrcTy = Src->getType();
+ llvm::Type *DstTy = Dst->getType();
+ (void)DstTy; // Only used in assert()
+
+ // This should be truncation of integral types.
+ assert(Src != Dst);
+ assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
+ assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
+ "non-integer llvm type");
+
+ bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
+ bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
+
+ // If both (src and dst) types are unsigned, then it's an unsigned truncation.
+ // Else, it is a signed truncation.
+ ScalarExprEmitter::ImplicitConversionCheckKind Kind;
+ SanitizerMask Mask;
+ if (!SrcSigned && !DstSigned) {
+ Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
+ Mask = SanitizerKind::ImplicitUnsignedIntegerTruncation;
+ } else {
+ Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
+ Mask = SanitizerKind::ImplicitSignedIntegerTruncation;
+ }
+
+ llvm::Value *Check = nullptr;
+ // 1. Extend the truncated value back to the same width as the Src.
+ Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
+ // 2. Equality-compare with the original source value
+ Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
+ // If the comparison result is 'i1 false', then the truncation was lossy.
+ return std::make_pair(Kind, std::make_pair(Check, Mask));
+}
+
void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
Value *Dst, QualType DstType,
SourceLocation Loc) {
- if (!CGF.SanOpts.has(SanitizerKind::ImplicitIntegerTruncation))
+ if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
return;
- llvm::Type *SrcTy = Src->getType();
- llvm::Type *DstTy = Dst->getType();
-
// We only care about int->int conversions here.
// We ignore conversions to/from pointer and/or bool.
if (!(SrcType->isIntegerType() && DstType->isIntegerType()))
return;
- assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
- "clang integer type lowered to non-integer llvm type");
-
- unsigned SrcBits = SrcTy->getScalarSizeInBits();
- unsigned DstBits = DstTy->getScalarSizeInBits();
+ unsigned SrcBits = Src->getType()->getScalarSizeInBits();
+ unsigned DstBits = Dst->getType()->getScalarSizeInBits();
// This must be truncation. Else we do not care.
if (SrcBits <= DstBits)
return;
assert(!DstType->isBooleanType() && "we should not get here with booleans.");
+ // If the integer sign change sanitizer is enabled,
+ // and we are truncating from larger unsigned type to smaller signed type,
+ // let that next sanitizer deal with it.
+ bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
+ bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
+ if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
+ (!SrcSigned && DstSigned))
+ return;
+
CodeGenFunction::SanitizerScope SanScope(&CGF);
+ std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
+ std::pair<llvm::Value *, SanitizerMask>>
+ Check =
+ EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
+ // If the comparison result is 'i1 false', then the truncation was lossy.
+
+ // Do we care about this type of truncation?
+ if (!CGF.SanOpts.has(Check.second.second))
+ return;
+
+ llvm::Constant *StaticArgs[] = {
+ CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
+ CGF.EmitCheckTypeDescriptor(DstType),
+ llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first)};
+ CGF.EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
+ {Src, Dst});
+}
+
+// Should be called within CodeGenFunction::SanitizerScope RAII scope.
+// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
+static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
+ std::pair<llvm::Value *, SanitizerMask>>
+EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
+ QualType DstType, CGBuilderTy &Builder) {
+ llvm::Type *SrcTy = Src->getType();
+ llvm::Type *DstTy = Dst->getType();
+
+ assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
+ "non-integer llvm type");
+
+ bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
+ bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
+ (void)SrcSigned; // Only used in assert()
+ (void)DstSigned; // Only used in assert()
+ unsigned SrcBits = SrcTy->getScalarSizeInBits();
+ unsigned DstBits = DstTy->getScalarSizeInBits();
+ (void)SrcBits; // Only used in assert()
+ (void)DstBits; // Only used in assert()
+
+ assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
+ "either the widths should be different, or the signednesses.");
+
+ // NOTE: zero value is considered to be non-negative.
+ auto EmitIsNegativeTest = [&Builder](Value *V, QualType VType,
+ const char *Name) -> Value * {
+ // Is this value a signed type?
+ bool VSigned = VType->isSignedIntegerOrEnumerationType();
+ llvm::Type *VTy = V->getType();
+ if (!VSigned) {
+ // If the value is unsigned, then it is never negative.
+ // FIXME: can we encounter non-scalar VTy here?
+ return llvm::ConstantInt::getFalse(VTy->getContext());
+ }
+ // Get the zero of the same type with which we will be comparing.
+ llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
+ // %V.isnegative = icmp slt %V, 0
+ // I.e is %V *strictly* less than zero, does it have negative value?
+ return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
+ llvm::Twine(Name) + "." + V->getName() +
+ ".negativitycheck");
+ };
+
+ // 1. Was the old Value negative?
+ llvm::Value *SrcIsNegative = EmitIsNegativeTest(Src, SrcType, "src");
+ // 2. Is the new Value negative?
+ llvm::Value *DstIsNegative = EmitIsNegativeTest(Dst, DstType, "dst");
+ // 3. Now, was the 'negativity status' preserved during the conversion?
+ // NOTE: conversion from negative to zero is considered to change the sign.
+ // (We want to get 'false' when the conversion changed the sign)
+ // So we should just equality-compare the negativity statuses.
llvm::Value *Check = nullptr;
+ Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
+ // If the comparison result is 'false', then the conversion changed the sign.
+ return std::make_pair(
+ ScalarExprEmitter::ICCK_IntegerSignChange,
+ std::make_pair(Check, SanitizerKind::ImplicitIntegerSignChange));
+}
- // 1. Extend the truncated value back to the same width as the Src.
- bool InputSigned = DstType->isSignedIntegerOrEnumerationType();
- Check = Builder.CreateIntCast(Dst, SrcTy, InputSigned, "anyext");
- // 2. Equality-compare with the original source value
- Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
- // If the comparison result is 'i1 false', then the truncation was lossy.
+void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
+ Value *Dst, QualType DstType,
+ SourceLocation Loc) {
+ if (!CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange))
+ return;
+
+ llvm::Type *SrcTy = Src->getType();
+ llvm::Type *DstTy = Dst->getType();
+
+ // We only care about int->int conversions here.
+ // We ignore conversions to/from pointer and/or bool.
+ if (!(SrcType->isIntegerType() && DstType->isIntegerType()))
+ return;
+
+ bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
+ bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
+ unsigned SrcBits = SrcTy->getScalarSizeInBits();
+ unsigned DstBits = DstTy->getScalarSizeInBits();
+
+ // Now, we do not need to emit the check in *all* of the cases.
+ // We can avoid emitting it in some obvious cases where it would have been
+ // dropped by the opt passes (instcombine) always anyways.
+ // If it's a cast between effectively the same type, no check.
+ // NOTE: this is *not* equivalent to checking the canonical types.
+ if (SrcSigned == DstSigned && SrcBits == DstBits)
+ return;
+ // At least one of the values needs to have signed type.
+ // If both are unsigned, then obviously, neither of them can be negative.
+ if (!SrcSigned && !DstSigned)
+ return;
+ // If the conversion is to *larger* *signed* type, then no check is needed.
+ // Because either sign-extension happens (so the sign will remain),
+ // or zero-extension will happen (the sign bit will be zero.)
+ if ((DstBits > SrcBits) && DstSigned)
+ return;
+ if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
+ (SrcBits > DstBits) && SrcSigned) {
+ // If the signed integer truncation sanitizer is enabled,
+ // and this is a truncation from signed type, then no check is needed.
+ // Because here sign change check is interchangeable with truncation check.
+ return;
+ }
+ // That's it. We can't rule out any more cases with the data we have.
+
+ CodeGenFunction::SanitizerScope SanScope(&CGF);
+
+ std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
+ std::pair<llvm::Value *, SanitizerMask>>
+ Check;
+
+ // Each of these checks needs to return 'false' when an issue was detected.
+ ImplicitConversionCheckKind CheckKind;
+ llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
+ // So we can 'and' all the checks together, and still get 'false',
+ // if at least one of the checks detected an issue.
+
+ Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
+ CheckKind = Check.first;
+ Checks.emplace_back(Check.second);
+
+ if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
+ (SrcBits > DstBits) && !SrcSigned && DstSigned) {
+ // If the signed integer truncation sanitizer was enabled,
+ // and we are truncating from larger unsigned type to smaller signed type,
+ // let's handle the case we skipped in that check.
+ Check =
+ EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
+ CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
+ Checks.emplace_back(Check.second);
+ // If the comparison result is 'i1 false', then the truncation was lossy.
+ }
llvm::Constant *StaticArgs[] = {
CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
CGF.EmitCheckTypeDescriptor(DstType),
- llvm::ConstantInt::get(Builder.getInt8Ty(), ICCK_IntegerTruncation)};
- CGF.EmitCheck(std::make_pair(Check, SanitizerKind::ImplicitIntegerTruncation),
- SanitizerHandler::ImplicitConversion, StaticArgs, {Src, Dst});
+ llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind)};
+ // EmitCheck() will 'and' all the checks together.
+ CGF.EmitCheck(Checks, SanitizerHandler::ImplicitConversion, StaticArgs,
+ {Src, Dst});
}
/// Emit a conversion from the specified type to the specified destination type,
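At the source level, the two checks built above reduce to simple integer tests: the truncation check widens the truncated result back to the source width and compares it with the original value, and the sign-change check compares the negativity of the value before and after conversion. A plain C++ sketch of that logic (illustrative only; the real checks are emitted as IR and report through the ImplicitConversion handler):

  #include <cstdint>
  #include <cstdio>

  // Truncation check: re-extend with the destination's signedness ("anyext")
  // and compare with the source ("truncheck"); false means bits were lost.
  bool truncationWasLossless(int32_t Src, int8_t Dst) {
    return static_cast<int32_t>(Dst) == Src;
  }

  // Sign-change check: the conversion is clean when the "is negative" status
  // is preserved ("signchangecheck"); an unsigned result is never negative.
  bool signWasPreserved(int32_t Src, uint32_t Dst) {
    bool SrcIsNegative = Src < 0;
    bool DstIsNegative = false;   // Dst is unsigned
    (void)Dst;
    return SrcIsNegative == DstIsNegative;
  }

  int main() {
    int32_t Big = 300;
    int8_t Truncated = static_cast<int8_t>(Big);   // 300 does not fit in 8 bits
    std::printf("lossless=%d\n", truncationWasLossless(Big, Truncated));          // 0
    std::printf("sign kept=%d\n", signWasPreserved(-1, static_cast<uint32_t>(-1))); // 0
  }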
@@ -991,6 +1193,27 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
QualType DstType,
SourceLocation Loc,
ScalarConversionOpts Opts) {
+ // All conversions involving fixed point types should be handled by the
+ // EmitFixedPoint family functions. This is done to prevent bloating up this
+ // function more, and although fixed point numbers are represented by
+ // integers, we do not want to follow any logic that assumes they should be
+ // treated as integers.
+ // TODO(leonardchan): When necessary, add another if statement checking for
+ // conversions to fixed point types from other types.
+ if (SrcType->isFixedPointType()) {
+ if (DstType->isFixedPointType()) {
+ return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
+ } else if (DstType->isBooleanType()) {
+ // We do not need to check the padding bit on unsigned types if unsigned
+ // padding is enabled because overflow into this bit is undefined
+ // behavior.
+ return Builder.CreateIsNotNull(Src, "tobool");
+ }
+
+ llvm_unreachable(
+ "Unhandled scalar conversion involving a fixed point type.");
+ }
+
QualType NoncanonicalSrcType = SrcType;
QualType NoncanonicalDstType = DstType;
@@ -1036,8 +1259,13 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
}
// Ignore conversions like int -> uint.
- if (SrcTy == DstTy)
+ if (SrcTy == DstTy) {
+ if (Opts.EmitImplicitIntegerSignChangeChecks)
+ EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
+ NoncanonicalDstType, Loc);
+
return Src;
+ }
// Handle pointer conversions next: pointers can only be converted to/from
// other pointers and integers. Check for pointer types in terms of LLVM, as
@@ -1181,9 +1409,91 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
NoncanonicalDstType, Loc);
+ if (Opts.EmitImplicitIntegerSignChangeChecks)
+ EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
+ NoncanonicalDstType, Loc);
+
return Res;
}
+Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
+ QualType DstTy,
+ SourceLocation Loc) {
+ using llvm::APInt;
+ using llvm::ConstantInt;
+ using llvm::Value;
+
+ assert(SrcTy->isFixedPointType());
+ assert(DstTy->isFixedPointType());
+
+ FixedPointSemantics SrcFPSema =
+ CGF.getContext().getFixedPointSemantics(SrcTy);
+ FixedPointSemantics DstFPSema =
+ CGF.getContext().getFixedPointSemantics(DstTy);
+ unsigned SrcWidth = SrcFPSema.getWidth();
+ unsigned DstWidth = DstFPSema.getWidth();
+ unsigned SrcScale = SrcFPSema.getScale();
+ unsigned DstScale = DstFPSema.getScale();
+ bool SrcIsSigned = SrcFPSema.isSigned();
+ bool DstIsSigned = DstFPSema.isSigned();
+
+ llvm::Type *DstIntTy = Builder.getIntNTy(DstWidth);
+
+ Value *Result = Src;
+ unsigned ResultWidth = SrcWidth;
+
+ if (!DstFPSema.isSaturated()) {
+ // Downscale.
+ if (DstScale < SrcScale)
+ Result = SrcIsSigned ?
+ Builder.CreateAShr(Result, SrcScale - DstScale, "downscale") :
+ Builder.CreateLShr(Result, SrcScale - DstScale, "downscale");
+
+ // Resize.
+ Result = Builder.CreateIntCast(Result, DstIntTy, SrcIsSigned, "resize");
+
+ // Upscale.
+ if (DstScale > SrcScale)
+ Result = Builder.CreateShl(Result, DstScale - SrcScale, "upscale");
+ } else {
+ // Adjust the number of fractional bits.
+ if (DstScale > SrcScale) {
+ ResultWidth = SrcWidth + DstScale - SrcScale;
+ llvm::Type *UpscaledTy = Builder.getIntNTy(ResultWidth);
+ Result = Builder.CreateIntCast(Result, UpscaledTy, SrcIsSigned, "resize");
+ Result = Builder.CreateShl(Result, DstScale - SrcScale, "upscale");
+ } else if (DstScale < SrcScale) {
+ Result = SrcIsSigned ?
+ Builder.CreateAShr(Result, SrcScale - DstScale, "downscale") :
+ Builder.CreateLShr(Result, SrcScale - DstScale, "downscale");
+ }
+
+ // Handle saturation.
+ bool LessIntBits = DstFPSema.getIntegralBits() < SrcFPSema.getIntegralBits();
+ if (LessIntBits) {
+ Value *Max = ConstantInt::get(
+ CGF.getLLVMContext(),
+ APFixedPoint::getMax(DstFPSema).getValue().extOrTrunc(ResultWidth));
+ Value *TooHigh = SrcIsSigned ? Builder.CreateICmpSGT(Result, Max)
+ : Builder.CreateICmpUGT(Result, Max);
+ Result = Builder.CreateSelect(TooHigh, Max, Result, "satmax");
+ }
+ // Cannot overflow min to dest type if src is unsigned since all fixed
+ // point types can cover the unsigned min of 0.
+ if (SrcIsSigned && (LessIntBits || !DstIsSigned)) {
+ Value *Min = ConstantInt::get(
+ CGF.getLLVMContext(),
+ APFixedPoint::getMin(DstFPSema).getValue().extOrTrunc(ResultWidth));
+ Value *TooLow = Builder.CreateICmpSLT(Result, Min);
+ Result = Builder.CreateSelect(TooLow, Min, Result, "satmin");
+ }
+
+ // Resize the integer part to get the final destination size.
+ Result = Builder.CreateIntCast(Result, DstIntTy, SrcIsSigned, "resize");
+ }
+ return Result;
+}
+
/// Emit a conversion from the specified complex type to the specified
/// destination type, where the destination type is an LLVM scalar type.
Value *ScalarExprEmitter::EmitComplexToScalarConversion(
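EmitFixedPointConversion above emits IR that adjusts the scale with shifts, resizes to the destination width, and, for saturated destinations, clamps to the destination's representable range. The same arithmetic for the saturated path, written at the C++ source level with illustrative widths and scales (a sketch of the math, not the Clang entry point):

  #include <cstdint>
  #include <cstdio>

  // Rescale a signed fixed-point value with SrcScale fractional bits into a
  // 16-bit signed result with DstScale fractional bits, saturating on overflow.
  int16_t convertSaturating(int32_t Src, unsigned SrcScale, unsigned DstScale) {
    int64_t V = Src;                                   // wide intermediate ("resize")
    if (DstScale > SrcScale)
      V <<= (DstScale - SrcScale);                     // "upscale"
    else if (DstScale < SrcScale)
      V >>= (SrcScale - DstScale);                     // "downscale", arithmetic shift
    if (V > INT16_MAX) V = INT16_MAX;                  // "satmax"
    if (V < INT16_MIN) V = INT16_MIN;                  // "satmin"
    return static_cast<int16_t>(V);                    // final "resize" to DstWidth
  }

  int main() {
    // 2.5 stored with scale 16 (0x00028000) converted to scale 8: 0x0280 == 2.5.
    std::printf("0x%04x\n",
                static_cast<uint16_t>(convertSaturating(0x00028000, 16, 8)));
  }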
@@ -1405,10 +1715,11 @@ Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
CGF.EmitIgnoredExpr(E->getBase());
- return emitConstant(Constant, E);
+ return CGF.emitScalarConstant(Constant, E);
} else {
- llvm::APSInt Value;
- if (E->EvaluateAsInt(Value, CGF.getContext(), Expr::SE_AllowSideEffects)) {
+ Expr::EvalResult Result;
+ if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) {
+ llvm::APSInt Value = Result.Val.getInt();
CGF.EmitIgnoredExpr(E->getBase());
return Builder.getInt(Value);
}
@@ -1874,11 +2185,22 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
return Builder.CreateVectorSplat(NumElements, Elt, "splat");
}
+ case CK_FixedPointCast:
+ return EmitScalarConversion(Visit(E), E->getType(), DestTy,
+ CE->getExprLoc());
+
+ case CK_FixedPointToBoolean:
+ assert(E->getType()->isFixedPointType() &&
+ "Expected src type to be fixed point type");
+ assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
+ return EmitScalarConversion(Visit(E), E->getType(), DestTy,
+ CE->getExprLoc());
+
case CK_IntegralCast: {
ScalarConversionOpts Opts;
- if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerTruncation)) {
- if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE))
- Opts.EmitImplicitIntegerTruncationChecks = !ICE->isPartOfExplicitCast();
+ if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
+ if (!ICE->isPartOfExplicitCast())
+ Opts = ScalarConversionOpts(CGF.SanOpts);
}
return EmitScalarConversion(Visit(E), E->getType(), DestTy,
CE->getExprLoc(), Opts);
@@ -1919,13 +2241,10 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
CE->getExprLoc());
}
- case CK_ZeroToOCLEvent: {
- assert(DestTy->isEventT() && "CK_ZeroToOCLEvent cast on non-event type");
- return llvm::Constant::getNullValue(ConvertType(DestTy));
- }
-
- case CK_ZeroToOCLQueue: {
- assert(DestTy->isQueueT() && "CK_ZeroToOCLQueue cast on non queue_t type");
+ case CK_ZeroToOCLOpaqueType: {
+ assert((DestTy->isEventT() || DestTy->isQueueT() ||
+ DestTy->isOCLIntelSubgroupAVCType()) &&
+ "CK_ZeroToOCLEvent cast on non-event type");
return llvm::Constant::getNullValue(ConvertType(DestTy));
}
@@ -1984,7 +2303,7 @@ llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
case LangOptions::SOB_Undefined:
if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
return Builder.CreateNSWAdd(InVal, Amount, Name);
- // Fall through.
+ LLVM_FALLTHROUGH;
case LangOptions::SOB_Trapping:
if (!E->canOverflow())
return Builder.CreateNSWAdd(InVal, Amount, Name);
@@ -2279,9 +2598,11 @@ Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
// Try folding the offsetof to a constant.
- llvm::APSInt Value;
- if (E->EvaluateAsInt(Value, CGF.getContext()))
+ Expr::EvalResult EVResult;
+ if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
+ llvm::APSInt Value = EVResult.Val.getInt();
return Builder.getInt(Value);
+ }
// Loop over the components of the offsetof to compute the value.
unsigned n = E->getNumComponents();
@@ -2550,9 +2871,10 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
// Expand the binary operator.
Result = (this->*Func)(OpInfo);
- // Convert the result back to the LHS type.
- Result =
- EmitScalarConversion(Result, E->getComputationResultType(), LHSTy, Loc);
+ // Convert the result back to the LHS type,
+ // potentially with Implicit Conversion sanitizer check.
+ Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy,
+ Loc, ScalarConversionOpts(CGF.SanOpts));
if (atomicPHI) {
llvm::BasicBlock *opBB = Builder.GetInsertBlock();
@@ -2990,7 +3312,7 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
case LangOptions::SOB_Undefined:
if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
- // Fall through.
+ LLVM_FALLTHROUGH;
case LangOptions::SOB_Trapping:
if (CanElideOverflowCheck(CGF.getContext(), op))
return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
@@ -3025,7 +3347,7 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
case LangOptions::SOB_Undefined:
if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
- // Fall through.
+ LLVM_FALLTHROUGH;
case LangOptions::SOB_Trapping:
if (CanElideOverflowCheck(CGF.getContext(), op))
return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
diff --git a/lib/CodeGen/CGLoopInfo.cpp b/lib/CodeGen/CGLoopInfo.cpp
index 8f9a9b9607..169ae4fcde 100644
--- a/lib/CodeGen/CGLoopInfo.cpp
+++ b/lib/CodeGen/CGLoopInfo.cpp
@@ -10,8 +10,8 @@
#include "CGLoopInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
-#include "clang/Sema/LoopHint.h"
#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
@@ -335,10 +335,10 @@ void LoopInfoStack::InsertHelper(Instruction *I) const {
if (!L.getLoopID())
return;
- if (TerminatorInst *TI = dyn_cast<TerminatorInst>(I)) {
- for (unsigned i = 0, ie = TI->getNumSuccessors(); i < ie; ++i)
- if (TI->getSuccessor(i) == L.getHeader()) {
- TI->setMetadata(llvm::LLVMContext::MD_loop, L.getLoopID());
+ if (I->isTerminator()) {
+ for (BasicBlock *Succ : successors(I))
+ if (Succ == L.getHeader()) {
+ I->setMetadata(llvm::LLVMContext::MD_loop, L.getLoopID());
break;
}
return;
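With TerminatorInst retired, the loop-metadata hook now asks the instruction itself whether it is a terminator and walks its successors through the iterator in llvm/IR/CFG.h, which is why the include changes above swap clang/Sema/LoopHint.h for llvm/IR/CFG.h. A stand-alone sketch of that walk, assuming an LLVM build environment and caller-supplied values:

  #include "llvm/IR/BasicBlock.h"
  #include "llvm/IR/CFG.h"          // provides successors(Instruction *)
  #include "llvm/IR/Instruction.h"
  #include "llvm/IR/LLVMContext.h"
  #include "llvm/IR/Metadata.h"

  // Attach !llvm.loop metadata to a branch that jumps back to the loop header.
  // Sketch only: I, Header and LoopID are assumed to come from the caller.
  void tagBackEdge(llvm::Instruction *I, llvm::BasicBlock *Header,
                   llvm::MDNode *LoopID) {
    if (!I->isTerminator())
      return;
    for (llvm::BasicBlock *Succ : llvm::successors(I))
      if (Succ == Header) {
        I->setMetadata(llvm::LLVMContext::MD_loop, LoopID);
        break;
      }
  }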
diff --git a/lib/CodeGen/CGNonTrivialStruct.cpp b/lib/CodeGen/CGNonTrivialStruct.cpp
index e9f60a9113..c6a96a9126 100644
--- a/lib/CodeGen/CGNonTrivialStruct.cpp
+++ b/lib/CodeGen/CGNonTrivialStruct.cpp
@@ -187,6 +187,7 @@ template <class Derived> struct GenFuncNameBase {
if (!FK)
return asDerived().visitTrivial(QualType(AT, 0), FD, CurStructOffset);
+ asDerived().flushTrivialFields();
CharUnits FieldOffset = CurStructOffset + asDerived().getFieldOffset(FD);
ASTContext &Ctx = asDerived().getContext();
const ConstantArrayType *CAT = cast<ConstantArrayType>(AT);
@@ -336,6 +337,7 @@ template <class Derived> struct GenFuncBase {
return asDerived().visitTrivial(QualType(AT, 0), FD, CurStackOffset,
Addrs);
+ asDerived().flushTrivialFields(Addrs);
CodeGenFunction &CGF = *this->CGF;
ASTContext &Ctx = CGF.getContext();
@@ -456,12 +458,13 @@ template <class Derived> struct GenFuncBase {
llvm::Function::Create(FuncTy, llvm::GlobalValue::LinkOnceODRLinkage,
FuncName, &CGM.getModule());
F->setVisibility(llvm::GlobalValue::HiddenVisibility);
- CGM.SetLLVMFunctionAttributes(nullptr, FI, F);
+ CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F);
CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
IdentifierInfo *II = &Ctx.Idents.get(FuncName);
FunctionDecl *FD = FunctionDecl::Create(
Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
- II, Ctx.VoidTy, nullptr, SC_PrivateExtern, false, false);
+ II, Ctx.getFunctionType(Ctx.VoidTy, llvm::None, {}), nullptr,
+ SC_PrivateExtern, false, false);
CodeGenFunction NewCGF(CGM);
setCGF(&NewCGF);
CGF->StartFunction(FD, Ctx.VoidTy, F, FI, Args);
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
index c62f40b790..cc582b926b 100644
--- a/lib/CodeGen/CGObjC.cpp
+++ b/lib/CodeGen/CGObjC.cpp
@@ -352,6 +352,56 @@ static const Expr *findWeakLValue(const Expr *E) {
return nullptr;
}
+/// The ObjC runtime may provide entrypoints that are likely to be faster
+/// than an ordinary message send of the appropriate selector.
+///
+/// The entrypoints are guaranteed to be equivalent to just sending the
+/// corresponding message. If the entrypoint is implemented naively as just a
+/// message send, using it is a trade-off: it sacrifices a few cycles of
+/// overhead to save a small amount of code. However, it's possible for
+/// runtimes to detect and special-case classes that use "standard"
+/// behavior; if that's dynamically a large proportion of all objects, using
+/// the entrypoint will also be faster than using a message send.
+///
+/// If the runtime does support a required entrypoint, then this method will
+/// generate a call and return the resulting value. Otherwise it will return
+/// None and the caller can generate a msgSend instead.
+static Optional<llvm::Value *>
+tryGenerateSpecializedMessageSend(CodeGenFunction &CGF, QualType ResultType,
+ llvm::Value *Receiver,
+ const CallArgList& Args, Selector Sel,
+ const ObjCMethodDecl *method) {
+ auto &CGM = CGF.CGM;
+ if (!CGM.getCodeGenOpts().ObjCConvertMessagesToRuntimeCalls)
+ return None;
+
+ auto &Runtime = CGM.getLangOpts().ObjCRuntime;
+ switch (Sel.getMethodFamily()) {
+ case OMF_alloc:
+ if (Runtime.shouldUseRuntimeFunctionsForAlloc() &&
+ ResultType->isObjCObjectPointerType()) {
+ // [Foo alloc] -> objc_alloc(Foo)
+ if (Sel.isUnarySelector() && Sel.getNameForSlot(0) == "alloc")
+ return CGF.EmitObjCAlloc(Receiver, CGF.ConvertType(ResultType));
+ // [Foo allocWithZone:nil] -> objc_allocWithZone(Foo)
+ if (Sel.isKeywordSelector() && Sel.getNumArgs() == 1 &&
+ Args.size() == 1 && Args.front().getType()->isPointerType() &&
+ Sel.getNameForSlot(0) == "allocWithZone") {
+ const llvm::Value* arg = Args.front().getKnownRValue().getScalarVal();
+ if (isa<llvm::ConstantPointerNull>(arg))
+ return CGF.EmitObjCAllocWithZone(Receiver,
+ CGF.ConvertType(ResultType));
+ return None;
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+ return None;
+}
+
RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
ReturnValueSlot Return) {
// Only the lookup mechanism and first two arguments of the method
@@ -474,10 +524,16 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
Args,
method);
} else {
- result = Runtime.GenerateMessageSend(*this, Return, ResultType,
- E->getSelector(),
- Receiver, Args, OID,
- method);
+ // Call runtime methods directly if we can.
+ if (Optional<llvm::Value *> SpecializedResult =
+ tryGenerateSpecializedMessageSend(*this, ResultType, Receiver, Args,
+ E->getSelector(), method)) {
+ result = RValue::get(SpecializedResult.getValue());
+ } else {
+ result = Runtime.GenerateMessageSend(*this, Return, ResultType,
+ E->getSelector(), Receiver, Args,
+ OID, method);
+ }
}
// For delegate init calls in ARC, implicitly store the result of
@@ -568,7 +624,7 @@ static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
LValue lvalue, QualType type);
/// Generate an Objective-C method. An Objective-C method is a C function with
-/// its pointer, name, and types registered in the class struture.
+/// its pointer, name, and types registered in the class structure.
void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
StartObjCMethod(OMD, OMD->getClassInterface());
PGO.assignRegionCounters(GlobalDecl(OMD), CurFn);
@@ -883,9 +939,10 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
// If there's a non-trivial 'get' expression, we just have to emit that.
if (!hasTrivialGetExpr(propImpl)) {
if (!AtomicHelperFn) {
- ReturnStmt ret(SourceLocation(), propImpl->getGetterCXXConstructor(),
- /*nrvo*/ nullptr);
- EmitReturnStmt(ret);
+ auto *ret = ReturnStmt::Create(getContext(), SourceLocation(),
+ propImpl->getGetterCXXConstructor(),
+ /* NRVOCandidate=*/nullptr);
+ EmitReturnStmt(*ret);
}
else {
ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
@@ -1844,6 +1901,7 @@ static llvm::Constant *createARCRuntimeFunction(CodeGenModule &CGM,
/// where a null input causes a no-op and returns null.
static llvm::Value *emitARCValueOperation(CodeGenFunction &CGF,
llvm::Value *value,
+ llvm::Type *returnType,
llvm::Constant *&fn,
StringRef fnName,
bool isTailCall = false) {
@@ -1857,7 +1915,7 @@ static llvm::Value *emitARCValueOperation(CodeGenFunction &CGF,
}
// Cast the argument to 'id'.
- llvm::Type *origType = value->getType();
+ llvm::Type *origType = returnType ? returnType : value->getType();
value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy);
// Call the function.
@@ -1963,7 +2021,7 @@ llvm::Value *CodeGenFunction::EmitARCRetain(QualType type, llvm::Value *value) {
/// Retain the given object, with normal retain semantics.
/// call i8* \@objc_retain(i8* %value)
llvm::Value *CodeGenFunction::EmitARCRetainNonBlock(llvm::Value *value) {
- return emitARCValueOperation(*this, value,
+ return emitARCValueOperation(*this, value, nullptr,
CGM.getObjCEntrypoints().objc_retain,
"objc_retain");
}
@@ -1977,7 +2035,7 @@ llvm::Value *CodeGenFunction::EmitARCRetainNonBlock(llvm::Value *value) {
llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value,
bool mandatory) {
llvm::Value *result
- = emitARCValueOperation(*this, value,
+ = emitARCValueOperation(*this, value, nullptr,
CGM.getObjCEntrypoints().objc_retainBlock,
"objc_retainBlock");
@@ -2047,7 +2105,7 @@ static void emitAutoreleasedReturnValueMarker(CodeGenFunction &CGF) {
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
emitAutoreleasedReturnValueMarker(*this);
- return emitARCValueOperation(*this, value,
+ return emitARCValueOperation(*this, value, nullptr,
CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue,
"objc_retainAutoreleasedReturnValue");
}
@@ -2062,7 +2120,7 @@ CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
llvm::Value *
CodeGenFunction::EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value) {
emitAutoreleasedReturnValueMarker(*this);
- return emitARCValueOperation(*this, value,
+ return emitARCValueOperation(*this, value, nullptr,
CGM.getObjCEntrypoints().objc_unsafeClaimAutoreleasedReturnValue,
"objc_unsafeClaimAutoreleasedReturnValue");
}
@@ -2177,7 +2235,7 @@ llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst,
/// Autorelease the given object.
/// call i8* \@objc_autorelease(i8* %value)
llvm::Value *CodeGenFunction::EmitARCAutorelease(llvm::Value *value) {
- return emitARCValueOperation(*this, value,
+ return emitARCValueOperation(*this, value, nullptr,
CGM.getObjCEntrypoints().objc_autorelease,
"objc_autorelease");
}
@@ -2186,7 +2244,7 @@ llvm::Value *CodeGenFunction::EmitARCAutorelease(llvm::Value *value) {
/// call i8* \@objc_autoreleaseReturnValue(i8* %value)
llvm::Value *
CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) {
- return emitARCValueOperation(*this, value,
+ return emitARCValueOperation(*this, value, nullptr,
CGM.getObjCEntrypoints().objc_autoreleaseReturnValue,
"objc_autoreleaseReturnValue",
/*isTailCall*/ true);
@@ -2196,7 +2254,7 @@ CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) {
/// call i8* \@objc_retainAutoreleaseReturnValue(i8* %value)
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleaseReturnValue(llvm::Value *value) {
- return emitARCValueOperation(*this, value,
+ return emitARCValueOperation(*this, value, nullptr,
CGM.getObjCEntrypoints().objc_retainAutoreleaseReturnValue,
"objc_retainAutoreleaseReturnValue",
/*isTailCall*/ true);
@@ -2225,7 +2283,7 @@ llvm::Value *CodeGenFunction::EmitARCRetainAutorelease(QualType type,
/// call i8* \@objc_retainAutorelease(i8* %value)
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleaseNonBlock(llvm::Value *value) {
- return emitARCValueOperation(*this, value,
+ return emitARCValueOperation(*this, value, nullptr,
CGM.getObjCEntrypoints().objc_retainAutorelease,
"objc_retainAutorelease");
}
@@ -2384,6 +2442,24 @@ llvm::Value *CodeGenFunction::EmitObjCMRRAutoreleasePoolPush() {
return InitRV.getScalarVal();
}
+/// Allocate the given objc object.
+/// call i8* \@objc_alloc(i8* %value)
+llvm::Value *CodeGenFunction::EmitObjCAlloc(llvm::Value *value,
+ llvm::Type *resultType) {
+ return emitARCValueOperation(*this, value, resultType,
+ CGM.getObjCEntrypoints().objc_alloc,
+ "objc_alloc");
+}
+
+/// Allocate the given objc object.
+/// call i8* \@objc_allocWithZone(i8* %value)
+llvm::Value *CodeGenFunction::EmitObjCAllocWithZone(llvm::Value *value,
+ llvm::Type *resultType) {
+ return emitARCValueOperation(*this, value, resultType,
+ CGM.getObjCEntrypoints().objc_allocWithZone,
+ "objc_allocWithZone");
+}
+
/// Produce the code to do a primitive release.
/// [tmp drain];
void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) {
@@ -2446,27 +2522,36 @@ void CodeGenFunction::EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr) {
EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, Ptr);
}
-static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
- LValue lvalue,
- QualType type) {
- switch (type.getObjCLifetime()) {
+static bool shouldRetainObjCLifetime(Qualifiers::ObjCLifetime lifetime) {
+ switch (lifetime) {
case Qualifiers::OCL_None:
case Qualifiers::OCL_ExplicitNone:
case Qualifiers::OCL_Strong:
case Qualifiers::OCL_Autoreleasing:
- return TryEmitResult(CGF.EmitLoadOfLValue(lvalue,
- SourceLocation()).getScalarVal(),
- false);
+ return true;
case Qualifiers::OCL_Weak:
- return TryEmitResult(CGF.EmitARCLoadWeakRetained(lvalue.getAddress()),
- true);
+ return false;
}
llvm_unreachable("impossible lifetime!");
}
static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
+ LValue lvalue,
+ QualType type) {
+ llvm::Value *result;
+ bool shouldRetain = shouldRetainObjCLifetime(type.getObjCLifetime());
+ if (shouldRetain) {
+ result = CGF.EmitLoadOfLValue(lvalue, SourceLocation()).getScalarVal();
+ } else {
+ assert(type.getObjCLifetime() == Qualifiers::OCL_Weak);
+ result = CGF.EmitARCLoadWeakRetained(lvalue.getAddress());
+ }
+ return TryEmitResult(result, !shouldRetain);
+}
+
+static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
const Expr *e) {
e = e->IgnoreParens();
QualType type = e->getType();
@@ -2500,6 +2585,16 @@ static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
cast<BinaryOperator>(e)->getOpcode() == BO_Assign)
return TryEmitResult(CGF.EmitScalarExpr(e), false);
+ // Try to emit code for scalar constant instead of emitting LValue and
+ // loading it because we are not guaranteed to have an l-value. One of such
+ // cases is DeclRefExpr referencing non-odr-used constant-evaluated variable.
+ if (const auto *decl_expr = dyn_cast<DeclRefExpr>(e)) {
+ auto *DRE = const_cast<DeclRefExpr *>(decl_expr);
+ if (CodeGenFunction::ConstantEmission constant = CGF.tryEmitAsConstant(DRE))
+ return TryEmitResult(CGF.emitScalarConstant(constant, DRE),
+ !shouldRetainObjCLifetime(type.getObjCLifetime()));
+ }
+
return tryEmitARCRetainLoadOfScalar(CGF, CGF.EmitLValue(e), type);
}
@@ -3229,29 +3324,32 @@ CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
ASTContext &C = getContext();
IdentifierInfo *II
= &CGM.getContext().Idents.get("__assign_helper_atomic_property_");
- FunctionDecl *FD = FunctionDecl::Create(C,
- C.getTranslationUnitDecl(),
- SourceLocation(),
- SourceLocation(), II, C.VoidTy,
- nullptr, SC_Static,
- false,
- false);
+ QualType ReturnTy = C.VoidTy;
QualType DestTy = C.getPointerType(Ty);
QualType SrcTy = Ty;
SrcTy.addConst();
SrcTy = C.getPointerType(SrcTy);
+ SmallVector<QualType, 2> ArgTys;
+ ArgTys.push_back(DestTy);
+ ArgTys.push_back(SrcTy);
+ QualType FunctionTy = C.getFunctionType(ReturnTy, ArgTys, {});
+
+ FunctionDecl *FD = FunctionDecl::Create(
+ C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
+ FunctionTy, nullptr, SC_Static, false, false);
+
FunctionArgList args;
- ImplicitParamDecl DstDecl(getContext(), FD, SourceLocation(), /*Id=*/nullptr,
- DestTy, ImplicitParamDecl::Other);
+ ImplicitParamDecl DstDecl(C, FD, SourceLocation(), /*Id=*/nullptr, DestTy,
+ ImplicitParamDecl::Other);
args.push_back(&DstDecl);
- ImplicitParamDecl SrcDecl(getContext(), FD, SourceLocation(), /*Id=*/nullptr,
- SrcTy, ImplicitParamDecl::Other);
+ ImplicitParamDecl SrcDecl(C, FD, SourceLocation(), /*Id=*/nullptr, SrcTy,
+ ImplicitParamDecl::Other);
args.push_back(&SrcDecl);
const CGFunctionInfo &FI =
- CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, args);
+ CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, args);
llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);
@@ -3262,7 +3360,7 @@ CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);
- StartFunction(FD, C.VoidTy, Fn, FI, args);
+ StartFunction(FD, ReturnTy, Fn, FI, args);
DeclRefExpr DstExpr(&DstDecl, false, DestTy,
VK_RValue, SourceLocation());
@@ -3301,50 +3399,51 @@ CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
if ((!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic)))
return nullptr;
llvm::Constant *HelperFn = nullptr;
-
if (hasTrivialGetExpr(PID))
return nullptr;
assert(PID->getGetterCXXConstructor() && "getGetterCXXConstructor - null");
if ((HelperFn = CGM.getAtomicGetterHelperFnMap(Ty)))
return HelperFn;
-
ASTContext &C = getContext();
- IdentifierInfo *II
- = &CGM.getContext().Idents.get("__copy_helper_atomic_property_");
- FunctionDecl *FD = FunctionDecl::Create(C,
- C.getTranslationUnitDecl(),
- SourceLocation(),
- SourceLocation(), II, C.VoidTy,
- nullptr, SC_Static,
- false,
- false);
+ IdentifierInfo *II =
+ &CGM.getContext().Idents.get("__copy_helper_atomic_property_");
+ QualType ReturnTy = C.VoidTy;
QualType DestTy = C.getPointerType(Ty);
QualType SrcTy = Ty;
SrcTy.addConst();
SrcTy = C.getPointerType(SrcTy);
+ SmallVector<QualType, 2> ArgTys;
+ ArgTys.push_back(DestTy);
+ ArgTys.push_back(SrcTy);
+ QualType FunctionTy = C.getFunctionType(ReturnTy, ArgTys, {});
+
+ FunctionDecl *FD = FunctionDecl::Create(
+ C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
+ FunctionTy, nullptr, SC_Static, false, false);
+
FunctionArgList args;
- ImplicitParamDecl DstDecl(getContext(), FD, SourceLocation(), /*Id=*/nullptr,
- DestTy, ImplicitParamDecl::Other);
+ ImplicitParamDecl DstDecl(C, FD, SourceLocation(), /*Id=*/nullptr, DestTy,
+ ImplicitParamDecl::Other);
args.push_back(&DstDecl);
- ImplicitParamDecl SrcDecl(getContext(), FD, SourceLocation(), /*Id=*/nullptr,
- SrcTy, ImplicitParamDecl::Other);
+ ImplicitParamDecl SrcDecl(C, FD, SourceLocation(), /*Id=*/nullptr, SrcTy,
+ ImplicitParamDecl::Other);
args.push_back(&SrcDecl);
const CGFunctionInfo &FI =
- CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, args);
+ CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, args);
llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);
- llvm::Function *Fn =
- llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
- "__copy_helper_atomic_property_", &CGM.getModule());
+ llvm::Function *Fn = llvm::Function::Create(
+ LTy, llvm::GlobalValue::InternalLinkage, "__copy_helper_atomic_property_",
+ &CGM.getModule());
CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);
- StartFunction(FD, C.VoidTy, Fn, FI, args);
+ StartFunction(FD, ReturnTy, Fn, FI, args);
DeclRefExpr SrcExpr(&SrcDecl, false, SrcTy,
VK_RValue, SourceLocation());
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
index 2b6d895f93..d91eb43ca3 100644
--- a/lib/CodeGen/CGObjCMac.cpp
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -23,9 +23,9 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtObjC.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/LangOptions.h"
#include "clang/CodeGen/CGFunctionInfo.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/CachedHashString.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SetVector.h"
@@ -7188,15 +7188,21 @@ CGObjCNonFragileABIMac::GetClassGlobal(StringRef Name,
Weak ? llvm::GlobalValue::ExternalWeakLinkage
: llvm::GlobalValue::ExternalLinkage;
-
-
llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
- if (!GV) {
- GV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABITy,
- false, L, nullptr, Name);
+ if (!GV || GV->getType() != ObjCTypes.ClassnfABITy->getPointerTo()) {
+ auto *NewGV = new llvm::GlobalVariable(ObjCTypes.ClassnfABITy, false, L,
+ nullptr, Name);
if (DLLImport)
- GV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
+ NewGV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
+
+ if (GV) {
+ GV->replaceAllUsesWith(
+ llvm::ConstantExpr::getBitCast(NewGV, GV->getType()));
+ GV->eraseFromParent();
+ }
+ GV = NewGV;
+ CGM.getModule().getGlobalList().push_back(GV);
}
assert(GV->getLinkage() == L);
diff --git a/lib/CodeGen/CGObjCRuntime.cpp b/lib/CodeGen/CGObjCRuntime.cpp
index 8390bca737..4b6f24a03f 100644
--- a/lib/CodeGen/CGObjCRuntime.cpp
+++ b/lib/CodeGen/CGObjCRuntime.cpp
@@ -296,7 +296,7 @@ void CGObjCRuntime::EmitInitOfCatchParam(CodeGenFunction &CGF,
switch (paramDecl->getType().getQualifiers().getObjCLifetime()) {
case Qualifiers::OCL_Strong:
exn = CGF.EmitARCRetainNonBlock(exn);
- // fallthrough
+ LLVM_FALLTHROUGH;
case Qualifiers::OCL_None:
case Qualifiers::OCL_ExplicitNone:
diff --git a/lib/CodeGen/CGOpenCLRuntime.cpp b/lib/CodeGen/CGOpenCLRuntime.cpp
index 1da19a90c3..7f6f595dd5 100644
--- a/lib/CodeGen/CGOpenCLRuntime.cpp
+++ b/lib/CodeGen/CGOpenCLRuntime.cpp
@@ -62,6 +62,11 @@ llvm::Type *CGOpenCLRuntime::convertOpenCLSpecificType(const Type *T) {
case BuiltinType::OCLReserveID:
return llvm::PointerType::get(
llvm::StructType::create(Ctx, "opencl.reserve_id_t"), AddrSpc);
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id: \
+ return llvm::PointerType::get( \
+ llvm::StructType::create(Ctx, "opencl." #ExtType), AddrSpc);
+#include "clang/Basic/OpenCLExtensionTypes.def"
}
}
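The EXT_OPAQUE_TYPE block relies on the usual .def x-macro technique: the including file defines the macro, includes clang/Basic/OpenCLExtensionTypes.def, and gets one case expansion per extension type. A minimal self-contained illustration of the pattern (the names are invented for the sketch and are not the contents of the real .def file):

  #include <cstdio>

  // Stand-in for a .def file: one entry per item, consumed by whichever file
  // defines HANDLE_SHAPE before expanding it.
  #define SHAPE_TYPES \
    HANDLE_SHAPE(Circle) \
    HANDLE_SHAPE(Square)

  enum ShapeKind {
  #define HANDLE_SHAPE(Name) SK_##Name,
    SHAPE_TYPES
  #undef HANDLE_SHAPE
  };

  const char *shapeName(ShapeKind K) {
    switch (K) {
  #define HANDLE_SHAPE(Name) case SK_##Name: return #Name;
    SHAPE_TYPES
  #undef HANDLE_SHAPE
    }
    return "unknown";
  }

  int main() { std::printf("%s\n", shapeName(SK_Square)); }  // prints "Square"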
@@ -118,25 +123,6 @@ llvm::PointerType *CGOpenCLRuntime::getGenericVoidPointerType() {
CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
}
-// Get the block literal from an expression derived from the block expression.
-// OpenCL v2.0 s6.12.5:
-// Block variable declarations are implicitly qualified with const. Therefore
-// all block variables must be initialized at declaration time and may not be
-// reassigned.
-static const BlockExpr *getBlockExpr(const Expr *E) {
- if (auto Cast = dyn_cast<CastExpr>(E)) {
- E = Cast->getSubExpr();
- }
- if (auto DR = dyn_cast<DeclRefExpr>(E)) {
- E = cast<VarDecl>(DR->getDecl())->getInit();
- }
- E = E->IgnoreImplicit();
- if (auto Cast = dyn_cast<CastExpr>(E)) {
- E = Cast->getSubExpr();
- }
- return cast<BlockExpr>(E);
-}
-
/// Record emitted llvm invoke function and llvm block literal for the
/// corresponding block expression.
void CGOpenCLRuntime::recordBlockInfo(const BlockExpr *E,
@@ -151,15 +137,21 @@ void CGOpenCLRuntime::recordBlockInfo(const BlockExpr *E,
EnqueuedBlockMap[E].Kernel = nullptr;
}
-llvm::Function *CGOpenCLRuntime::getInvokeFunction(const Expr *E) {
- return EnqueuedBlockMap[getBlockExpr(E)].InvokeFunc;
-}
-
CGOpenCLRuntime::EnqueuedBlockInfo
CGOpenCLRuntime::emitOpenCLEnqueuedBlock(CodeGenFunction &CGF, const Expr *E) {
CGF.EmitScalarExpr(E);
- const BlockExpr *Block = getBlockExpr(E);
+ // The block literal may be assigned to a const variable. Chasing down
+ // to get the block literal.
+ if (auto DR = dyn_cast<DeclRefExpr>(E)) {
+ E = cast<VarDecl>(DR->getDecl())->getInit();
+ }
+ E = E->IgnoreImplicit();
+ if (auto Cast = dyn_cast<CastExpr>(E)) {
+ E = Cast->getSubExpr();
+ }
+ auto *Block = cast<BlockExpr>(E);
+
assert(EnqueuedBlockMap.find(Block) != EnqueuedBlockMap.end() &&
"Block expression not emitted");
diff --git a/lib/CodeGen/CGOpenCLRuntime.h b/lib/CodeGen/CGOpenCLRuntime.h
index a513340827..3da55af065 100644
--- a/lib/CodeGen/CGOpenCLRuntime.h
+++ b/lib/CodeGen/CGOpenCLRuntime.h
@@ -91,10 +91,6 @@ public:
/// \param Block block literal emitted for the block expression.
void recordBlockInfo(const BlockExpr *E, llvm::Function *InvokeF,
llvm::Value *Block);
-
- /// \return LLVM block invoke function emitted for an expression derived from
- /// the block expression.
- llvm::Function *getInvokeFunction(const Expr *E);
};
}
diff --git a/lib/CodeGen/CGOpenMPRuntime.cpp b/lib/CodeGen/CGOpenMPRuntime.cpp
index d421729faa..66f0783e27 100644
--- a/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -1223,6 +1223,17 @@ CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
void CGOpenMPRuntime::clear() {
InternalVars.clear();
+ // Clean non-target variable declarations possibly used only in debug info.
+ for (const auto &Data : EmittedNonTargetVariables) {
+ if (!Data.getValue().pointsToAliveValue())
+ continue;
+ auto *GV = dyn_cast<llvm::GlobalVariable>(Data.getValue());
+ if (!GV)
+ continue;
+ if (!GV->isDeclaration() || GV->getNumUses() > 0)
+ continue;
+ GV->eraseFromParent();
+ }
}
std::string CGOpenMPRuntime::getName(ArrayRef<StringRef> Parts) const {
@@ -1456,7 +1467,9 @@ createConstantGlobalStructAndAddToParent(CodeGenModule &CGM, QualType Ty,
Address CGOpenMPRuntime::getOrCreateDefaultLocation(unsigned Flags) {
CharUnits Align = CGM.getContext().getTypeAlignInChars(IdentQTy);
- llvm::Value *Entry = OpenMPDefaultLocMap.lookup(Flags);
+ unsigned Reserved2Flags = getDefaultLocationReserved2Flags();
+ FlagsTy FlagsKey(Flags, Reserved2Flags);
+ llvm::Value *Entry = OpenMPDefaultLocMap.lookup(FlagsKey);
if (!Entry) {
if (!DefaultOpenMPPSource) {
// Initialize default location for psource field of ident_t structure of
@@ -1469,22 +1482,47 @@ Address CGOpenMPRuntime::getOrCreateDefaultLocation(unsigned Flags) {
llvm::ConstantExpr::getBitCast(DefaultOpenMPPSource, CGM.Int8PtrTy);
}
- llvm::Constant *Data[] = {llvm::ConstantInt::getNullValue(CGM.Int32Ty),
- llvm::ConstantInt::get(CGM.Int32Ty, Flags),
- llvm::ConstantInt::getNullValue(CGM.Int32Ty),
- llvm::ConstantInt::getNullValue(CGM.Int32Ty),
- DefaultOpenMPPSource};
+ llvm::Constant *Data[] = {
+ llvm::ConstantInt::getNullValue(CGM.Int32Ty),
+ llvm::ConstantInt::get(CGM.Int32Ty, Flags),
+ llvm::ConstantInt::get(CGM.Int32Ty, Reserved2Flags),
+ llvm::ConstantInt::getNullValue(CGM.Int32Ty), DefaultOpenMPPSource};
llvm::GlobalValue *DefaultOpenMPLocation =
- createGlobalStruct(CGM, IdentQTy, /*IsConstant=*/false, Data, "",
+ createGlobalStruct(CGM, IdentQTy, isDefaultLocationConstant(), Data, "",
llvm::GlobalValue::PrivateLinkage);
DefaultOpenMPLocation->setUnnamedAddr(
llvm::GlobalValue::UnnamedAddr::Global);
- OpenMPDefaultLocMap[Flags] = Entry = DefaultOpenMPLocation;
+ OpenMPDefaultLocMap[FlagsKey] = Entry = DefaultOpenMPLocation;
}
return Address(Entry, Align);
}
+void CGOpenMPRuntime::setLocThreadIdInsertPt(CodeGenFunction &CGF,
+ bool AtCurrentPoint) {
+ auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
+ assert(!Elem.second.ServiceInsertPt && "Insert point is set already.");
+
+ llvm::Value *Undef = llvm::UndefValue::get(CGF.Int32Ty);
+ if (AtCurrentPoint) {
+ Elem.second.ServiceInsertPt = new llvm::BitCastInst(
+ Undef, CGF.Int32Ty, "svcpt", CGF.Builder.GetInsertBlock());
+ } else {
+ Elem.second.ServiceInsertPt =
+ new llvm::BitCastInst(Undef, CGF.Int32Ty, "svcpt");
+ Elem.second.ServiceInsertPt->insertAfter(CGF.AllocaInsertPt);
+ }
+}
+
+void CGOpenMPRuntime::clearLocThreadIdInsertPt(CodeGenFunction &CGF) {
+ auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
+ if (Elem.second.ServiceInsertPt) {
+ llvm::Instruction *Ptr = Elem.second.ServiceInsertPt;
+ Elem.second.ServiceInsertPt = nullptr;
+ Ptr->eraseFromParent();
+ }
+}
+
llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
SourceLocation Loc,
unsigned Flags) {
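The Data[] initializer above fills in the fields of the OpenMP runtime's ident_t location descriptor; the change threads a second flags word (Reserved2Flags) into the third slot instead of a hard-coded zero. For orientation, a source-level sketch of the descriptor being materialized (field names follow the libomp convention; treat this as an approximation, not the authoritative definition):

  #include <cstdint>

  // Approximate shape of the ident_t descriptor the five constants populate.
  struct IdentTSketch {
    int32_t reserved_1;   // always 0
    int32_t flags;        // e.g. OMP_IDENT_BARRIER_IMPL, chosen per call site
    int32_t reserved_2;   // previously always 0, now carries Reserved2Flags
    int32_t reserved_3;   // always 0
    const char *psource;  // ";file;function;line;column;;" style location string
  };

  // Rough C++ analogue of the default location built above.
  const IdentTSketch DefaultLoc = {0, /*Flags=*/0, /*Reserved2Flags=*/0, 0,
                                   ";unknown;unknown;0;0;;"};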
@@ -1511,8 +1549,10 @@ llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
Elem.second.DebugLoc = AI.getPointer();
LocValue = AI;
+ if (!Elem.second.ServiceInsertPt)
+ setLocThreadIdInsertPt(CGF);
CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
- CGF.Builder.SetInsertPoint(CGF.AllocaInsertPt);
+ CGF.Builder.SetInsertPoint(Elem.second.ServiceInsertPt);
CGF.Builder.CreateMemCpy(LocValue, getOrCreateDefaultLocation(Flags),
CGF.getTypeSize(IdentQTy));
}
@@ -1582,21 +1622,25 @@ llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
// kmpc_global_thread_num(ident_t *loc).
// Generate thread id value and cache this value for use across the
// function.
+ auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
+ if (!Elem.second.ServiceInsertPt)
+ setLocThreadIdInsertPt(CGF);
CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
- CGF.Builder.SetInsertPoint(CGF.AllocaInsertPt);
+ CGF.Builder.SetInsertPoint(Elem.second.ServiceInsertPt);
llvm::CallInst *Call = CGF.Builder.CreateCall(
createRuntimeFunction(OMPRTL__kmpc_global_thread_num),
emitUpdateLocation(CGF, Loc));
Call->setCallingConv(CGF.getRuntimeCC());
- auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
Elem.second.ThreadID = Call;
return Call;
}
void CGOpenMPRuntime::functionFinished(CodeGenFunction &CGF) {
assert(CGF.CurFn && "No function in current CodeGenFunction.");
- if (OpenMPLocThreadIDMap.count(CGF.CurFn))
+ if (OpenMPLocThreadIDMap.count(CGF.CurFn)) {
+ clearLocThreadIdInsertPt(CGF);
OpenMPLocThreadIDMap.erase(CGF.CurFn);
+ }
if (FunctionUDRMap.count(CGF.CurFn) > 0) {
for(auto *D : FunctionUDRMap[CGF.CurFn])
UDRMap.erase(D);
@@ -2470,8 +2514,7 @@ llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
return nullptr;
VD = VD->getDefinition(CGM.getContext());
- if (VD && ThreadPrivateWithDefinition.count(VD) == 0) {
- ThreadPrivateWithDefinition.insert(VD);
+ if (VD && ThreadPrivateWithDefinition.insert(CGM.getMangledName(VD)).second) {
QualType ASTTy = VD->getType();
llvm::Value *Ctor = nullptr, *CopyCtor = nullptr, *Dtor = nullptr;
@@ -2617,7 +2660,7 @@ bool CGOpenMPRuntime::emitDeclareTargetVarDefinition(const VarDecl *VD,
if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_Link)
return CGM.getLangOpts().OpenMPIsDevice;
VD = VD->getDefinition(CGM.getContext());
- if (VD && !DeclareTargetWithDefinition.insert(VD).second)
+ if (VD && !DeclareTargetWithDefinition.insert(CGM.getMangledName(VD)).second)
return CGM.getLangOpts().OpenMPIsDevice;
QualType ASTTy = VD->getType();
@@ -3171,13 +3214,7 @@ void CGOpenMPRuntime::emitOrderedRegion(CodeGenFunction &CGF,
emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
}
-void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
- OpenMPDirectiveKind Kind, bool EmitChecks,
- bool ForceSimpleCall) {
- if (!CGF.HaveInsertPoint())
- return;
- // Build call __kmpc_cancel_barrier(loc, thread_id);
- // Build call __kmpc_barrier(loc, thread_id);
+unsigned CGOpenMPRuntime::getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind) {
unsigned Flags;
if (Kind == OMPD_for)
Flags = OMP_IDENT_BARRIER_IMPL_FOR;
@@ -3189,6 +3226,17 @@ void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
Flags = OMP_IDENT_BARRIER_EXPL;
else
Flags = OMP_IDENT_BARRIER_IMPL;
+ return Flags;
+}
+
+void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
+ OpenMPDirectiveKind Kind, bool EmitChecks,
+ bool ForceSimpleCall) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ // Build call __kmpc_cancel_barrier(loc, thread_id);
+ // Build call __kmpc_barrier(loc, thread_id);
+ unsigned Flags = getDefaultFlagsForBarriers(Kind);
// Build call __kmpc_cancel_barrier(loc, thread_id) or __kmpc_barrier(loc,
// thread_id);
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
@@ -3261,6 +3309,18 @@ bool CGOpenMPRuntime::isStaticNonchunked(
return Schedule == OMP_dist_sch_static;
}
+bool CGOpenMPRuntime::isStaticChunked(OpenMPScheduleClauseKind ScheduleKind,
+ bool Chunked) const {
+ OpenMPSchedType Schedule =
+ getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false);
+ return Schedule == OMP_sch_static_chunked;
+}
+
+bool CGOpenMPRuntime::isStaticChunked(
+ OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const {
+ OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunked);
+ return Schedule == OMP_dist_sch_static_chunked;
+}
bool CGOpenMPRuntime::isDynamic(OpenMPScheduleClauseKind ScheduleKind) const {
OpenMPSchedType Schedule =
@@ -3881,6 +3941,8 @@ void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
llvm::LLVMContext &C = M.getContext();
SmallVector<const OffloadEntriesInfoManagerTy::OffloadEntryInfo *, 16>
OrderedEntries(OffloadEntriesInfoManager.size());
+ llvm::SmallVector<StringRef, 16> ParentFunctions(
+ OffloadEntriesInfoManager.size());
// Auxiliary methods to create metadata values and strings.
auto &&GetMDInt = [this](unsigned V) {
@@ -3895,7 +3957,7 @@ void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
// Create function that emits metadata for each target region entry;
auto &&TargetRegionMetadataEmitter =
- [&C, MD, &OrderedEntries, &GetMDInt, &GetMDString](
+ [&C, MD, &OrderedEntries, &ParentFunctions, &GetMDInt, &GetMDString](
unsigned DeviceID, unsigned FileID, StringRef ParentName,
unsigned Line,
const OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion &E) {
@@ -3915,6 +3977,7 @@ void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
// Save this entry in the right position of the ordered entries array.
OrderedEntries[E.getOrder()] = &E;
+ ParentFunctions[E.getOrder()] = ParentName;
// Add metadata to the named metadata node.
MD->addOperand(llvm::MDNode::get(C, Ops));
@@ -3956,6 +4019,10 @@ void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
dyn_cast<OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion>(
E)) {
if (!CE->getID() || !CE->getAddress()) {
+        // Do not blame the entry if the parent function is not emitted.
+ StringRef FnName = ParentFunctions[CE->getOrder()];
+ if (!CGM.GetGlobalValue(FnName))
+ continue;
unsigned DiagID = CGM.getDiags().getCustomDiagID(
DiagnosticsEngine::Error,
"Offloading entry for target region is incorrect: either the "
@@ -5215,8 +5282,8 @@ void CGOpenMPRuntime::emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
LBLVal.getPointer(),
UBLVal.getPointer(),
CGF.EmitLoadOfScalar(StLVal, Loc),
- llvm::ConstantInt::getNullValue(
- CGF.IntTy), // Always 0 because taskgroup emitted by the compiler
+ llvm::ConstantInt::getSigned(
+        CGF.IntTy, 1), // Always 1 because the taskgroup is emitted by the compiler
llvm::ConstantInt::getSigned(
CGF.IntTy, Data.Schedule.getPointer()
? Data.Schedule.getInt() ? NumTasks : Grainsize
@@ -6735,10 +6802,11 @@ private:
}
// Check if the length evaluates to 1.
- llvm::APSInt ConstLength;
- if (!Length->EvaluateAsInt(ConstLength, CGF.getContext()))
+ Expr::EvalResult Result;
+ if (!Length->EvaluateAsInt(Result, CGF.getContext()))
    return true; // Can have more than size 1.
+ llvm::APSInt ConstLength = Result.Val.getInt();
return ConstLength.getSExtValue() != 1;
}
@@ -7489,6 +7557,82 @@ public:
}
}
+  /// Emit capture info for variables captured by reference in lambdas.
+ void generateInfoForLambdaCaptures(
+ const ValueDecl *VD, llvm::Value *Arg, MapBaseValuesArrayTy &BasePointers,
+ MapValuesArrayTy &Pointers, MapValuesArrayTy &Sizes,
+ MapFlagsArrayTy &Types,
+ llvm::DenseMap<llvm::Value *, llvm::Value *> &LambdaPointers) const {
+ const auto *RD = VD->getType()
+ .getCanonicalType()
+ .getNonReferenceType()
+ ->getAsCXXRecordDecl();
+ if (!RD || !RD->isLambda())
+ return;
+ Address VDAddr = Address(Arg, CGF.getContext().getDeclAlign(VD));
+ LValue VDLVal = CGF.MakeAddrLValue(
+ VDAddr, VD->getType().getCanonicalType().getNonReferenceType());
+ llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
+ FieldDecl *ThisCapture = nullptr;
+ RD->getCaptureFields(Captures, ThisCapture);
+ if (ThisCapture) {
+ LValue ThisLVal =
+ CGF.EmitLValueForFieldInitialization(VDLVal, ThisCapture);
+ LValue ThisLValVal = CGF.EmitLValueForField(VDLVal, ThisCapture);
+ LambdaPointers.try_emplace(ThisLVal.getPointer(), VDLVal.getPointer());
+ BasePointers.push_back(ThisLVal.getPointer());
+ Pointers.push_back(ThisLValVal.getPointer());
+ Sizes.push_back(CGF.getTypeSize(CGF.getContext().VoidPtrTy));
+ Types.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_LITERAL |
+ OMP_MAP_MEMBER_OF | OMP_MAP_IMPLICIT);
+ }
+ for (const LambdaCapture &LC : RD->captures()) {
+ if (LC.getCaptureKind() != LCK_ByRef)
+ continue;
+ const VarDecl *VD = LC.getCapturedVar();
+ auto It = Captures.find(VD);
+ assert(It != Captures.end() && "Found lambda capture without field.");
+ LValue VarLVal = CGF.EmitLValueForFieldInitialization(VDLVal, It->second);
+ LValue VarLValVal = CGF.EmitLValueForField(VDLVal, It->second);
+ LambdaPointers.try_emplace(VarLVal.getPointer(), VDLVal.getPointer());
+ BasePointers.push_back(VarLVal.getPointer());
+ Pointers.push_back(VarLValVal.getPointer());
+ Sizes.push_back(CGF.getTypeSize(
+ VD->getType().getCanonicalType().getNonReferenceType()));
+ Types.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_LITERAL |
+ OMP_MAP_MEMBER_OF | OMP_MAP_IMPLICIT);
+ }
+ }
+
+  /// Set correct indices for lambda captures.
+ void adjustMemberOfForLambdaCaptures(
+ const llvm::DenseMap<llvm::Value *, llvm::Value *> &LambdaPointers,
+ MapBaseValuesArrayTy &BasePointers, MapValuesArrayTy &Pointers,
+ MapFlagsArrayTy &Types) const {
+ for (unsigned I = 0, E = Types.size(); I < E; ++I) {
+ // Set correct member_of idx for all implicit lambda captures.
+ if (Types[I] != (OMP_MAP_PTR_AND_OBJ | OMP_MAP_LITERAL |
+ OMP_MAP_MEMBER_OF | OMP_MAP_IMPLICIT))
+ continue;
+ llvm::Value *BasePtr = LambdaPointers.lookup(*BasePointers[I]);
+ assert(BasePtr && "Unable to find base lambda address.");
+ int TgtIdx = -1;
+ for (unsigned J = I; J > 0; --J) {
+ unsigned Idx = J - 1;
+ if (Pointers[Idx] != BasePtr)
+ continue;
+ TgtIdx = Idx;
+ break;
+ }
+ assert(TgtIdx != -1 && "Unable to find parent lambda.");
+ // All other current entries will be MEMBER_OF the combined entry
+ // (except for PTR_AND_OBJ entries which do not have a placeholder value
+ // 0xFFFF in the MEMBER_OF field).
+ OpenMPOffloadMappingFlags MemberOfFlag = getMemberOfFlag(TgtIdx);
+ setCorrectMemberOfFlag(Types[I], MemberOfFlag);
+ }
+ }
+
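// Illustrative example, an assumption rather than a test from this patch:
// generateInfoForLambdaCaptures above targets code like the following, where
// 'X' is captured by reference in a lambda used inside a target region, so the
// storage 'X' refers to has to be mapped as a PTR_AND_OBJ member of the
// lambda object.
void lambdaCaptureExample() {
  int X = 0;
  auto L = [&X]() { X += 1; };
#pragma omp target
  L();
}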
/// Generate the base pointers, section pointers, sizes and map types
/// associated to a given capture.
void generateInfoForCapture(const CapturedStmt::Capture *Cap,
@@ -8061,6 +8205,7 @@ void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
// Get mappable expression information.
MappableExprsHandler MEHandler(D, CGF);
+ llvm::DenseMap<llvm::Value *, llvm::Value *> LambdaPointers;
auto RI = CS.getCapturedRecordDecl()->field_begin();
auto CV = CapturedVars.begin();
@@ -8090,6 +8235,12 @@ void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
if (CurBasePointers.empty())
MEHandler.generateDefaultMapInfo(*CI, **RI, *CV, CurBasePointers,
CurPointers, CurSizes, CurMapTypes);
+ // Generate correct mapping for variables captured by reference in
+ // lambdas.
+ if (CI->capturesVariable())
+ MEHandler.generateInfoForLambdaCaptures(
+ CI->getCapturedVar(), *CV, CurBasePointers, CurPointers, CurSizes,
+ CurMapTypes, LambdaPointers);
}
// We expect to have at least an element of information for this capture.
assert(!CurBasePointers.empty() &&
@@ -8111,6 +8262,9 @@ void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
Sizes.append(CurSizes.begin(), CurSizes.end());
MapTypes.append(CurMapTypes.begin(), CurMapTypes.end());
}
+  // Adjust MEMBER_OF flags for the lambda captures.
+ MEHandler.adjustMemberOfForLambdaCaptures(LambdaPointers, BasePointers,
+ Pointers, MapTypes);
// Map other list items in the map clause which are not captured variables
// but "declare target link" global variables.
MEHandler.generateInfoForDeclareTargetLink(BasePointers, Pointers, Sizes,
@@ -8304,14 +8458,15 @@ bool CGOpenMPRuntime::emitTargetFunctions(GlobalDecl GD) {
if (!CGM.getLangOpts().OpenMPIsDevice)
return false;
- // Try to detect target regions in the function.
const ValueDecl *VD = cast<ValueDecl>(GD.getDecl());
+ StringRef Name = CGM.getMangledName(GD);
+ // Try to detect target regions in the function.
if (const auto *FD = dyn_cast<FunctionDecl>(VD))
- scanForTargetRegionsFunctions(FD->getBody(), CGM.getMangledName(GD));
+ scanForTargetRegionsFunctions(FD->getBody(), Name);
  // Do not emit the function if it is not marked as declare target.
return !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD) &&
- AlreadyEmittedTargetFunctions.count(VD->getCanonicalDecl()) == 0;
+ AlreadyEmittedTargetFunctions.count(Name) == 0;
}
bool CGOpenMPRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
@@ -8348,54 +8503,62 @@ bool CGOpenMPRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
void CGOpenMPRuntime::registerTargetGlobalVariable(const VarDecl *VD,
llvm::Constant *Addr) {
- if (llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
- OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) {
- OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind Flags;
- StringRef VarName;
- CharUnits VarSize;
- llvm::GlobalValue::LinkageTypes Linkage;
- switch (*Res) {
- case OMPDeclareTargetDeclAttr::MT_To:
- Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo;
- VarName = CGM.getMangledName(VD);
- if (VD->hasDefinition(CGM.getContext()) != VarDecl::DeclarationOnly) {
- VarSize = CGM.getContext().getTypeSizeInChars(VD->getType());
- assert(!VarSize.isZero() && "Expected non-zero size of the variable");
- } else {
- VarSize = CharUnits::Zero();
- }
- Linkage = CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
- // Temp solution to prevent optimizations of the internal variables.
- if (CGM.getLangOpts().OpenMPIsDevice && !VD->isExternallyVisible()) {
- std::string RefName = getName({VarName, "ref"});
- if (!CGM.GetGlobalValue(RefName)) {
- llvm::Constant *AddrRef =
- getOrCreateInternalVariable(Addr->getType(), RefName);
- auto *GVAddrRef = cast<llvm::GlobalVariable>(AddrRef);
- GVAddrRef->setConstant(/*Val=*/true);
- GVAddrRef->setLinkage(llvm::GlobalValue::InternalLinkage);
- GVAddrRef->setInitializer(Addr);
- CGM.addCompilerUsedGlobal(GVAddrRef);
- }
- }
- break;
- case OMPDeclareTargetDeclAttr::MT_Link:
- Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryLink;
- if (CGM.getLangOpts().OpenMPIsDevice) {
- VarName = Addr->getName();
- Addr = nullptr;
- } else {
- VarName = getAddrOfDeclareTargetLink(VD).getName();
- Addr =
- cast<llvm::Constant>(getAddrOfDeclareTargetLink(VD).getPointer());
+ llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
+ OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
+ if (!Res) {
+ if (CGM.getLangOpts().OpenMPIsDevice) {
+ // Register non-target variables being emitted in device code (debug info
+ // may cause this).
+ StringRef VarName = CGM.getMangledName(VD);
+ EmittedNonTargetVariables.try_emplace(VarName, Addr);
+ }
+ return;
+ }
+ // Register declare target variables.
+ OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind Flags;
+ StringRef VarName;
+ CharUnits VarSize;
+ llvm::GlobalValue::LinkageTypes Linkage;
+ switch (*Res) {
+ case OMPDeclareTargetDeclAttr::MT_To:
+ Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo;
+ VarName = CGM.getMangledName(VD);
+ if (VD->hasDefinition(CGM.getContext()) != VarDecl::DeclarationOnly) {
+ VarSize = CGM.getContext().getTypeSizeInChars(VD->getType());
+ assert(!VarSize.isZero() && "Expected non-zero size of the variable");
+ } else {
+ VarSize = CharUnits::Zero();
+ }
+ Linkage = CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
+ // Temp solution to prevent optimizations of the internal variables.
+ if (CGM.getLangOpts().OpenMPIsDevice && !VD->isExternallyVisible()) {
+ std::string RefName = getName({VarName, "ref"});
+ if (!CGM.GetGlobalValue(RefName)) {
+ llvm::Constant *AddrRef =
+ getOrCreateInternalVariable(Addr->getType(), RefName);
+ auto *GVAddrRef = cast<llvm::GlobalVariable>(AddrRef);
+ GVAddrRef->setConstant(/*Val=*/true);
+ GVAddrRef->setLinkage(llvm::GlobalValue::InternalLinkage);
+ GVAddrRef->setInitializer(Addr);
+ CGM.addCompilerUsedGlobal(GVAddrRef);
}
- VarSize = CGM.getPointerSize();
- Linkage = llvm::GlobalValue::WeakAnyLinkage;
- break;
}
- OffloadEntriesInfoManager.registerDeviceGlobalVarEntryInfo(
- VarName, Addr, VarSize, Flags, Linkage);
+ break;
+ case OMPDeclareTargetDeclAttr::MT_Link:
+ Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryLink;
+ if (CGM.getLangOpts().OpenMPIsDevice) {
+ VarName = Addr->getName();
+ Addr = nullptr;
+ } else {
+ VarName = getAddrOfDeclareTargetLink(VD).getName();
+ Addr = cast<llvm::Constant>(getAddrOfDeclareTargetLink(VD).getPointer());
+ }
+ VarSize = CGM.getPointerSize();
+ Linkage = llvm::GlobalValue::WeakAnyLinkage;
+ break;
}
+ OffloadEntriesInfoManager.registerDeviceGlobalVarEntryInfo(
+ VarName, Addr, VarSize, Flags, Linkage);
}
bool CGOpenMPRuntime::emitTargetGlobal(GlobalDecl GD) {
@@ -8422,6 +8585,12 @@ void CGOpenMPRuntime::emitDeferredTargetDecls() const {
}
}
+void CGOpenMPRuntime::adjustTargetSpecificDataForLambdas(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D) const {
+ assert(isOpenMPTargetExecutionDirective(D.getDirectiveKind()) &&
+ " Expected target-based directive.");
+}
+
CGOpenMPRuntime::DisableAutoDeclareTargetRAII::DisableAutoDeclareTargetRAII(
CodeGenModule &CGM)
: CGM(CGM) {
@@ -8440,21 +8609,20 @@ bool CGOpenMPRuntime::markAsGlobalTarget(GlobalDecl GD) {
if (!CGM.getLangOpts().OpenMPIsDevice || !ShouldMarkAsGlobal)
return true;
+ StringRef Name = CGM.getMangledName(GD);
const auto *D = cast<FunctionDecl>(GD.getDecl());
- const FunctionDecl *FD = D->getCanonicalDecl();
  // Do not emit the function if it is marked as declare target as it was already
// emitted.
if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(D)) {
- if (D->hasBody() && AlreadyEmittedTargetFunctions.count(FD) == 0) {
- if (auto *F = dyn_cast_or_null<llvm::Function>(
- CGM.GetGlobalValue(CGM.getMangledName(GD))))
+ if (D->hasBody() && AlreadyEmittedTargetFunctions.count(Name) == 0) {
+ if (auto *F = dyn_cast_or_null<llvm::Function>(CGM.GetGlobalValue(Name)))
return !F->isDeclaration();
return false;
}
return true;
}
- return !AlreadyEmittedTargetFunctions.insert(FD).second;
+ return !AlreadyEmittedTargetFunctions.insert(Name).second;
}
llvm::Function *CGOpenMPRuntime::emitRegistrationFunction() {
@@ -9002,8 +9170,8 @@ void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD,
ParamAttrTy &ParamAttr = ParamAttrs[Pos];
ParamAttr.Kind = Linear;
if (*SI) {
- if (!(*SI)->EvaluateAsInt(ParamAttr.StrideOrArg, C,
- Expr::SE_AllowSideEffects)) {
+ Expr::EvalResult Result;
+ if (!(*SI)->EvaluateAsInt(Result, C, Expr::SE_AllowSideEffects)) {
if (const auto *DRE =
cast<DeclRefExpr>((*SI)->IgnoreParenImpCasts())) {
if (const auto *StridePVD = cast<ParmVarDecl>(DRE->getDecl())) {
@@ -9012,6 +9180,8 @@ void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD,
ParamPositions[StridePVD->getCanonicalDecl()]);
}
}
+ } else {
+ ParamAttr.StrideOrArg = Result.Val.getInt();
}
}
++SI;
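// Illustrative sketch, not from this patch: both EvaluateAsInt hunks above
// move from the llvm::APSInt out-parameter overload to the overload taking
// Expr::EvalResult and read the constant back on success. A hypothetical
// helper showing the shape of the updated API:
#include "clang/AST/Expr.h"

static bool tryEvaluateInt(const clang::Expr *E, const clang::ASTContext &Ctx,
                           llvm::APSInt &Out) {
  clang::Expr::EvalResult Result;
  if (!E->EvaluateAsInt(Result, Ctx, clang::Expr::SE_AllowSideEffects))
    return false;
  Out = Result.Val.getInt();
  return true;
}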
diff --git a/lib/CodeGen/CGOpenMPRuntime.h b/lib/CodeGen/CGOpenMPRuntime.h
index 35f75a9ec0..d9ac5df36b 100644
--- a/lib/CodeGen/CGOpenMPRuntime.h
+++ b/lib/CodeGen/CGOpenMPRuntime.h
@@ -19,8 +19,8 @@
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringSet.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/ValueHandle.h"
@@ -278,12 +278,39 @@ protected:
/// stored.
virtual Address emitThreadIDAddress(CodeGenFunction &CGF, SourceLocation Loc);
+ void setLocThreadIdInsertPt(CodeGenFunction &CGF,
+ bool AtCurrentPoint = false);
+ void clearLocThreadIdInsertPt(CodeGenFunction &CGF);
+
+ /// Check if the default location must be constant.
+ /// Default is false to support OMPT/OMPD.
+ virtual bool isDefaultLocationConstant() const { return false; }
+
+ /// Returns additional flags that can be stored in reserved_2 field of the
+ /// default location.
+ virtual unsigned getDefaultLocationReserved2Flags() const { return 0; }
+
+ /// Returns default flags for the barriers depending on the directive, for
+  /// which this barrier is going to be emitted.
+ static unsigned getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind);
+
+ /// Get the LLVM type for the critical name.
+  llvm::ArrayType *getKmpCriticalNameTy() const { return KmpCriticalNameTy; }
+
+ /// Returns corresponding lock object for the specified critical region
+ /// name. If the lock object does not exist it is created, otherwise the
+ /// reference to the existing copy is returned.
+ /// \param CriticalName Name of the critical region.
+ ///
+ llvm::Value *getCriticalRegionLock(StringRef CriticalName);
+
private:
/// Default const ident_t object used for initialization of all other
/// ident_t objects.
llvm::Constant *DefaultOpenMPPSource = nullptr;
+ using FlagsTy = std::pair<unsigned, unsigned>;
/// Map of flags and corresponding default locations.
- typedef llvm::DenseMap<unsigned, llvm::Value *> OpenMPDefaultLocMapTy;
+ using OpenMPDefaultLocMapTy = llvm::DenseMap<FlagsTy, llvm::Value *>;
OpenMPDefaultLocMapTy OpenMPDefaultLocMap;
Address getOrCreateDefaultLocation(unsigned Flags);
@@ -300,6 +327,8 @@ private:
struct DebugLocThreadIdTy {
llvm::Value *DebugLoc;
llvm::Value *ThreadID;
+ /// Insert point for the service instructions.
+ llvm::AssertingVH<llvm::Instruction> ServiceInsertPt = nullptr;
};
/// Map of local debug location, ThreadId and functions.
typedef llvm::DenseMap<llvm::Function *, DebugLocThreadIdTy>
@@ -596,7 +625,11 @@ private:
OffloadEntriesInfoManagerTy OffloadEntriesInfoManager;
bool ShouldMarkAsGlobal = true;
- llvm::SmallDenseSet<const Decl *> AlreadyEmittedTargetFunctions;
+ /// List of the emitted functions.
+ llvm::StringSet<> AlreadyEmittedTargetFunctions;
+ /// List of the global variables with their addresses that should not be
+ /// emitted for the target.
+ llvm::StringMap<llvm::WeakTrackingVH> EmittedNonTargetVariables;
/// List of variables that can become declare target implicitly and, thus,
/// must be emitted.
@@ -673,10 +706,10 @@ private:
const llvm::Twine &Name);
/// Set of threadprivate variables with the generated initializer.
- llvm::SmallPtrSet<const VarDecl *, 4> ThreadPrivateWithDefinition;
+ llvm::StringSet<> ThreadPrivateWithDefinition;
/// Set of declare target variables with the generated initializer.
- llvm::SmallPtrSet<const VarDecl *, 4> DeclareTargetWithDefinition;
+ llvm::StringSet<> DeclareTargetWithDefinition;
/// Emits initialization code for the threadprivate variables.
/// \param VDAddr Address of the global variable \a VD.
@@ -688,13 +721,6 @@ private:
llvm::Value *Ctor, llvm::Value *CopyCtor,
llvm::Value *Dtor, SourceLocation Loc);
- /// Returns corresponding lock object for the specified critical region
- /// name. If the lock object does not exist it is created, otherwise the
- /// reference to the existing copy is returned.
- /// \param CriticalName Name of the critical region.
- ///
- llvm::Value *getCriticalRegionLock(StringRef CriticalName);
-
struct TaskResultTy {
llvm::Value *NewTask = nullptr;
llvm::Value *TaskEntry = nullptr;
@@ -884,6 +910,20 @@ public:
virtual bool isStaticNonchunked(OpenMPDistScheduleClauseKind ScheduleKind,
bool Chunked) const;
+ /// Check if the specified \a ScheduleKind is static chunked.
+ /// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
+ /// \param Chunked True if chunk is specified in the clause.
+ ///
+ virtual bool isStaticChunked(OpenMPScheduleClauseKind ScheduleKind,
+ bool Chunked) const;
+
+  /// Check if the specified \a ScheduleKind is static chunked.
+ /// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause.
+ /// \param Chunked True if chunk is specified in the clause.
+ ///
+ virtual bool isStaticChunked(OpenMPDistScheduleClauseKind ScheduleKind,
+ bool Chunked) const;
+
/// Check if the specified \a ScheduleKind is dynamic.
/// This kind of worksharing directive is emitted without outer loop.
/// \param ScheduleKind Schedule Kind specified in the 'schedule' clause.
@@ -1500,7 +1540,7 @@ public:
/// schedule clause.
virtual void getDefaultScheduleAndChunk(CodeGenFunction &CGF,
const OMPLoopDirective &S, OpenMPScheduleClauseKind &ScheduleKind,
- llvm::Value *&Chunk) const {}
+ const Expr *&ChunkExpr) const {}
/// Emits call of the outlined function with the provided arguments,
/// translating these arguments to correct target-specific arguments.
@@ -1517,12 +1557,23 @@ public:
virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF,
const VarDecl *VD);
- /// Marks the declaration as alread emitted for the device code and returns
+ /// Marks the declaration as already emitted for the device code and returns
/// true, if it was marked already, and false, otherwise.
bool markAsGlobalTarget(GlobalDecl GD);
/// Emit deferred declare target variables marked for deferred emission.
void emitDeferredTargetDecls() const;
+
+ /// Adjust some parameters for the target-based directives, like addresses of
+ /// the variables captured by reference in lambdas.
+ virtual void
+ adjustTargetSpecificDataForLambdas(CodeGenFunction &CGF,
+ const OMPExecutableDirective &D) const;
+
+  /// Perform a check on the requires decl to ensure that the target
+  /// architecture supports unified addressing.
+ virtual void checkArchForUnifiedAddressing(CodeGenModule &CGM,
+ const OMPRequiresDecl *D) const {}
};
/// Class supports emission of SIMD-only code.
diff --git a/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp b/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
index 14fd4a3113..b055132ef0 100644
--- a/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
+++ b/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
@@ -17,6 +17,7 @@
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/Cuda.h"
#include "llvm/ADT/SmallPtrSet.h"
using namespace clang;
@@ -32,8 +33,8 @@ enum OpenMPRTLFunctionNVPTX {
/// Call to void __kmpc_spmd_kernel_init(kmp_int32 thread_limit,
/// int16_t RequiresOMPRuntime, int16_t RequiresDataSharing);
OMPRTL_NVPTX__kmpc_spmd_kernel_init,
- /// Call to void __kmpc_spmd_kernel_deinit();
- OMPRTL_NVPTX__kmpc_spmd_kernel_deinit,
+ /// Call to void __kmpc_spmd_kernel_deinit_v2(int16_t RequiresOMPRuntime);
+ OMPRTL_NVPTX__kmpc_spmd_kernel_deinit_v2,
/// Call to void __kmpc_kernel_prepare_parallel(void
/// *outlined_function, int16_t
/// IsOMPRuntimeInitialized);
@@ -61,31 +62,21 @@ enum OpenMPRTLFunctionNVPTX {
/// lane_offset, int16_t shortCircuit),
/// void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num));
OMPRTL_NVPTX__kmpc_parallel_reduce_nowait,
- /// Call to __kmpc_nvptx_simd_reduce_nowait(kmp_int32
- /// global_tid, kmp_int32 num_vars, size_t reduce_size, void* reduce_data,
- /// void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
- /// lane_offset, int16_t shortCircuit),
- /// void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num));
- OMPRTL_NVPTX__kmpc_simd_reduce_nowait,
- /// Call to __kmpc_nvptx_teams_reduce_nowait(int32_t global_tid,
- /// int32_t num_vars, size_t reduce_size, void *reduce_data,
- /// void (*kmp_ShuffleReductFctPtr)(void *rhs, int16_t lane_id, int16_t
- /// lane_offset, int16_t shortCircuit),
- /// void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num),
- /// void (*kmp_CopyToScratchpadFctPtr)(void *reduce_data, void * scratchpad,
- /// int32_t index, int32_t width),
- /// void (*kmp_LoadReduceFctPtr)(void *reduce_data, void * scratchpad, int32_t
- /// index, int32_t width, int32_t reduce))
- OMPRTL_NVPTX__kmpc_teams_reduce_nowait,
+ /// Call to __kmpc_nvptx_teams_reduce_nowait_simple(ident_t *loc, kmp_int32
+ /// global_tid, kmp_critical_name *lck)
+ OMPRTL_NVPTX__kmpc_nvptx_teams_reduce_nowait_simple,
+ /// Call to __kmpc_nvptx_teams_end_reduce_nowait_simple(ident_t *loc,
+ /// kmp_int32 global_tid, kmp_critical_name *lck)
+ OMPRTL_NVPTX__kmpc_nvptx_teams_end_reduce_nowait_simple,
/// Call to __kmpc_nvptx_end_reduce_nowait(int32_t global_tid);
OMPRTL_NVPTX__kmpc_end_reduce_nowait,
/// Call to void __kmpc_data_sharing_init_stack();
OMPRTL_NVPTX__kmpc_data_sharing_init_stack,
/// Call to void __kmpc_data_sharing_init_stack_spmd();
OMPRTL_NVPTX__kmpc_data_sharing_init_stack_spmd,
- /// Call to void* __kmpc_data_sharing_push_stack(size_t size,
+ /// Call to void* __kmpc_data_sharing_coalesced_push_stack(size_t size,
/// int16_t UseSharedMemory);
- OMPRTL_NVPTX__kmpc_data_sharing_push_stack,
+ OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack,
/// Call to void __kmpc_data_sharing_pop_stack(void *a);
OMPRTL_NVPTX__kmpc_data_sharing_pop_stack,
/// Call to void __kmpc_begin_sharing_variables(void ***args,
@@ -100,6 +91,13 @@ enum OpenMPRTLFunctionNVPTX {
OMPRTL_NVPTX__kmpc_parallel_level,
/// Call to int8_t __kmpc_is_spmd_exec_mode();
OMPRTL_NVPTX__kmpc_is_spmd_exec_mode,
+ /// Call to void __kmpc_get_team_static_memory(const void *buf, size_t size,
+ /// int16_t is_shared, const void **res);
+ OMPRTL_NVPTX__kmpc_get_team_static_memory,
+ /// Call to void __kmpc_restore_team_static_memory(int16_t is_shared);
+ OMPRTL_NVPTX__kmpc_restore_team_static_memory,
+  /// Call to void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
+ OMPRTL__kmpc_barrier,
};
/// Pre(post)-action for different OpenMP constructs specialized for NVPTX.
@@ -142,19 +140,35 @@ public:
/// a target region. The appropriate mode (SPMD|NON-SPMD) is set on entry
/// to the target region and used by containing directives such as 'parallel'
/// to emit optimized code.
-class ExecutionModeRAII {
+class ExecutionRuntimeModesRAII {
private:
- CGOpenMPRuntimeNVPTX::ExecutionMode SavedMode;
- CGOpenMPRuntimeNVPTX::ExecutionMode &Mode;
+ CGOpenMPRuntimeNVPTX::ExecutionMode SavedExecMode =
+ CGOpenMPRuntimeNVPTX::EM_Unknown;
+ CGOpenMPRuntimeNVPTX::ExecutionMode &ExecMode;
+ bool SavedRuntimeMode = false;
+ bool *RuntimeMode = nullptr;
public:
- ExecutionModeRAII(CGOpenMPRuntimeNVPTX::ExecutionMode &Mode, bool IsSPMD)
- : Mode(Mode) {
- SavedMode = Mode;
- Mode = IsSPMD ? CGOpenMPRuntimeNVPTX::EM_SPMD
- : CGOpenMPRuntimeNVPTX::EM_NonSPMD;
+ /// Constructor for Non-SPMD mode.
+ ExecutionRuntimeModesRAII(CGOpenMPRuntimeNVPTX::ExecutionMode &ExecMode)
+ : ExecMode(ExecMode) {
+ SavedExecMode = ExecMode;
+ ExecMode = CGOpenMPRuntimeNVPTX::EM_NonSPMD;
+ }
+ /// Constructor for SPMD mode.
+ ExecutionRuntimeModesRAII(CGOpenMPRuntimeNVPTX::ExecutionMode &ExecMode,
+ bool &RuntimeMode, bool FullRuntimeMode)
+ : ExecMode(ExecMode), RuntimeMode(&RuntimeMode) {
+ SavedExecMode = ExecMode;
+ SavedRuntimeMode = RuntimeMode;
+ ExecMode = CGOpenMPRuntimeNVPTX::EM_SPMD;
+ RuntimeMode = FullRuntimeMode;
+ }
+ ~ExecutionRuntimeModesRAII() {
+ ExecMode = SavedExecMode;
+ if (RuntimeMode)
+ *RuntimeMode = SavedRuntimeMode;
}
- ~ExecutionModeRAII() { Mode = SavedMode; }
};
/// GPU Configuration: This information can be derived from cuda registers,
@@ -169,16 +183,34 @@ enum MachineConfiguration : unsigned {
LaneIDMask = WarpSize - 1,
/// Global memory alignment for performance.
- GlobalMemoryAlignment = 256,
-};
+ GlobalMemoryAlignment = 128,
-enum NamedBarrier : unsigned {
- /// Synchronize on this barrier #ID using a named barrier primitive.
- /// Only the subset of active threads in a parallel region arrive at the
- /// barrier.
- NB_Parallel = 1,
+ /// Maximal size of the shared memory buffer.
+ SharedMemorySize = 128,
};
+static const ValueDecl *getPrivateItem(const Expr *RefExpr) {
+ RefExpr = RefExpr->IgnoreParens();
+ if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(RefExpr)) {
+ const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
+ while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
+ Base = TempASE->getBase()->IgnoreParenImpCasts();
+ RefExpr = Base;
+ } else if (auto *OASE = dyn_cast<OMPArraySectionExpr>(RefExpr)) {
+ const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
+ while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
+ Base = TempOASE->getBase()->IgnoreParenImpCasts();
+ while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
+ Base = TempASE->getBase()->IgnoreParenImpCasts();
+ RefExpr = Base;
+ }
+ RefExpr = RefExpr->IgnoreParenImpCasts();
+ if (const auto *DE = dyn_cast<DeclRefExpr>(RefExpr))
+ return cast<ValueDecl>(DE->getDecl()->getCanonicalDecl());
+ const auto *ME = cast<MemberExpr>(RefExpr);
+ return cast<ValueDecl>(ME->getMemberDecl()->getCanonicalDecl());
+}
+
typedef std::pair<CharUnits /*Align*/, const ValueDecl *> VarsDataTy;
static bool stable_sort_comparator(const VarsDataTy P1, const VarsDataTy P2) {
return P1.first > P2.first;
@@ -186,20 +218,31 @@ static bool stable_sort_comparator(const VarsDataTy P1, const VarsDataTy P2) {
static RecordDecl *buildRecordForGlobalizedVars(
ASTContext &C, ArrayRef<const ValueDecl *> EscapedDecls,
+ ArrayRef<const ValueDecl *> EscapedDeclsForTeams,
llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
&MappedDeclsFields) {
- if (EscapedDecls.empty())
+ if (EscapedDecls.empty() && EscapedDeclsForTeams.empty())
return nullptr;
SmallVector<VarsDataTy, 4> GlobalizedVars;
for (const ValueDecl *D : EscapedDecls)
+ GlobalizedVars.emplace_back(
+ CharUnits::fromQuantity(std::max(
+ C.getDeclAlign(D).getQuantity(),
+ static_cast<CharUnits::QuantityType>(GlobalMemoryAlignment))),
+ D);
+ for (const ValueDecl *D : EscapedDeclsForTeams)
GlobalizedVars.emplace_back(C.getDeclAlign(D), D);
std::stable_sort(GlobalizedVars.begin(), GlobalizedVars.end(),
stable_sort_comparator);
// Build struct _globalized_locals_ty {
- // /* globalized vars */
+  //  /* globalized vars */[WarpSize] align (max(decl_align,
+ // GlobalMemoryAlignment))
+ // /* globalized vars */ for EscapedDeclsForTeams
// };
RecordDecl *GlobalizedRD = C.buildImplicitRecord("_globalized_locals_ty");
GlobalizedRD->startDefinition();
+ llvm::SmallPtrSet<const ValueDecl *, 16> SingleEscaped(
+ EscapedDeclsForTeams.begin(), EscapedDeclsForTeams.end());
for (const auto &Pair : GlobalizedVars) {
const ValueDecl *VD = Pair.second;
QualType Type = VD->getType();
@@ -208,19 +251,39 @@ static RecordDecl *buildRecordForGlobalizedVars(
else
Type = Type.getNonReferenceType();
SourceLocation Loc = VD->getLocation();
- auto *Field =
- FieldDecl::Create(C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
- C.getTrivialTypeSourceInfo(Type, SourceLocation()),
- /*BW=*/nullptr, /*Mutable=*/false,
- /*InitStyle=*/ICIS_NoInit);
- Field->setAccess(AS_public);
- GlobalizedRD->addDecl(Field);
- if (VD->hasAttrs()) {
- for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
- E(VD->getAttrs().end());
- I != E; ++I)
- Field->addAttr(*I);
+ FieldDecl *Field;
+ if (SingleEscaped.count(VD)) {
+ Field = FieldDecl::Create(
+ C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
+ C.getTrivialTypeSourceInfo(Type, SourceLocation()),
+ /*BW=*/nullptr, /*Mutable=*/false,
+ /*InitStyle=*/ICIS_NoInit);
+ Field->setAccess(AS_public);
+ if (VD->hasAttrs()) {
+ for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
+ E(VD->getAttrs().end());
+ I != E; ++I)
+ Field->addAttr(*I);
+ }
+ } else {
+ llvm::APInt ArraySize(32, WarpSize);
+ Type = C.getConstantArrayType(Type, ArraySize, ArrayType::Normal, 0);
+ Field = FieldDecl::Create(
+ C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
+ C.getTrivialTypeSourceInfo(Type, SourceLocation()),
+ /*BW=*/nullptr, /*Mutable=*/false,
+ /*InitStyle=*/ICIS_NoInit);
+ Field->setAccess(AS_public);
+ llvm::APInt Align(32, std::max(C.getDeclAlign(VD).getQuantity(),
+ static_cast<CharUnits::QuantityType>(
+ GlobalMemoryAlignment)));
+ Field->addAttr(AlignedAttr::CreateImplicit(
+ C, AlignedAttr::GNU_aligned, /*IsAlignmentExpr=*/true,
+ IntegerLiteral::Create(C, Align,
+ C.getIntTypeForBitwidth(32, /*Signed=*/0),
+ SourceLocation())));
}
+ GlobalizedRD->addDecl(Field);
MappedDeclsFields.try_emplace(VD, Field);
}
GlobalizedRD->completeDefinition();
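// Illustrative sketch, an assumption about the resulting layout rather than
// code from this patch: with WarpSize == 32 and GlobalMemoryAlignment == 128,
// a variable escaping a parallel region gets one slot per lane in the record
// built above, while a teams-level escapee keeps a single plain field.
struct _globalized_locals_ty_example {
  double ParallelEscapee[32] __attribute__((aligned(128))); // one slot per lane
  int TeamsEscapee;                                         // single slot
};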
@@ -256,9 +319,11 @@ class CheckVarsEscapingDeclContext final
const auto *Attr = FD->getAttr<OMPCaptureKindAttr>();
if (!Attr)
return;
- if (!isOpenMPPrivate(
- static_cast<OpenMPClauseKind>(Attr->getCaptureKind())) ||
- Attr->getCaptureKind() == OMPC_map)
+ if (((Attr->getCaptureKind() != OMPC_map) &&
+ !isOpenMPPrivate(
+ static_cast<OpenMPClauseKind>(Attr->getCaptureKind()))) ||
+ ((Attr->getCaptureKind() == OMPC_map) &&
+ !FD->getType()->isAnyPointerType()))
return;
}
if (!FD->getType()->isReferenceType()) {
@@ -340,15 +405,24 @@ class CheckVarsEscapingDeclContext final
}
}
- void buildRecordForGlobalizedVars() {
+ void buildRecordForGlobalizedVars(bool IsInTTDRegion) {
assert(!GlobalizedRD &&
"Record for globalized variables is built already.");
+ ArrayRef<const ValueDecl *> EscapedDeclsForParallel, EscapedDeclsForTeams;
+ if (IsInTTDRegion)
+ EscapedDeclsForTeams = EscapedDecls.getArrayRef();
+ else
+ EscapedDeclsForParallel = EscapedDecls.getArrayRef();
GlobalizedRD = ::buildRecordForGlobalizedVars(
- CGF.getContext(), EscapedDecls.getArrayRef(), MappedDeclsFields);
+ CGF.getContext(), EscapedDeclsForParallel, EscapedDeclsForTeams,
+ MappedDeclsFields);
}
public:
- CheckVarsEscapingDeclContext(CodeGenFunction &CGF) : CGF(CGF) {}
+ CheckVarsEscapingDeclContext(CodeGenFunction &CGF,
+ ArrayRef<const ValueDecl *> TeamsReductions)
+ : CGF(CGF), EscapedDecls(TeamsReductions.begin(), TeamsReductions.end()) {
+ }
virtual ~CheckVarsEscapingDeclContext() = default;
void VisitDeclStmt(const DeclStmt *S) {
if (!S)
@@ -490,9 +564,9 @@ public:
/// Returns the record that handles all the escaped local variables and used
/// instead of their original storage.
- const RecordDecl *getGlobalizedRecord() {
+ const RecordDecl *getGlobalizedRecord(bool IsInTTDRegion) {
if (!GlobalizedRD)
- buildRecordForGlobalizedVars();
+ buildRecordForGlobalizedVars(IsInTTDRegion);
return GlobalizedRD;
}
@@ -568,29 +642,15 @@ static llvm::Value *getNVPTXNumThreads(CodeGenFunction &CGF) {
/// Get barrier to synchronize all threads in a block.
static void getNVPTXCTABarrier(CodeGenFunction &CGF) {
- CGF.EmitRuntimeCall(llvm::Intrinsic::getDeclaration(
- &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_barrier0));
-}
-
-/// Get barrier #ID to synchronize selected (multiple of warp size) threads in
-/// a CTA.
-static void getNVPTXBarrier(CodeGenFunction &CGF, int ID,
- llvm::Value *NumThreads) {
- CGBuilderTy &Bld = CGF.Builder;
- llvm::Value *Args[] = {Bld.getInt32(ID), NumThreads};
- CGF.EmitRuntimeCall(llvm::Intrinsic::getDeclaration(
- &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_barrier),
- Args);
+ llvm::Function *F = llvm::Intrinsic::getDeclaration(
+ &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_barrier0);
+ F->addFnAttr(llvm::Attribute::Convergent);
+ CGF.EmitRuntimeCall(F);
}
/// Synchronize all GPU threads in a block.
static void syncCTAThreads(CodeGenFunction &CGF) { getNVPTXCTABarrier(CGF); }
-/// Synchronize worker threads in a parallel region.
-static void syncParallelThreads(CodeGenFunction &CGF, llvm::Value *NumThreads) {
- return getNVPTXBarrier(CGF, NB_Parallel, NumThreads);
-}
-
/// Get the value of the thread_limit clause in the teams directive.
/// For the 'generic' execution mode, the runtime encodes thread_limit in
/// the launch parameters, always starting thread_limit+warpSize threads per
@@ -652,12 +712,58 @@ getDataSharingMode(CodeGenModule &CGM) {
: CGOpenMPRuntimeNVPTX::Generic;
}
+// Checks if the expression is constant or does not have non-trivial function
+// calls.
+static bool isTrivial(ASTContext &Ctx, const Expr *E) {
+ // We can skip constant expressions.
+ // We can skip expressions with trivial calls or simple expressions.
+ return (E->isEvaluatable(Ctx, Expr::SE_AllowUndefinedBehavior) ||
+ !E->hasNonTrivialCall(Ctx)) &&
+ !E->HasSideEffects(Ctx, /*IncludePossibleEffects=*/true);
+}
+
/// Checks if the \p Body is the \a CompoundStmt and returns its child statement
-/// iff there is only one.
-static const Stmt *getSingleCompoundChild(const Stmt *Body) {
- if (const auto *C = dyn_cast<CompoundStmt>(Body))
- if (C->size() == 1)
- return C->body_front();
+/// iff there is only one that is not evaluatable at compile time.
+static const Stmt *getSingleCompoundChild(ASTContext &Ctx, const Stmt *Body) {
+ if (const auto *C = dyn_cast<CompoundStmt>(Body)) {
+ const Stmt *Child = nullptr;
+ for (const Stmt *S : C->body()) {
+ if (const auto *E = dyn_cast<Expr>(S)) {
+ if (isTrivial(Ctx, E))
+ continue;
+ }
+ // Some of the statements can be ignored.
+ if (isa<AsmStmt>(S) || isa<NullStmt>(S) || isa<OMPFlushDirective>(S) ||
+ isa<OMPBarrierDirective>(S) || isa<OMPTaskyieldDirective>(S))
+ continue;
+ // Analyze declarations.
+ if (const auto *DS = dyn_cast<DeclStmt>(S)) {
+ if (llvm::all_of(DS->decls(), [&Ctx](const Decl *D) {
+ if (isa<EmptyDecl>(D) || isa<DeclContext>(D) ||
+ isa<TypeDecl>(D) || isa<PragmaCommentDecl>(D) ||
+ isa<PragmaDetectMismatchDecl>(D) || isa<UsingDecl>(D) ||
+ isa<UsingDirectiveDecl>(D) ||
+ isa<OMPDeclareReductionDecl>(D) ||
+ isa<OMPThreadPrivateDecl>(D))
+ return true;
+ const auto *VD = dyn_cast<VarDecl>(D);
+ if (!VD)
+ return false;
+ return VD->isConstexpr() ||
+ ((VD->getType().isTrivialType(Ctx) ||
+ VD->getType()->isReferenceType()) &&
+ (!VD->hasInit() || isTrivial(Ctx, VD->getInit())));
+ }))
+ continue;
+ }
+      // Found multiple children - there is no single child to return.
+ if (Child)
+ return Body;
+ Child = S;
+ }
+ if (Child)
+ return Child;
+ }
return Body;
}
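// Illustrative example, an assumption rather than a test from this patch:
// with the relaxed getSingleCompoundChild above, the body below still counts
// as having a single child, because the constexpr declaration and the null
// statement are skipped, leaving only the nested directive ('work' is a
// placeholder function).
extern void work(int);
void singleChildExample(int N) {
#pragma omp target teams
  {
    constexpr int Chunk = 4; // trivial declaration, skipped by the analysis
    ;                        // null statement, skipped by the analysis
#pragma omp distribute parallel for
    for (int I = 0; I < N; ++I)
      work(I + Chunk);
  }
}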
@@ -686,7 +792,7 @@ static bool hasNestedSPMDDirective(ASTContext &Ctx,
const auto *CS = D.getInnermostCapturedStmt();
const auto *Body =
CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
- const Stmt *ChildStmt = getSingleCompoundChild(Body);
+ const Stmt *ChildStmt = getSingleCompoundChild(Ctx, Body);
if (const auto *NestedDir = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
@@ -700,7 +806,7 @@ static bool hasNestedSPMDDirective(ASTContext &Ctx,
/*IgnoreCaptured=*/true);
if (!Body)
return false;
- ChildStmt = getSingleCompoundChild(Body);
+ ChildStmt = getSingleCompoundChild(Ctx, Body);
if (const auto *NND = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
DKind = NND->getDirectiveKind();
if (isOpenMPParallelDirective(DKind) &&
@@ -781,10 +887,8 @@ static bool supportsSPMDExecutionMode(ASTContext &Ctx,
case OMPD_target_parallel:
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
- return !hasParallelIfNumThreadsClause(Ctx, D);
case OMPD_target_teams_distribute_parallel_for:
case OMPD_target_teams_distribute_parallel_for_simd:
- // Distribute with lastprivates requires non-SPMD execution mode.
return !hasParallelIfNumThreadsClause(Ctx, D);
case OMPD_target_simd:
case OMPD_target_teams_distribute:
@@ -861,7 +965,7 @@ static bool hasNestedLightweightDirective(ASTContext &Ctx,
const auto *CS = D.getInnermostCapturedStmt();
const auto *Body =
CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
- const Stmt *ChildStmt = getSingleCompoundChild(Body);
+ const Stmt *ChildStmt = getSingleCompoundChild(Ctx, Body);
if (const auto *NestedDir = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
@@ -876,7 +980,7 @@ static bool hasNestedLightweightDirective(ASTContext &Ctx,
/*IgnoreCaptured=*/true);
if (!Body)
return false;
- ChildStmt = getSingleCompoundChild(Body);
+ ChildStmt = getSingleCompoundChild(Ctx, Body);
if (const auto *NND = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
DKind = NND->getDirectiveKind();
if (isOpenMPWorksharingDirective(DKind) &&
@@ -888,7 +992,7 @@ static bool hasNestedLightweightDirective(ASTContext &Ctx,
/*IgnoreCaptured=*/true);
if (!Body)
return false;
- ChildStmt = getSingleCompoundChild(Body);
+ ChildStmt = getSingleCompoundChild(Ctx, Body);
if (const auto *NND = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
DKind = NND->getDirectiveKind();
if (isOpenMPParallelDirective(DKind) &&
@@ -900,7 +1004,7 @@ static bool hasNestedLightweightDirective(ASTContext &Ctx,
/*IgnoreCaptured=*/true);
if (!Body)
return false;
- ChildStmt = getSingleCompoundChild(Body);
+ ChildStmt = getSingleCompoundChild(Ctx, Body);
if (const auto *NND = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
DKind = NND->getDirectiveKind();
if (isOpenMPWorksharingDirective(DKind) &&
@@ -921,7 +1025,7 @@ static bool hasNestedLightweightDirective(ASTContext &Ctx,
/*IgnoreCaptured=*/true);
if (!Body)
return false;
- ChildStmt = getSingleCompoundChild(Body);
+ ChildStmt = getSingleCompoundChild(Ctx, Body);
if (const auto *NND = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
DKind = NND->getDirectiveKind();
if (isOpenMPWorksharingDirective(DKind) &&
@@ -1069,7 +1173,7 @@ void CGOpenMPRuntimeNVPTX::emitNonSPMDKernel(const OMPExecutableDirective &D,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen) {
- ExecutionModeRAII ModeRAII(CurrentExecutionMode, /*IsSPMD=*/false);
+ ExecutionRuntimeModesRAII ModeRAII(CurrentExecutionMode);
EntryFunctionState EST;
WorkerFunctionState WST(CGM, D.getBeginLoc());
Work.clear();
@@ -1085,17 +1189,35 @@ void CGOpenMPRuntimeNVPTX::emitNonSPMDKernel(const OMPExecutableDirective &D,
CGOpenMPRuntimeNVPTX::WorkerFunctionState &WST)
: EST(EST), WST(WST) {}
void Enter(CodeGenFunction &CGF) override {
- static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime())
- .emitNonSPMDEntryHeader(CGF, EST, WST);
+ auto &RT =
+ static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime());
+ RT.emitNonSPMDEntryHeader(CGF, EST, WST);
+ // Skip target region initialization.
+ RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
}
void Exit(CodeGenFunction &CGF) override {
- static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime())
- .emitNonSPMDEntryFooter(CGF, EST);
+ auto &RT =
+ static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime());
+ RT.clearLocThreadIdInsertPt(CGF);
+ RT.emitNonSPMDEntryFooter(CGF, EST);
}
} Action(EST, WST);
CodeGen.setAction(Action);
+ IsInTTDRegion = true;
+  // Reserve space for the globalized memory.
+ GlobalizedRecords.emplace_back();
+ if (!KernelStaticGlobalized) {
+ KernelStaticGlobalized = new llvm::GlobalVariable(
+ CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/false,
+ llvm::GlobalValue::InternalLinkage,
+ llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
+ "_openmp_kernel_static_glob_rd$ptr", /*InsertBefore=*/nullptr,
+ llvm::GlobalValue::NotThreadLocal,
+ CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared));
+ }
emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
IsOffloadEntry, CodeGen);
+ IsInTTDRegion = false;
// Now change the name of the worker function to correspond to this target
// region's entry function.
@@ -1183,7 +1305,10 @@ void CGOpenMPRuntimeNVPTX::emitSPMDKernel(const OMPExecutableDirective &D,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen) {
- ExecutionModeRAII ModeRAII(CurrentExecutionMode, /*IsSPMD=*/true);
+ ExecutionRuntimeModesRAII ModeRAII(
+ CurrentExecutionMode, RequiresFullRuntime,
+ CGM.getLangOpts().OpenMPCUDAForceFullRuntime ||
+ !supportsLightweightRuntime(CGM.getContext(), D));
EntryFunctionState EST;
// Emit target region as a standalone region.
@@ -1199,14 +1324,30 @@ void CGOpenMPRuntimeNVPTX::emitSPMDKernel(const OMPExecutableDirective &D,
: RT(RT), EST(EST), D(D) {}
void Enter(CodeGenFunction &CGF) override {
RT.emitSPMDEntryHeader(CGF, EST, D);
+ // Skip target region initialization.
+ RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
}
void Exit(CodeGenFunction &CGF) override {
+ RT.clearLocThreadIdInsertPt(CGF);
RT.emitSPMDEntryFooter(CGF, EST);
}
} Action(*this, EST, D);
CodeGen.setAction(Action);
+ IsInTTDRegion = true;
+  // Reserve space for the globalized memory.
+ GlobalizedRecords.emplace_back();
+ if (!KernelStaticGlobalized) {
+ KernelStaticGlobalized = new llvm::GlobalVariable(
+ CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/false,
+ llvm::GlobalValue::InternalLinkage,
+ llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
+ "_openmp_kernel_static_glob_rd$ptr", /*InsertBefore=*/nullptr,
+ llvm::GlobalValue::NotThreadLocal,
+ CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared));
+ }
emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
IsOffloadEntry, CodeGen);
+ IsInTTDRegion = false;
}
void CGOpenMPRuntimeNVPTX::emitSPMDEntryHeader(
@@ -1218,14 +1359,10 @@ void CGOpenMPRuntimeNVPTX::emitSPMDEntryHeader(
llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute");
EST.ExitBB = CGF.createBasicBlock(".exit");
- // Initialize the OMP state in the runtime; called by all active threads.
- bool RequiresFullRuntime = CGM.getLangOpts().OpenMPCUDAForceFullRuntime ||
- !supportsLightweightRuntime(CGF.getContext(), D);
- llvm::Value *Args[] = {
- getThreadLimit(CGF, /*IsInSPMDExecutionMode=*/true),
- /*RequiresOMPRuntime=*/
- Bld.getInt16(RequiresFullRuntime ? 1 : 0),
- /*RequiresDataSharing=*/Bld.getInt16(RequiresFullRuntime ? 1 : 0)};
+ llvm::Value *Args[] = {getThreadLimit(CGF, /*IsInSPMDExecutionMode=*/true),
+ /*RequiresOMPRuntime=*/
+ Bld.getInt16(RequiresFullRuntime ? 1 : 0),
+ /*RequiresDataSharing=*/Bld.getInt16(0)};
CGF.EmitRuntimeCall(
createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_spmd_kernel_init), Args);
@@ -1256,8 +1393,11 @@ void CGOpenMPRuntimeNVPTX::emitSPMDEntryFooter(CodeGenFunction &CGF,
CGF.EmitBlock(OMPDeInitBB);
// DeInitialize the OMP state in the runtime; called by all active threads.
+ llvm::Value *Args[] = {/*RequiresOMPRuntime=*/
+ CGF.Builder.getInt16(RequiresFullRuntime ? 1 : 0)};
CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_spmd_kernel_deinit), None);
+ createNVPTXRuntimeFunction(
+ OMPRTL_NVPTX__kmpc_spmd_kernel_deinit_v2), Args);
CGF.EmitBranch(EST.ExitBB);
CGF.EmitBlock(EST.ExitBB);
@@ -1344,6 +1484,8 @@ void CGOpenMPRuntimeNVPTX::emitWorkerLoop(CodeGenFunction &CGF,
// Signal start of parallel region.
CGF.EmitBlock(ExecuteBB);
+ // Skip initialization.
+ setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
// Process work items: outlined parallel functions.
for (llvm::Function *W : Work) {
@@ -1404,6 +1546,8 @@ void CGOpenMPRuntimeNVPTX::emitWorkerLoop(CodeGenFunction &CGF,
// Exit target region.
CGF.EmitBlock(ExitBB);
+ // Skip initialization.
+ clearLocThreadIdInsertPt(CGF);
}
/// Returns specified OpenMP runtime function for the current OpenMP
@@ -1440,11 +1584,12 @@ CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_spmd_kernel_init");
break;
}
- case OMPRTL_NVPTX__kmpc_spmd_kernel_deinit: {
- // Build void __kmpc_spmd_kernel_deinit();
+ case OMPRTL_NVPTX__kmpc_spmd_kernel_deinit_v2: {
+ // Build void __kmpc_spmd_kernel_deinit_v2(int16_t RequiresOMPRuntime);
+ llvm::Type *TypeParams[] = {CGM.Int16Ty};
auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_spmd_kernel_deinit");
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_spmd_kernel_deinit_v2");
break;
}
case OMPRTL_NVPTX__kmpc_kernel_prepare_parallel: {
@@ -1536,83 +1681,37 @@ CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
FnTy, /*Name=*/"__kmpc_nvptx_parallel_reduce_nowait");
break;
}
- case OMPRTL_NVPTX__kmpc_simd_reduce_nowait: {
- // Build int32_t kmpc_nvptx_simd_reduce_nowait(kmp_int32 global_tid,
- // kmp_int32 num_vars, size_t reduce_size, void* reduce_data,
- // void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
- // lane_offset, int16_t Algorithm Version),
- // void (*kmp_InterWarpCopyFctPtr)(void* src, int warp_num));
- llvm::Type *ShuffleReduceTypeParams[] = {CGM.VoidPtrTy, CGM.Int16Ty,
- CGM.Int16Ty, CGM.Int16Ty};
- auto *ShuffleReduceFnTy =
- llvm::FunctionType::get(CGM.VoidTy, ShuffleReduceTypeParams,
- /*isVarArg=*/false);
- llvm::Type *InterWarpCopyTypeParams[] = {CGM.VoidPtrTy, CGM.Int32Ty};
- auto *InterWarpCopyFnTy =
- llvm::FunctionType::get(CGM.VoidTy, InterWarpCopyTypeParams,
- /*isVarArg=*/false);
- llvm::Type *TypeParams[] = {CGM.Int32Ty,
- CGM.Int32Ty,
- CGM.SizeTy,
- CGM.VoidPtrTy,
- ShuffleReduceFnTy->getPointerTo(),
- InterWarpCopyFnTy->getPointerTo()};
+ case OMPRTL_NVPTX__kmpc_end_reduce_nowait: {
+ // Build __kmpc_end_reduce_nowait(kmp_int32 global_tid);
+ llvm::Type *TypeParams[] = {CGM.Int32Ty};
auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(
- FnTy, /*Name=*/"__kmpc_nvptx_simd_reduce_nowait");
+ FnTy, /*Name=*/"__kmpc_nvptx_end_reduce_nowait");
break;
}
- case OMPRTL_NVPTX__kmpc_teams_reduce_nowait: {
- // Build int32_t __kmpc_nvptx_teams_reduce_nowait(int32_t global_tid,
- // int32_t num_vars, size_t reduce_size, void *reduce_data,
- // void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
- // lane_offset, int16_t shortCircuit),
- // void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num),
- // void (*kmp_CopyToScratchpadFctPtr)(void *reduce_data, void * scratchpad,
- // int32_t index, int32_t width),
- // void (*kmp_LoadReduceFctPtr)(void *reduce_data, void * scratchpad,
- // int32_t index, int32_t width, int32_t reduce))
- llvm::Type *ShuffleReduceTypeParams[] = {CGM.VoidPtrTy, CGM.Int16Ty,
- CGM.Int16Ty, CGM.Int16Ty};
- auto *ShuffleReduceFnTy =
- llvm::FunctionType::get(CGM.VoidTy, ShuffleReduceTypeParams,
- /*isVarArg=*/false);
- llvm::Type *InterWarpCopyTypeParams[] = {CGM.VoidPtrTy, CGM.Int32Ty};
- auto *InterWarpCopyFnTy =
- llvm::FunctionType::get(CGM.VoidTy, InterWarpCopyTypeParams,
- /*isVarArg=*/false);
- llvm::Type *CopyToScratchpadTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy,
- CGM.Int32Ty, CGM.Int32Ty};
- auto *CopyToScratchpadFnTy =
- llvm::FunctionType::get(CGM.VoidTy, CopyToScratchpadTypeParams,
- /*isVarArg=*/false);
- llvm::Type *LoadReduceTypeParams[] = {
- CGM.VoidPtrTy, CGM.VoidPtrTy, CGM.Int32Ty, CGM.Int32Ty, CGM.Int32Ty};
- auto *LoadReduceFnTy =
- llvm::FunctionType::get(CGM.VoidTy, LoadReduceTypeParams,
- /*isVarArg=*/false);
- llvm::Type *TypeParams[] = {CGM.Int32Ty,
- CGM.Int32Ty,
- CGM.SizeTy,
- CGM.VoidPtrTy,
- ShuffleReduceFnTy->getPointerTo(),
- InterWarpCopyFnTy->getPointerTo(),
- CopyToScratchpadFnTy->getPointerTo(),
- LoadReduceFnTy->getPointerTo()};
+ case OMPRTL_NVPTX__kmpc_nvptx_teams_reduce_nowait_simple: {
+ // Build __kmpc_nvptx_teams_reduce_nowait_simple(ident_t *loc, kmp_int32
+ // global_tid, kmp_critical_name *lck)
+ llvm::Type *TypeParams[] = {
+ getIdentTyPointerTy(), CGM.Int32Ty,
+ llvm::PointerType::getUnqual(getKmpCriticalNameTy())};
auto *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(
- FnTy, /*Name=*/"__kmpc_nvptx_teams_reduce_nowait");
+ FnTy, /*Name=*/"__kmpc_nvptx_teams_reduce_nowait_simple");
break;
}
- case OMPRTL_NVPTX__kmpc_end_reduce_nowait: {
- // Build __kmpc_end_reduce_nowait(kmp_int32 global_tid);
- llvm::Type *TypeParams[] = {CGM.Int32Ty};
+ case OMPRTL_NVPTX__kmpc_nvptx_teams_end_reduce_nowait_simple: {
+ // Build __kmpc_nvptx_teams_end_reduce_nowait_simple(ident_t *loc, kmp_int32
+ // global_tid, kmp_critical_name *lck)
+ llvm::Type *TypeParams[] = {
+ getIdentTyPointerTy(), CGM.Int32Ty,
+ llvm::PointerType::getUnqual(getKmpCriticalNameTy())};
auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(
- FnTy, /*Name=*/"__kmpc_nvptx_end_reduce_nowait");
+ FnTy, /*Name=*/"__kmpc_nvptx_teams_end_reduce_nowait_simple");
break;
}
case OMPRTL_NVPTX__kmpc_data_sharing_init_stack: {
@@ -1630,14 +1729,14 @@ CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
CGM.CreateRuntimeFunction(FnTy, "__kmpc_data_sharing_init_stack_spmd");
break;
}
- case OMPRTL_NVPTX__kmpc_data_sharing_push_stack: {
- // Build void *__kmpc_data_sharing_push_stack(size_t size,
+ case OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack: {
+ // Build void *__kmpc_data_sharing_coalesced_push_stack(size_t size,
// int16_t UseSharedMemory);
llvm::Type *TypeParams[] = {CGM.SizeTy, CGM.Int16Ty};
auto *FnTy =
llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(
- FnTy, /*Name=*/"__kmpc_data_sharing_push_stack");
+ FnTy, /*Name=*/"__kmpc_data_sharing_coalesced_push_stack");
break;
}
case OMPRTL_NVPTX__kmpc_data_sharing_pop_stack: {
@@ -1687,6 +1786,33 @@ CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_is_spmd_exec_mode");
break;
}
+ case OMPRTL_NVPTX__kmpc_get_team_static_memory: {
+ // Build void __kmpc_get_team_static_memory(const void *buf, size_t size,
+ // int16_t is_shared, const void **res);
+ llvm::Type *TypeParams[] = {CGM.VoidPtrTy, CGM.SizeTy, CGM.Int16Ty,
+ CGM.VoidPtrPtrTy};
+ auto *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_get_team_static_memory");
+ break;
+ }
+ case OMPRTL_NVPTX__kmpc_restore_team_static_memory: {
+ // Build void __kmpc_restore_team_static_memory(int16_t is_shared);
+ auto *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, CGM.Int16Ty, /*isVarArg=*/false);
+ RTLFn =
+ CGM.CreateRuntimeFunction(FnTy, "__kmpc_restore_team_static_memory");
+ break;
+ }
+ case OMPRTL__kmpc_barrier: {
+ // Build void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
+ auto *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_barrier");
+ cast<llvm::Function>(RTLFn)->addFnAttr(llvm::Attribute::Convergent);
+ break;
+ }
}
return RTLFn;
}
@@ -1733,6 +1859,37 @@ void CGOpenMPRuntimeNVPTX::emitTargetOutlinedFunction(
setPropertyExecutionMode(CGM, OutlinedFn->getName(), Mode);
}
+namespace {
+LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
+/// Enum for accessing the reserved_2 field of the ident_t struct.
+enum ModeFlagsTy : unsigned {
+ /// Bit set to 1 when in SPMD mode.
+ KMP_IDENT_SPMD_MODE = 0x01,
+ /// Bit set to 1 when a simplified runtime is used.
+ KMP_IDENT_SIMPLE_RT_MODE = 0x02,
+ LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/KMP_IDENT_SIMPLE_RT_MODE)
+};
+
+/// Special Undefined mode: the combination of Non-SPMD mode and the simple runtime.
+static const ModeFlagsTy UndefinedMode =
+ (~KMP_IDENT_SPMD_MODE) & KMP_IDENT_SIMPLE_RT_MODE;
+} // anonymous namespace
+
+unsigned CGOpenMPRuntimeNVPTX::getDefaultLocationReserved2Flags() const {
+ switch (getExecutionMode()) {
+ case EM_SPMD:
+ if (requiresFullRuntime())
+ return KMP_IDENT_SPMD_MODE & (~KMP_IDENT_SIMPLE_RT_MODE);
+ return KMP_IDENT_SPMD_MODE | KMP_IDENT_SIMPLE_RT_MODE;
+ case EM_NonSPMD:
+ assert(requiresFullRuntime() && "Expected full runtime.");
+ return (~KMP_IDENT_SPMD_MODE) & (~KMP_IDENT_SIMPLE_RT_MODE);
+ case EM_Unknown:
+ return UndefinedMode;
+ }
+ llvm_unreachable("Unknown flags are requested.");
+}
+
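// Illustrative sketch, not from this patch: a device runtime could decode the
// reserved_2 bits encoded above like this; the bit values mirror the enum in
// this patch, while the helper names are hypothetical.
static inline bool identIsSPMDMode(unsigned Reserved2) {
  return (Reserved2 & 0x01) != 0; // KMP_IDENT_SPMD_MODE
}
static inline bool identUsesSimpleRuntime(unsigned Reserved2) {
  return (Reserved2 & 0x02) != 0; // KMP_IDENT_SIMPLE_RT_MODE
}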
CGOpenMPRuntimeNVPTX::CGOpenMPRuntimeNVPTX(CodeGenModule &CGM)
: CGOpenMPRuntime(CGM, "_", "$") {
if (!CGM.getLangOpts().OpenMPIsDevice)
@@ -1784,12 +1941,15 @@ llvm::Value *CGOpenMPRuntimeNVPTX::emitParallelOutlinedFunction(
}
} Action(IsInParallelRegion);
CodeGen.setAction(Action);
+ bool PrevIsInTTDRegion = IsInTTDRegion;
+ IsInTTDRegion = false;
bool PrevIsInTargetMasterThreadRegion = IsInTargetMasterThreadRegion;
IsInTargetMasterThreadRegion = false;
auto *OutlinedFun =
cast<llvm::Function>(CGOpenMPRuntime::emitParallelOutlinedFunction(
D, ThreadIDVar, InnermostKind, CodeGen));
IsInTargetMasterThreadRegion = PrevIsInTargetMasterThreadRegion;
+ IsInTTDRegion = PrevIsInTTDRegion;
if (getExecutionMode() != CGOpenMPRuntimeNVPTX::EM_SPMD &&
!IsInParallelRegion) {
llvm::Function *WrapperFun =
@@ -1803,13 +1963,14 @@ llvm::Value *CGOpenMPRuntimeNVPTX::emitParallelOutlinedFunction(
/// Get list of lastprivate variables from the teams distribute ... or
/// teams {distribute ...} directives.
static void
-getDistributeLastprivateVars(const OMPExecutableDirective &D,
+getDistributeLastprivateVars(ASTContext &Ctx, const OMPExecutableDirective &D,
llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
"expected teams directive.");
const OMPExecutableDirective *Dir = &D;
if (!isOpenMPDistributeDirective(D.getDirectiveKind())) {
if (const Stmt *S = getSingleCompoundChild(
+ Ctx,
D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers(
/*IgnoreCaptured=*/true))) {
Dir = dyn_cast<OMPExecutableDirective>(S);
@@ -1819,12 +1980,21 @@ getDistributeLastprivateVars(const OMPExecutableDirective &D,
}
if (!Dir)
return;
- for (const OMPLastprivateClause *C :
- Dir->getClausesOfKind<OMPLastprivateClause>()) {
- for (const Expr *E : C->getVarRefs()) {
- const auto *DE = cast<DeclRefExpr>(E->IgnoreParens());
- Vars.push_back(cast<ValueDecl>(DE->getDecl()->getCanonicalDecl()));
- }
+ for (const auto *C : Dir->getClausesOfKind<OMPLastprivateClause>()) {
+ for (const Expr *E : C->getVarRefs())
+ Vars.push_back(getPrivateItem(E));
+ }
+}
+
+/// Get list of reduction variables from the teams ... directives.
+static void
+getTeamsReductionVars(ASTContext &Ctx, const OMPExecutableDirective &D,
+ llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
+ assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
+ "expected teams directive.");
+ for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
+ for (const Expr *E : C->privates())
+ Vars.push_back(getPrivateItem(E));
}
}
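// For illustration only (variable names are hypothetical), a construct such
// as the following exercises both helpers above: 'red' is collected by
// getTeamsReductionVars and 'lp' by getDistributeLastprivateVars.
//
//   #pragma omp target teams distribute reduction(+ : red) lastprivate(lp)
//   for (int i = 0; i < N; ++i) { ... }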
@@ -1834,13 +2004,22 @@ llvm::Value *CGOpenMPRuntimeNVPTX::emitTeamsOutlinedFunction(
SourceLocation Loc = D.getBeginLoc();
const RecordDecl *GlobalizedRD = nullptr;
- llvm::SmallVector<const ValueDecl *, 4> LastPrivates;
+ llvm::SmallVector<const ValueDecl *, 4> LastPrivatesReductions;
llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
+ // Globalize team reduction variables unconditionally in all modes.
+ getTeamsReductionVars(CGM.getContext(), D, LastPrivatesReductions);
if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD) {
- getDistributeLastprivateVars(D, LastPrivates);
- if (!LastPrivates.empty())
- GlobalizedRD = buildRecordForGlobalizedVars(
- CGM.getContext(), LastPrivates, MappedDeclsFields);
+ getDistributeLastprivateVars(CGM.getContext(), D, LastPrivatesReductions);
+ if (!LastPrivatesReductions.empty()) {
+ GlobalizedRD = ::buildRecordForGlobalizedVars(
+ CGM.getContext(), llvm::None, LastPrivatesReductions,
+ MappedDeclsFields);
+ }
+ } else if (!LastPrivatesReductions.empty()) {
+ assert(!TeamAndReductions.first &&
+ "Previous team declaration is not expected.");
+ TeamAndReductions.first = D.getCapturedStmt(OMPD_teams)->getCapturedDecl();
+ std::swap(TeamAndReductions.second, LastPrivatesReductions);
}
// Emit target region as a standalone region.
@@ -1869,9 +2048,9 @@ llvm::Value *CGOpenMPRuntimeNVPTX::emitTeamsOutlinedFunction(
for (const auto &Pair : MappedDeclsFields) {
assert(Pair.getFirst()->isCanonicalDecl() &&
"Expected canonical declaration");
- Data.insert(std::make_pair(
- Pair.getFirst(),
- std::make_pair(Pair.getSecond(), Address::invalid())));
+ Data.insert(std::make_pair(Pair.getFirst(),
+ MappedVarData(Pair.getSecond(),
+ /*IsOnePerTeam=*/true)));
}
}
Rt.emitGenericVarsProlog(CGF, Loc);
@@ -1905,74 +2084,184 @@ void CGOpenMPRuntimeNVPTX::emitGenericVarsProlog(CodeGenFunction &CGF,
if (I == FunctionGlobalizedDecls.end())
return;
if (const RecordDecl *GlobalizedVarsRecord = I->getSecond().GlobalRecord) {
- QualType RecTy = CGM.getContext().getRecordType(GlobalizedVarsRecord);
+ QualType GlobalRecTy = CGM.getContext().getRecordType(GlobalizedVarsRecord);
+ QualType SecGlobalRecTy;
// Recover pointer to this function's global record. The runtime will
// handle the specifics of the allocation of the memory.
// Use actual memory size of the record including the padding
// for alignment purposes.
unsigned Alignment =
- CGM.getContext().getTypeAlignInChars(RecTy).getQuantity();
+ CGM.getContext().getTypeAlignInChars(GlobalRecTy).getQuantity();
unsigned GlobalRecordSize =
- CGM.getContext().getTypeSizeInChars(RecTy).getQuantity();
+ CGM.getContext().getTypeSizeInChars(GlobalRecTy).getQuantity();
GlobalRecordSize = llvm::alignTo(GlobalRecordSize, Alignment);
+ llvm::PointerType *GlobalRecPtrTy =
+ CGF.ConvertTypeForMem(GlobalRecTy)->getPointerTo();
llvm::Value *GlobalRecCastAddr;
- if (WithSPMDCheck ||
- getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_Unknown) {
+ llvm::Value *IsTTD = nullptr;
+ if (!IsInTTDRegion &&
+ (WithSPMDCheck ||
+ getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_Unknown)) {
llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
llvm::BasicBlock *SPMDBB = CGF.createBasicBlock(".spmd");
llvm::BasicBlock *NonSPMDBB = CGF.createBasicBlock(".non-spmd");
+ if (I->getSecond().SecondaryGlobalRecord.hasValue()) {
+ llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
+ llvm::Value *ThreadID = getThreadID(CGF, Loc);
+ llvm::Value *PL = CGF.EmitRuntimeCall(
+ createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_parallel_level),
+ {RTLoc, ThreadID});
+ IsTTD = Bld.CreateIsNull(PL);
+ }
llvm::Value *IsSPMD = Bld.CreateIsNotNull(CGF.EmitNounwindRuntimeCall(
createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_is_spmd_exec_mode)));
Bld.CreateCondBr(IsSPMD, SPMDBB, NonSPMDBB);
// There is no need to emit line number for unconditional branch.
(void)ApplyDebugLocation::CreateEmpty(CGF);
CGF.EmitBlock(SPMDBB);
- Address RecPtr = CGF.CreateMemTemp(RecTy, "_local_stack");
+ Address RecPtr = Address(llvm::ConstantPointerNull::get(GlobalRecPtrTy),
+ CharUnits::fromQuantity(Alignment));
CGF.EmitBranch(ExitBB);
// There is no need to emit line number for unconditional branch.
(void)ApplyDebugLocation::CreateEmpty(CGF);
CGF.EmitBlock(NonSPMDBB);
+ llvm::Value *Size = llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize);
+ if (const RecordDecl *SecGlobalizedVarsRecord =
+ I->getSecond().SecondaryGlobalRecord.getValueOr(nullptr)) {
+ SecGlobalRecTy =
+ CGM.getContext().getRecordType(SecGlobalizedVarsRecord);
+
+ // Recover pointer to this function's global record. The runtime will
+ // handle the specifics of the allocation of the memory.
+ // Use actual memory size of the record including the padding
+ // for alignment purposes.
+ unsigned Alignment =
+ CGM.getContext().getTypeAlignInChars(SecGlobalRecTy).getQuantity();
+ unsigned GlobalRecordSize =
+ CGM.getContext().getTypeSizeInChars(SecGlobalRecTy).getQuantity();
+ GlobalRecordSize = llvm::alignTo(GlobalRecordSize, Alignment);
+ Size = Bld.CreateSelect(
+ IsTTD, llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize), Size);
+ }
// TODO: allow the usage of shared memory to be controlled by
// the user; for now, default to global.
llvm::Value *GlobalRecordSizeArg[] = {
- llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize),
- CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
- llvm::Value *GlobalRecValue =
- CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
- OMPRTL_NVPTX__kmpc_data_sharing_push_stack),
- GlobalRecordSizeArg);
+ Size, CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
+ llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
+ createNVPTXRuntimeFunction(
+ OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack),
+ GlobalRecordSizeArg);
GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- GlobalRecValue, CGF.ConvertTypeForMem(RecTy)->getPointerTo());
+ GlobalRecValue, GlobalRecPtrTy);
CGF.EmitBlock(ExitBB);
- auto *Phi = Bld.CreatePHI(GlobalRecCastAddr->getType(),
+ auto *Phi = Bld.CreatePHI(GlobalRecPtrTy,
/*NumReservedValues=*/2, "_select_stack");
Phi->addIncoming(RecPtr.getPointer(), SPMDBB);
Phi->addIncoming(GlobalRecCastAddr, NonSPMDBB);
GlobalRecCastAddr = Phi;
I->getSecond().GlobalRecordAddr = Phi;
I->getSecond().IsInSPMDModeFlag = IsSPMD;
+ } else if (IsInTTDRegion) {
+ assert(GlobalizedRecords.back().Records.size() < 2 &&
+ "Expected less than 2 globalized records: one for target and one "
+ "for teams.");
+ unsigned Offset = 0;
+ for (const RecordDecl *RD : GlobalizedRecords.back().Records) {
+ QualType RDTy = CGM.getContext().getRecordType(RD);
+ unsigned Alignment =
+ CGM.getContext().getTypeAlignInChars(RDTy).getQuantity();
+ unsigned Size = CGM.getContext().getTypeSizeInChars(RDTy).getQuantity();
+ Offset =
+ llvm::alignTo(llvm::alignTo(Offset, Alignment) + Size, Alignment);
+ }
+ unsigned Alignment =
+ CGM.getContext().getTypeAlignInChars(GlobalRecTy).getQuantity();
+ Offset = llvm::alignTo(Offset, Alignment);
+ GlobalizedRecords.back().Records.push_back(GlobalizedVarsRecord);
+ ++GlobalizedRecords.back().RegionCounter;
+ if (GlobalizedRecords.back().Records.size() == 1) {
+ assert(KernelStaticGlobalized &&
+ "Kernel static pointer must be initialized already.");
+ auto *UseSharedMemory = new llvm::GlobalVariable(
+ CGM.getModule(), CGM.Int16Ty, /*isConstant=*/true,
+ llvm::GlobalValue::InternalLinkage, nullptr,
+ "_openmp_static_kernel$is_shared");
+ UseSharedMemory->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
+ QualType Int16Ty = CGM.getContext().getIntTypeForBitwidth(
+ /*DestWidth=*/16, /*Signed=*/0);
+ llvm::Value *IsInSharedMemory = CGF.EmitLoadOfScalar(
+ Address(UseSharedMemory,
+ CGM.getContext().getTypeAlignInChars(Int16Ty)),
+ /*Volatile=*/false, Int16Ty, Loc);
+ auto *StaticGlobalized = new llvm::GlobalVariable(
+ CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
+ llvm::GlobalValue::CommonLinkage, nullptr);
+ auto *RecSize = new llvm::GlobalVariable(
+ CGM.getModule(), CGM.SizeTy, /*isConstant=*/true,
+ llvm::GlobalValue::InternalLinkage, nullptr,
+ "_openmp_static_kernel$size");
+ RecSize->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
+ llvm::Value *Ld = CGF.EmitLoadOfScalar(
+ Address(RecSize, CGM.getSizeAlign()), /*Volatile=*/false,
+ CGM.getContext().getSizeType(), Loc);
+ llvm::Value *ResAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ KernelStaticGlobalized, CGM.VoidPtrPtrTy);
+ llvm::Value *GlobalRecordSizeArg[] = {StaticGlobalized, Ld,
+ IsInSharedMemory, ResAddr};
+ CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
+ OMPRTL_NVPTX__kmpc_get_team_static_memory),
+ GlobalRecordSizeArg);
+ GlobalizedRecords.back().Buffer = StaticGlobalized;
+ GlobalizedRecords.back().RecSize = RecSize;
+ GlobalizedRecords.back().UseSharedMemory = UseSharedMemory;
+ GlobalizedRecords.back().Loc = Loc;
+ }
+ assert(KernelStaticGlobalized && "Global address must be set already.");
+ Address FrameAddr = CGF.EmitLoadOfPointer(
+ Address(KernelStaticGlobalized, CGM.getPointerAlign()),
+ CGM.getContext()
+ .getPointerType(CGM.getContext().VoidPtrTy)
+ .castAs<PointerType>());
+ llvm::Value *GlobalRecValue =
+ Bld.CreateConstInBoundsGEP(FrameAddr, Offset, CharUnits::One())
+ .getPointer();
+ I->getSecond().GlobalRecordAddr = GlobalRecValue;
+ I->getSecond().IsInSPMDModeFlag = nullptr;
+ GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ GlobalRecValue, CGF.ConvertTypeForMem(GlobalRecTy)->getPointerTo());
} else {
// TODO: allow the usage of shared memory to be controlled by
// the user; for now, default to global.
llvm::Value *GlobalRecordSizeArg[] = {
llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize),
CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
- llvm::Value *GlobalRecValue =
- CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
- OMPRTL_NVPTX__kmpc_data_sharing_push_stack),
- GlobalRecordSizeArg);
+ llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
+ createNVPTXRuntimeFunction(
+ OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack),
+ GlobalRecordSizeArg);
GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- GlobalRecValue, CGF.ConvertTypeForMem(RecTy)->getPointerTo());
+ GlobalRecValue, GlobalRecPtrTy);
I->getSecond().GlobalRecordAddr = GlobalRecValue;
I->getSecond().IsInSPMDModeFlag = nullptr;
}
LValue Base =
- CGF.MakeNaturalAlignPointeeAddrLValue(GlobalRecCastAddr, RecTy);
+ CGF.MakeNaturalAlignPointeeAddrLValue(GlobalRecCastAddr, GlobalRecTy);
// Emit the "global alloca" which is a GEP from the global declaration
// record using the pointer returned by the runtime.
+ LValue SecBase;
+ decltype(I->getSecond().LocalVarData)::const_iterator SecIt;
+ if (IsTTD) {
+ SecIt = I->getSecond().SecondaryLocalVarData->begin();
+ llvm::PointerType *SecGlobalRecPtrTy =
+ CGF.ConvertTypeForMem(SecGlobalRecTy)->getPointerTo();
+ SecBase = CGF.MakeNaturalAlignPointeeAddrLValue(
+ Bld.CreatePointerBitCastOrAddrSpaceCast(
+ I->getSecond().GlobalRecordAddr, SecGlobalRecPtrTy),
+ SecGlobalRecTy);
+ }
for (auto &Rec : I->getSecond().LocalVarData) {
bool EscapedParam = I->getSecond().EscapedParameters.count(Rec.first);
llvm::Value *ParValue;
@@ -1982,14 +2271,51 @@ void CGOpenMPRuntimeNVPTX::emitGenericVarsProlog(CodeGenFunction &CGF,
CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
ParValue = CGF.EmitLoadOfScalar(ParLVal, Loc);
}
- const FieldDecl *FD = Rec.second.first;
- LValue VarAddr = CGF.EmitLValueForField(Base, FD);
- Rec.second.second = VarAddr.getAddress();
+ LValue VarAddr = CGF.EmitLValueForField(Base, Rec.second.FD);
+ // Emit VarAddr based on lane-id if required.
+ QualType VarTy;
+ if (Rec.second.IsOnePerTeam) {
+ VarTy = Rec.second.FD->getType();
+ } else {
+ llvm::Value *Ptr = CGF.Builder.CreateInBoundsGEP(
+ VarAddr.getAddress().getPointer(),
+ {Bld.getInt32(0), getNVPTXLaneID(CGF)});
+ VarTy =
+ Rec.second.FD->getType()->castAsArrayTypeUnsafe()->getElementType();
+ VarAddr = CGF.MakeAddrLValue(
+ Address(Ptr, CGM.getContext().getDeclAlign(Rec.first)), VarTy,
+ AlignmentSource::Decl);
+ }
+ Rec.second.PrivateAddr = VarAddr.getAddress();
+ if (!IsInTTDRegion &&
+ (WithSPMDCheck ||
+ getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_Unknown)) {
+ assert(I->getSecond().IsInSPMDModeFlag &&
+ "Expected unknown execution mode or required SPMD check.");
+ if (IsTTD) {
+ assert(SecIt->second.IsOnePerTeam &&
+ "Secondary glob data must be one per team.");
+ LValue SecVarAddr = CGF.EmitLValueForField(SecBase, SecIt->second.FD);
+ VarAddr.setAddress(
+ Address(Bld.CreateSelect(IsTTD, SecVarAddr.getPointer(),
+ VarAddr.getPointer()),
+ VarAddr.getAlignment()));
+ Rec.second.PrivateAddr = VarAddr.getAddress();
+ }
+ Address GlobalPtr = Rec.second.PrivateAddr;
+ Address LocalAddr = CGF.CreateMemTemp(VarTy, Rec.second.FD->getName());
+ Rec.second.PrivateAddr = Address(
+ Bld.CreateSelect(I->getSecond().IsInSPMDModeFlag,
+ LocalAddr.getPointer(), GlobalPtr.getPointer()),
+ LocalAddr.getAlignment());
+ }
if (EscapedParam) {
const auto *VD = cast<VarDecl>(Rec.first);
CGF.EmitStoreOfScalar(ParValue, VarAddr);
I->getSecond().MappedParams->setVarAddr(CGF, VD, VarAddr.getAddress());
}
+ if (IsTTD)
+ ++SecIt;
}
}
for (const ValueDecl *VD : I->getSecond().EscapedVariableLengthDecls) {
@@ -2011,7 +2337,8 @@ void CGOpenMPRuntimeNVPTX::emitGenericVarsProlog(CodeGenFunction &CGF,
llvm::Value *GlobalRecordSizeArg[] = {
Size, CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_data_sharing_push_stack),
+ createNVPTXRuntimeFunction(
+ OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack),
GlobalRecordSizeArg);
llvm::Value *GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
GlobalRecValue, CGF.ConvertTypeForMem(VD->getType())->getPointerTo());
@@ -2043,8 +2370,9 @@ void CGOpenMPRuntimeNVPTX::emitGenericVarsEpilog(CodeGenFunction &CGF,
Addr);
}
if (I->getSecond().GlobalRecordAddr) {
- if (WithSPMDCheck ||
- getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_Unknown) {
+ if (!IsInTTDRegion &&
+ (WithSPMDCheck ||
+ getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_Unknown)) {
CGBuilderTy &Bld = CGF.Builder;
llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
llvm::BasicBlock *NonSPMDBB = CGF.createBasicBlock(".non-spmd");
@@ -2057,6 +2385,23 @@ void CGOpenMPRuntimeNVPTX::emitGenericVarsEpilog(CodeGenFunction &CGF,
OMPRTL_NVPTX__kmpc_data_sharing_pop_stack),
CGF.EmitCastToVoidPtr(I->getSecond().GlobalRecordAddr));
CGF.EmitBlock(ExitBB);
+ } else if (IsInTTDRegion) {
+ assert(GlobalizedRecords.back().RegionCounter > 0 &&
+ "region counter must be > 0.");
+ --GlobalizedRecords.back().RegionCounter;
+ // Emit the restore function only in the target region.
+ if (GlobalizedRecords.back().RegionCounter == 0) {
+ QualType Int16Ty = CGM.getContext().getIntTypeForBitwidth(
+ /*DestWidth=*/16, /*Signed=*/0);
+ llvm::Value *IsInSharedMemory = CGF.EmitLoadOfScalar(
+ Address(GlobalizedRecords.back().UseSharedMemory,
+ CGM.getContext().getTypeAlignInChars(Int16Ty)),
+ /*Volatile=*/false, Int16Ty, GlobalizedRecords.back().Loc);
+ CGF.EmitRuntimeCall(
+ createNVPTXRuntimeFunction(
+ OMPRTL_NVPTX__kmpc_restore_team_static_memory),
+ IsInSharedMemory);
+ }
} else {
CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
OMPRTL_NVPTX__kmpc_data_sharing_pop_stack),
@@ -2155,7 +2500,7 @@ void CGOpenMPRuntimeNVPTX::emitNonSPMDParallelCall(
// passed from the outside of the target region.
CodeGenFunction::OMPPrivateScope PrivateArgScope(CGF);
- // There's somehting to share.
+ // There's something to share.
if (!CapturedVars.empty()) {
// Prepare for parallel region. Indicate the outlined function.
Address SharedArgs =
@@ -2209,30 +2554,24 @@ void CGOpenMPRuntimeNVPTX::emitNonSPMDParallelCall(
Work.emplace_back(WFn);
};
- auto &&LNParallelGen = [this, Loc, &SeqGen, &L0ParallelGen, &CodeGen,
- &ThreadIDAddr](CodeGenFunction &CGF,
- PrePostActionTy &Action) {
- RegionCodeGenTy RCG(CodeGen);
+ auto &&LNParallelGen = [this, Loc, &SeqGen, &L0ParallelGen](
+ CodeGenFunction &CGF, PrePostActionTy &Action) {
if (IsInParallelRegion) {
SeqGen(CGF, Action);
} else if (IsInTargetMasterThreadRegion) {
L0ParallelGen(CGF, Action);
- } else if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_NonSPMD) {
- RCG(CGF);
} else {
// Check for master and then parallelism:
// if (__kmpc_is_spmd_exec_mode() || __kmpc_parallel_level(loc, gtid)) {
- // Serialized execution.
- // } else if (master) {
- // Worker call.
+ // Serialized execution.
// } else {
- // Outlined function call.
+ // Worker call.
// }
CGBuilderTy &Bld = CGF.Builder;
llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
llvm::BasicBlock *SeqBB = CGF.createBasicBlock(".sequential");
llvm::BasicBlock *ParallelCheckBB = CGF.createBasicBlock(".parcheck");
- llvm::BasicBlock *MasterCheckBB = CGF.createBasicBlock(".mastercheck");
+ llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
llvm::Value *IsSPMD = Bld.CreateIsNotNull(CGF.EmitNounwindRuntimeCall(
createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_is_spmd_exec_mode)));
Bld.CreateCondBr(IsSPMD, SeqBB, ParallelCheckBB);
@@ -2245,29 +2584,17 @@ void CGOpenMPRuntimeNVPTX::emitNonSPMDParallelCall(
createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_parallel_level),
{RTLoc, ThreadID});
llvm::Value *Res = Bld.CreateIsNotNull(PL);
- Bld.CreateCondBr(Res, SeqBB, MasterCheckBB);
+ Bld.CreateCondBr(Res, SeqBB, MasterBB);
CGF.EmitBlock(SeqBB);
SeqGen(CGF, Action);
CGF.EmitBranch(ExitBB);
// There is no need to emit line number for unconditional branch.
(void)ApplyDebugLocation::CreateEmpty(CGF);
- CGF.EmitBlock(MasterCheckBB);
- llvm::BasicBlock *MasterThenBB = CGF.createBasicBlock("master.then");
- llvm::BasicBlock *ElseBlock = CGF.createBasicBlock("omp_if.else");
- llvm::Value *IsMaster =
- Bld.CreateICmpEQ(getNVPTXThreadID(CGF), getMasterThreadID(CGF));
- Bld.CreateCondBr(IsMaster, MasterThenBB, ElseBlock);
- CGF.EmitBlock(MasterThenBB);
+ CGF.EmitBlock(MasterBB);
L0ParallelGen(CGF, Action);
CGF.EmitBranch(ExitBB);
// There is no need to emit line number for unconditional branch.
(void)ApplyDebugLocation::CreateEmpty(CGF);
- CGF.EmitBlock(ElseBlock);
- // In the worker need to use the real thread id.
- ThreadIDAddr = emitThreadIDAddress(CGF, Loc);
- RCG(CGF);
- // There is no need to emit line number for unconditional branch.
- (void)ApplyDebugLocation::CreateEmpty(CGF);
// Emit the continuation block for code after the if.
CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
@@ -2338,6 +2665,20 @@ void CGOpenMPRuntimeNVPTX::emitSPMDParallelCall(
}
}
+void CGOpenMPRuntimeNVPTX::emitBarrierCall(CodeGenFunction &CGF,
+ SourceLocation Loc,
+ OpenMPDirectiveKind Kind, bool,
+ bool) {
+ // Always emit simple barriers!
+ if (!CGF.HaveInsertPoint())
+ return;
+ // Build call __kmpc_barrier(loc, thread_id);
+ unsigned Flags = getDefaultFlagsForBarriers(Kind);
+ llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
+ getThreadID(CGF, Loc)};
+ CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(OMPRTL__kmpc_barrier), Args);
+}
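// A sketch of the code this emits, assuming the usual location and thread-id
// handling from the base runtime class; the exact IR depends on the
// enclosing function.
//
//   %gtid = call i32 @__kmpc_global_thread_num(%struct.ident_t* %loc)
//   call void @__kmpc_barrier(%struct.ident_t* %loc, i32 %gtid)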
+
void CGOpenMPRuntimeNVPTX::emitCriticalRegion(
CodeGenFunction &CGF, StringRef CriticalName,
const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc,
@@ -2380,14 +2721,16 @@ void CGOpenMPRuntimeNVPTX::emitCriticalRegion(
CGF.EmitBlock(BodyBB);
// Output the critical statement.
- CriticalOpGen(CGF);
+ CGOpenMPRuntime::emitCriticalRegion(CGF, CriticalName, CriticalOpGen, Loc,
+ Hint);
// After the body surrounded by the critical region, the single executing
// thread will jump to the synchronisation point.
// Block waits for all threads in current team to finish then increments the
// counter variable and returns to the loop.
CGF.EmitBlock(SyncBB);
- getNVPTXCTABarrier(CGF);
+ emitBarrierCall(CGF, Loc, OMPD_unknown, /*EmitChecks=*/false,
+ /*ForceSimpleCall=*/true);
llvm::Value *IncCounterVal =
CGF.Builder.CreateNSWAdd(CounterVal, CGF.Builder.getInt32(1));
@@ -2509,11 +2852,12 @@ static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
CGF, CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc),
IntType, Offset, Loc);
CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType);
- Ptr = Bld.CreateConstGEP(Ptr, 1, CharUnits::fromQuantity(IntSize));
- ElemPtr =
+ Address LocalPtr =
+ Bld.CreateConstGEP(Ptr, 1, CharUnits::fromQuantity(IntSize));
+ Address LocalElemPtr =
Bld.CreateConstGEP(ElemPtr, 1, CharUnits::fromQuantity(IntSize));
- PhiSrc->addIncoming(Ptr.getPointer(), ThenBB);
- PhiDest->addIncoming(ElemPtr.getPointer(), ThenBB);
+ PhiSrc->addIncoming(LocalPtr.getPointer(), ThenBB);
+ PhiDest->addIncoming(LocalElemPtr.getPointer(), ThenBB);
CGF.EmitBranch(PreCondBB);
CGF.EmitBlock(ExitBB);
} else {
@@ -2739,223 +3083,6 @@ static void emitReductionListCopy(
}
}
-/// This function emits a helper that loads data from the scratchpad array
-/// and (optionally) reduces it with the input operand.
-///
-/// load_and_reduce(local, scratchpad, index, width, should_reduce)
-/// reduce_data remote;
-/// for elem in remote:
-/// remote.elem = Scratchpad[elem_id][index]
-/// if (should_reduce)
-/// local = local @ remote
-/// else
-/// local = remote
-static llvm::Value *emitReduceScratchpadFunction(
- CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
- QualType ReductionArrayTy, llvm::Value *ReduceFn, SourceLocation Loc) {
- ASTContext &C = CGM.getContext();
- QualType Int32Ty = C.getIntTypeForBitwidth(32, /*Signed=*/1);
-
- // Destination of the copy.
- ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamDecl::Other);
- // Base address of the scratchpad array, with each element storing a
- // Reduce list per team.
- ImplicitParamDecl ScratchPadArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamDecl::Other);
- // A source index into the scratchpad array.
- ImplicitParamDecl IndexArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, Int32Ty,
- ImplicitParamDecl::Other);
- // Row width of an element in the scratchpad array, typically
- // the number of teams.
- ImplicitParamDecl WidthArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, Int32Ty,
- ImplicitParamDecl::Other);
- // If should_reduce == 1, then it's load AND reduce,
- // If should_reduce == 0 (or otherwise), then it only loads (+ copy).
- // The latter case is used for initialization.
- ImplicitParamDecl ShouldReduceArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- Int32Ty, ImplicitParamDecl::Other);
-
- FunctionArgList Args;
- Args.push_back(&ReduceListArg);
- Args.push_back(&ScratchPadArg);
- Args.push_back(&IndexArg);
- Args.push_back(&WidthArg);
- Args.push_back(&ShouldReduceArg);
-
- const CGFunctionInfo &CGFI =
- CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
- auto *Fn = llvm::Function::Create(
- CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
- "_omp_reduction_load_and_reduce", &CGM.getModule());
- CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
- Fn->setDoesNotRecurse();
- CodeGenFunction CGF(CGM);
- CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
-
- CGBuilderTy &Bld = CGF.Builder;
-
- // Get local Reduce list pointer.
- Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
- Address ReduceListAddr(
- Bld.CreatePointerBitCastOrAddrSpaceCast(
- CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
- C.VoidPtrTy, Loc),
- CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
- CGF.getPointerAlign());
-
- Address AddrScratchPadArg = CGF.GetAddrOfLocalVar(&ScratchPadArg);
- llvm::Value *ScratchPadBase = CGF.EmitLoadOfScalar(
- AddrScratchPadArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
-
- Address AddrIndexArg = CGF.GetAddrOfLocalVar(&IndexArg);
- llvm::Value *IndexVal = Bld.CreateIntCast(
- CGF.EmitLoadOfScalar(AddrIndexArg, /*Volatile=*/false, Int32Ty, Loc),
- CGM.SizeTy, /*isSigned=*/true);
-
- Address AddrWidthArg = CGF.GetAddrOfLocalVar(&WidthArg);
- llvm::Value *WidthVal = Bld.CreateIntCast(
- CGF.EmitLoadOfScalar(AddrWidthArg, /*Volatile=*/false, Int32Ty, Loc),
- CGM.SizeTy, /*isSigned=*/true);
-
- Address AddrShouldReduceArg = CGF.GetAddrOfLocalVar(&ShouldReduceArg);
- llvm::Value *ShouldReduceVal = CGF.EmitLoadOfScalar(
- AddrShouldReduceArg, /*Volatile=*/false, Int32Ty, Loc);
-
- // The absolute ptr address to the base addr of the next element to copy.
- llvm::Value *CumulativeElemBasePtr =
- Bld.CreatePtrToInt(ScratchPadBase, CGM.SizeTy);
- Address SrcDataAddr(CumulativeElemBasePtr, CGF.getPointerAlign());
-
- // Create a Remote Reduce list to store the elements read from the
- // scratchpad array.
- Address RemoteReduceList =
- CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.remote_red_list");
-
- // Assemble remote Reduce list from scratchpad array.
- emitReductionListCopy(ScratchpadToThread, CGF, ReductionArrayTy, Privates,
- SrcDataAddr, RemoteReduceList,
- {/*RemoteLaneOffset=*/nullptr,
- /*ScratchpadIndex=*/IndexVal,
- /*ScratchpadWidth=*/WidthVal});
-
- llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
- llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
- llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
-
- llvm::Value *CondReduce = Bld.CreateIsNotNull(ShouldReduceVal);
- Bld.CreateCondBr(CondReduce, ThenBB, ElseBB);
-
- CGF.EmitBlock(ThenBB);
- // We should reduce with the local Reduce list.
- // reduce_function(LocalReduceList, RemoteReduceList)
- llvm::Value *LocalDataPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- ReduceListAddr.getPointer(), CGF.VoidPtrTy);
- llvm::Value *RemoteDataPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- RemoteReduceList.getPointer(), CGF.VoidPtrTy);
- CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
- CGF, Loc, ReduceFn, {LocalDataPtr, RemoteDataPtr});
- Bld.CreateBr(MergeBB);
-
- CGF.EmitBlock(ElseBB);
- // No reduction; just copy:
- // Local Reduce list = Remote Reduce list.
- emitReductionListCopy(ThreadCopy, CGF, ReductionArrayTy, Privates,
- RemoteReduceList, ReduceListAddr);
- Bld.CreateBr(MergeBB);
-
- CGF.EmitBlock(MergeBB);
-
- CGF.FinishFunction();
- return Fn;
-}
-
-/// This function emits a helper that stores reduced data from the team
-/// master to a scratchpad array in global memory.
-///
-/// for elem in Reduce List:
-/// scratchpad[elem_id][index] = elem
-///
-static llvm::Value *emitCopyToScratchpad(CodeGenModule &CGM,
- ArrayRef<const Expr *> Privates,
- QualType ReductionArrayTy,
- SourceLocation Loc) {
-
- ASTContext &C = CGM.getContext();
- QualType Int32Ty = C.getIntTypeForBitwidth(32, /*Signed=*/1);
-
- // Source of the copy.
- ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamDecl::Other);
- // Base address of the scratchpad array, with each element storing a
- // Reduce list per team.
- ImplicitParamDecl ScratchPadArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamDecl::Other);
- // A destination index into the scratchpad array, typically the team
- // identifier.
- ImplicitParamDecl IndexArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, Int32Ty,
- ImplicitParamDecl::Other);
- // Row width of an element in the scratchpad array, typically
- // the number of teams.
- ImplicitParamDecl WidthArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, Int32Ty,
- ImplicitParamDecl::Other);
-
- FunctionArgList Args;
- Args.push_back(&ReduceListArg);
- Args.push_back(&ScratchPadArg);
- Args.push_back(&IndexArg);
- Args.push_back(&WidthArg);
-
- const CGFunctionInfo &CGFI =
- CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
- auto *Fn = llvm::Function::Create(
- CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
- "_omp_reduction_copy_to_scratchpad", &CGM.getModule());
- CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
- Fn->setDoesNotRecurse();
- CodeGenFunction CGF(CGM);
- CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
-
- CGBuilderTy &Bld = CGF.Builder;
-
- Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
- Address SrcDataAddr(
- Bld.CreatePointerBitCastOrAddrSpaceCast(
- CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
- C.VoidPtrTy, Loc),
- CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
- CGF.getPointerAlign());
-
- Address AddrScratchPadArg = CGF.GetAddrOfLocalVar(&ScratchPadArg);
- llvm::Value *ScratchPadBase = CGF.EmitLoadOfScalar(
- AddrScratchPadArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
-
- Address AddrIndexArg = CGF.GetAddrOfLocalVar(&IndexArg);
- llvm::Value *IndexVal = Bld.CreateIntCast(
- CGF.EmitLoadOfScalar(AddrIndexArg, /*Volatile=*/false, Int32Ty, Loc),
- CGF.SizeTy, /*isSigned=*/true);
-
- Address AddrWidthArg = CGF.GetAddrOfLocalVar(&WidthArg);
- llvm::Value *WidthVal =
- Bld.CreateIntCast(CGF.EmitLoadOfScalar(AddrWidthArg, /*Volatile=*/false,
- Int32Ty, SourceLocation()),
- CGF.SizeTy, /*isSigned=*/true);
-
- // The absolute ptr address to the base addr of the next element to copy.
- llvm::Value *CumulativeElemBasePtr =
- Bld.CreatePtrToInt(ScratchPadBase, CGM.SizeTy);
- Address DestDataAddr(CumulativeElemBasePtr, CGF.getPointerAlign());
-
- emitReductionListCopy(ThreadToScratchpad, CGF, ReductionArrayTy, Privates,
- SrcDataAddr, DestDataAddr,
- {/*RemoteLaneOffset=*/nullptr,
- /*ScratchpadIndex=*/IndexVal,
- /*ScratchpadWidth=*/WidthVal});
-
- CGF.FinishFunction();
- return Fn;
-}
-
/// This function emits a helper that gathers Reduce lists from the first
/// lane of every active warp to lanes in the first warp.
///
@@ -3013,11 +3140,10 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
llvm::GlobalVariable *TransferMedium =
M.getGlobalVariable(TransferMediumName);
if (!TransferMedium) {
- auto *Ty = llvm::ArrayType::get(CGM.Int64Ty, WarpSize);
+ auto *Ty = llvm::ArrayType::get(CGM.Int32Ty, WarpSize);
unsigned SharedAddressSpace = C.getTargetAddressSpace(LangAS::cuda_shared);
TransferMedium = new llvm::GlobalVariable(
- M, Ty,
- /*isConstant=*/false, llvm::GlobalVariable::CommonLinkage,
+ M, Ty, /*isConstant=*/false, llvm::GlobalVariable::CommonLinkage,
llvm::Constant::getNullValue(Ty), TransferMediumName,
/*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal,
SharedAddressSpace);
@@ -3035,7 +3161,7 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
Address LocalReduceList(
Bld.CreatePointerBitCastOrAddrSpaceCast(
CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
- C.VoidPtrTy, SourceLocation()),
+ C.VoidPtrTy, Loc),
CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
CGF.getPointerAlign());
@@ -3045,121 +3171,153 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
// Warp master copies reduce element to transfer medium in __shared__
// memory.
//
- llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
- llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
- llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
-
- // if (lane_id == 0)
- llvm::Value *IsWarpMaster = Bld.CreateIsNull(LaneID, "warp_master");
- Bld.CreateCondBr(IsWarpMaster, ThenBB, ElseBB);
- CGF.EmitBlock(ThenBB);
-
- // Reduce element = LocalReduceList[i]
- Address ElemPtrPtrAddr =
- Bld.CreateConstArrayGEP(LocalReduceList, Idx, CGF.getPointerSize());
- llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
- ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
- // elemptr = (type[i]*)(elemptrptr)
- Address ElemPtr =
- Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType()));
- ElemPtr = Bld.CreateElementBitCast(
- ElemPtr, CGF.ConvertTypeForMem(Private->getType()));
-
- // Get pointer to location in transfer medium.
- // MediumPtr = &medium[warp_id]
- llvm::Value *MediumPtrVal = Bld.CreateInBoundsGEP(
- TransferMedium, {llvm::Constant::getNullValue(CGM.Int64Ty), WarpID});
- Address MediumPtr(MediumPtrVal, C.getTypeAlignInChars(Private->getType()));
- // Casting to actual data type.
- // MediumPtr = (type[i]*)MediumPtrAddr;
- MediumPtr = Bld.CreateElementBitCast(
- MediumPtr, CGF.ConvertTypeForMem(Private->getType()));
-
- // elem = *elemptr
- //*MediumPtr = elem
- if (Private->getType()->isScalarType()) {
- llvm::Value *Elem = CGF.EmitLoadOfScalar(ElemPtr, /*Volatile=*/false,
- Private->getType(), Loc);
- // Store the source element value to the dest element address.
- CGF.EmitStoreOfScalar(Elem, MediumPtr, /*Volatile=*/false,
- Private->getType());
- } else {
- CGF.EmitAggregateCopy(CGF.MakeAddrLValue(ElemPtr, Private->getType()),
- CGF.MakeAddrLValue(MediumPtr, Private->getType()),
- Private->getType(), AggValueSlot::DoesNotOverlap);
- }
-
- Bld.CreateBr(MergeBB);
-
- CGF.EmitBlock(ElseBB);
- Bld.CreateBr(MergeBB);
+ unsigned RealTySize =
+ C.getTypeSizeInChars(Private->getType())
+ .alignTo(C.getTypeAlignInChars(Private->getType()))
+ .getQuantity();
+ for (unsigned TySize = 4; TySize > 0 && RealTySize > 0; TySize /= 2) {
+ unsigned NumIters = RealTySize / TySize;
+ if (NumIters == 0)
+ continue;
+ QualType CType = C.getIntTypeForBitwidth(
+ C.toBits(CharUnits::fromQuantity(TySize)), /*Signed=*/1);
+ llvm::Type *CopyType = CGF.ConvertTypeForMem(CType);
+ CharUnits Align = CharUnits::fromQuantity(TySize);
+ llvm::Value *Cnt = nullptr;
+ Address CntAddr = Address::invalid();
+ llvm::BasicBlock *PrecondBB = nullptr;
+ llvm::BasicBlock *ExitBB = nullptr;
+ if (NumIters > 1) {
+ CntAddr = CGF.CreateMemTemp(C.IntTy, ".cnt.addr");
+ CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.IntTy), CntAddr,
+ /*Volatile=*/false, C.IntTy);
+ PrecondBB = CGF.createBasicBlock("precond");
+ ExitBB = CGF.createBasicBlock("exit");
+ llvm::BasicBlock *BodyBB = CGF.createBasicBlock("body");
+ // There is no need to emit line number for unconditional branch.
+ (void)ApplyDebugLocation::CreateEmpty(CGF);
+ CGF.EmitBlock(PrecondBB);
+ Cnt = CGF.EmitLoadOfScalar(CntAddr, /*Volatile=*/false, C.IntTy, Loc);
+ llvm::Value *Cmp =
+ Bld.CreateICmpULT(Cnt, llvm::ConstantInt::get(CGM.IntTy, NumIters));
+ Bld.CreateCondBr(Cmp, BodyBB, ExitBB);
+ CGF.EmitBlock(BodyBB);
+ }
+ llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
+ llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
+ llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
- CGF.EmitBlock(MergeBB);
+ // if (lane_id == 0)
+ llvm::Value *IsWarpMaster = Bld.CreateIsNull(LaneID, "warp_master");
+ Bld.CreateCondBr(IsWarpMaster, ThenBB, ElseBB);
+ CGF.EmitBlock(ThenBB);
- Address AddrNumWarpsArg = CGF.GetAddrOfLocalVar(&NumWarpsArg);
- llvm::Value *NumWarpsVal = CGF.EmitLoadOfScalar(
- AddrNumWarpsArg, /*Volatile=*/false, C.IntTy, SourceLocation());
+ // Reduce element = LocalReduceList[i]
+ Address ElemPtrPtrAddr =
+ Bld.CreateConstArrayGEP(LocalReduceList, Idx, CGF.getPointerSize());
+ llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
+ ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
+ // elemptr = ((CopyType*)(elemptrptr)) + I
+ Address ElemPtr = Address(ElemPtrPtr, Align);
+ ElemPtr = Bld.CreateElementBitCast(ElemPtr, CopyType);
+ if (NumIters > 1) {
+ ElemPtr = Address(Bld.CreateGEP(ElemPtr.getPointer(), Cnt),
+ ElemPtr.getAlignment());
+ }
- llvm::Value *NumActiveThreads = Bld.CreateNSWMul(
- NumWarpsVal, getNVPTXWarpSize(CGF), "num_active_threads");
- // named_barrier_sync(ParallelBarrierID, num_active_threads)
- syncParallelThreads(CGF, NumActiveThreads);
+ // Get pointer to location in transfer medium.
+ // MediumPtr = &medium[warp_id]
+ llvm::Value *MediumPtrVal = Bld.CreateInBoundsGEP(
+ TransferMedium, {llvm::Constant::getNullValue(CGM.Int64Ty), WarpID});
+ Address MediumPtr(MediumPtrVal, Align);
+ // Casting to actual data type.
+ // MediumPtr = (CopyType*)MediumPtrAddr;
+ MediumPtr = Bld.CreateElementBitCast(MediumPtr, CopyType);
+
+ // elem = *elemptr
+ //*MediumPtr = elem
+ llvm::Value *Elem =
+ CGF.EmitLoadOfScalar(ElemPtr, /*Volatile=*/false, CType, Loc);
+ // Store the source element value to the dest element address.
+ CGF.EmitStoreOfScalar(Elem, MediumPtr, /*Volatile=*/true, CType);
+
+ Bld.CreateBr(MergeBB);
+
+ CGF.EmitBlock(ElseBB);
+ Bld.CreateBr(MergeBB);
+
+ CGF.EmitBlock(MergeBB);
+
+ // kmpc_barrier.
+ CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
+ /*EmitChecks=*/false,
+ /*ForceSimpleCall=*/true);
+
+ //
+ // Warp 0 copies reduce element from transfer medium.
+ //
+ llvm::BasicBlock *W0ThenBB = CGF.createBasicBlock("then");
+ llvm::BasicBlock *W0ElseBB = CGF.createBasicBlock("else");
+ llvm::BasicBlock *W0MergeBB = CGF.createBasicBlock("ifcont");
+
+ Address AddrNumWarpsArg = CGF.GetAddrOfLocalVar(&NumWarpsArg);
+ llvm::Value *NumWarpsVal = CGF.EmitLoadOfScalar(
+ AddrNumWarpsArg, /*Volatile=*/false, C.IntTy, Loc);
+
+ // Up to 32 threads in warp 0 are active.
+ llvm::Value *IsActiveThread =
+ Bld.CreateICmpULT(ThreadID, NumWarpsVal, "is_active_thread");
+ Bld.CreateCondBr(IsActiveThread, W0ThenBB, W0ElseBB);
+
+ CGF.EmitBlock(W0ThenBB);
+
+ // SrcMediumPtr = &medium[tid]
+ llvm::Value *SrcMediumPtrVal = Bld.CreateInBoundsGEP(
+ TransferMedium,
+ {llvm::Constant::getNullValue(CGM.Int64Ty), ThreadID});
+ Address SrcMediumPtr(SrcMediumPtrVal, Align);
+ // SrcMediumVal = *SrcMediumPtr;
+ SrcMediumPtr = Bld.CreateElementBitCast(SrcMediumPtr, CopyType);
+
+ // TargetElemPtr = (CopyType*)(SrcDataAddr[i]) + I
+ Address TargetElemPtrPtr =
+ Bld.CreateConstArrayGEP(LocalReduceList, Idx, CGF.getPointerSize());
+ llvm::Value *TargetElemPtrVal = CGF.EmitLoadOfScalar(
+ TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, Loc);
+ Address TargetElemPtr = Address(TargetElemPtrVal, Align);
+ TargetElemPtr = Bld.CreateElementBitCast(TargetElemPtr, CopyType);
+ if (NumIters > 1) {
+ TargetElemPtr = Address(Bld.CreateGEP(TargetElemPtr.getPointer(), Cnt),
+ TargetElemPtr.getAlignment());
+ }
- //
- // Warp 0 copies reduce element from transfer medium.
- //
- llvm::BasicBlock *W0ThenBB = CGF.createBasicBlock("then");
- llvm::BasicBlock *W0ElseBB = CGF.createBasicBlock("else");
- llvm::BasicBlock *W0MergeBB = CGF.createBasicBlock("ifcont");
-
- // Up to 32 threads in warp 0 are active.
- llvm::Value *IsActiveThread =
- Bld.CreateICmpULT(ThreadID, NumWarpsVal, "is_active_thread");
- Bld.CreateCondBr(IsActiveThread, W0ThenBB, W0ElseBB);
-
- CGF.EmitBlock(W0ThenBB);
-
- // SrcMediumPtr = &medium[tid]
- llvm::Value *SrcMediumPtrVal = Bld.CreateInBoundsGEP(
- TransferMedium, {llvm::Constant::getNullValue(CGM.Int64Ty), ThreadID});
- Address SrcMediumPtr(SrcMediumPtrVal,
- C.getTypeAlignInChars(Private->getType()));
- // SrcMediumVal = *SrcMediumPtr;
- SrcMediumPtr = Bld.CreateElementBitCast(
- SrcMediumPtr, CGF.ConvertTypeForMem(Private->getType()));
-
- // TargetElemPtr = (type[i]*)(SrcDataAddr[i])
- Address TargetElemPtrPtr =
- Bld.CreateConstArrayGEP(LocalReduceList, Idx, CGF.getPointerSize());
- llvm::Value *TargetElemPtrVal = CGF.EmitLoadOfScalar(
- TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
- Address TargetElemPtr =
- Address(TargetElemPtrVal, C.getTypeAlignInChars(Private->getType()));
- TargetElemPtr = Bld.CreateElementBitCast(
- TargetElemPtr, CGF.ConvertTypeForMem(Private->getType()));
-
- // *TargetElemPtr = SrcMediumVal;
- if (Private->getType()->isScalarType()) {
- llvm::Value *SrcMediumValue = CGF.EmitLoadOfScalar(
- SrcMediumPtr, /*Volatile=*/false, Private->getType(), Loc);
+ // *TargetElemPtr = SrcMediumVal;
+ llvm::Value *SrcMediumValue =
+ CGF.EmitLoadOfScalar(SrcMediumPtr, /*Volatile=*/true, CType, Loc);
CGF.EmitStoreOfScalar(SrcMediumValue, TargetElemPtr, /*Volatile=*/false,
- Private->getType());
- } else {
- CGF.EmitAggregateCopy(
- CGF.MakeAddrLValue(SrcMediumPtr, Private->getType()),
- CGF.MakeAddrLValue(TargetElemPtr, Private->getType()),
- Private->getType(), AggValueSlot::DoesNotOverlap);
+ CType);
+ Bld.CreateBr(W0MergeBB);
+
+ CGF.EmitBlock(W0ElseBB);
+ Bld.CreateBr(W0MergeBB);
+
+ CGF.EmitBlock(W0MergeBB);
+
+ // While warp 0 copies values from transfer medium, all other warps must
+ // wait.
+ // kmpc_barrier.
+ CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
+ /*EmitChecks=*/false,
+ /*ForceSimpleCall=*/true);
+ if (NumIters > 1) {
+ Cnt = Bld.CreateNSWAdd(Cnt, llvm::ConstantInt::get(CGM.IntTy, /*V=*/1));
+ CGF.EmitStoreOfScalar(Cnt, CntAddr, /*Volatile=*/false, C.IntTy);
+ CGF.EmitBranch(PrecondBB);
+ (void)ApplyDebugLocation::CreateEmpty(CGF);
+ CGF.EmitBlock(ExitBB);
+ }
+ RealTySize %= TySize;
}
- Bld.CreateBr(W0MergeBB);
-
- CGF.EmitBlock(W0ElseBB);
- Bld.CreateBr(W0MergeBB);
-
- CGF.EmitBlock(W0MergeBB);
-
- // While warp 0 copies values from transfer medium, all other warps must
- // wait.
- syncParallelThreads(CGF, NumActiveThreads);
++Idx;
}
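// A plain-C sketch of the chunked copy strategy implemented above, assuming
// a 32-bit shared-memory transfer medium; the emitted IR additionally wraps
// each chunk in the warp-master / warp-0 guards and the two barriers.
//
//   for (unsigned Chunk = 4; Chunk > 0 && Size > 0; Chunk /= 2) {
//     for (unsigned I = 0, E = Size / Chunk; I < E; ++I)
//       /* copy Chunk bytes of the element through medium[warp_id] */;
//     Size %= Chunk;
//   }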
@@ -3633,125 +3791,115 @@ void CGOpenMPRuntimeNVPTX::emitReduction(
return;
bool ParallelReduction = isOpenMPParallelDirective(Options.ReductionKind);
+#ifndef NDEBUG
bool TeamsReduction = isOpenMPTeamsDirective(Options.ReductionKind);
- bool SimdReduction = isOpenMPSimdDirective(Options.ReductionKind);
- assert((TeamsReduction || ParallelReduction || SimdReduction) &&
- "Invalid reduction selection in emitReduction.");
+#endif
if (Options.SimpleReduction) {
+ assert(!TeamsReduction && !ParallelReduction &&
+ "Invalid reduction selection in emitReduction.");
CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs,
ReductionOps, Options);
return;
}
- ASTContext &C = CGM.getContext();
-
- // 1. Build a list of reduction variables.
- // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
- auto Size = RHSExprs.size();
- for (const Expr *E : Privates) {
- if (E->getType()->isVariablyModifiedType())
- // Reserve place for array size.
- ++Size;
- }
- llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
- QualType ReductionArrayTy =
- C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
- /*IndexTypeQuals=*/0);
- Address ReductionList =
- CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
- auto IPriv = Privates.begin();
- unsigned Idx = 0;
- for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
- Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx,
- CGF.getPointerSize());
- CGF.Builder.CreateStore(
- CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.EmitLValue(RHSExprs[I]).getPointer(), CGF.VoidPtrTy),
- Elem);
- if ((*IPriv)->getType()->isVariablyModifiedType()) {
- // Store array size.
- ++Idx;
- Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx,
- CGF.getPointerSize());
- llvm::Value *Size = CGF.Builder.CreateIntCast(
- CGF.getVLASize(
- CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
- .NumElts,
- CGF.SizeTy, /*isSigned=*/false);
- CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
- Elem);
- }
- }
-
- // 2. Emit reduce_func().
- llvm::Value *ReductionFn = emitReductionFunction(
- CGM, Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(),
- Privates, LHSExprs, RHSExprs, ReductionOps);
+ assert((TeamsReduction || ParallelReduction) &&
+ "Invalid reduction selection in emitReduction.");
- // 4. Build res = __kmpc_reduce{_nowait}(<gtid>, <n>, sizeof(RedList),
+ // Build res = __kmpc_reduce{_nowait}(<gtid>, <n>, sizeof(RedList),
// RedList, shuffle_reduce_func, interwarp_copy_func);
+ // or
+ // Build res = __kmpc_reduce_teams_nowait_simple(<loc>, <gtid>, <lck>);
llvm::Value *ThreadId = getThreadID(CGF, Loc);
- llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
- llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- ReductionList.getPointer(), CGF.VoidPtrTy);
-
- llvm::Value *ShuffleAndReduceFn = emitShuffleAndReduceFunction(
- CGM, Privates, ReductionArrayTy, ReductionFn, Loc);
- llvm::Value *InterWarpCopyFn =
- emitInterWarpCopyFunction(CGM, Privates, ReductionArrayTy, Loc);
-
- llvm::Value *Args[] = {ThreadId,
- CGF.Builder.getInt32(RHSExprs.size()),
- ReductionArrayTySize,
- RL,
- ShuffleAndReduceFn,
- InterWarpCopyFn};
-
- llvm::Value *Res = nullptr;
- if (ParallelReduction)
- Res = CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_parallel_reduce_nowait),
- Args);
- else if (SimdReduction)
- Res = CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_simd_reduce_nowait),
- Args);
- if (TeamsReduction) {
- llvm::Value *ScratchPadCopyFn =
- emitCopyToScratchpad(CGM, Privates, ReductionArrayTy, Loc);
- llvm::Value *LoadAndReduceFn = emitReduceScratchpadFunction(
+ llvm::Value *Res;
+ if (ParallelReduction) {
+ ASTContext &C = CGM.getContext();
+ // 1. Build a list of reduction variables.
+ // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
+ auto Size = RHSExprs.size();
+ for (const Expr *E : Privates) {
+ if (E->getType()->isVariablyModifiedType())
+ // Reserve place for array size.
+ ++Size;
+ }
+ llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
+ QualType ReductionArrayTy =
+ C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
+ /*IndexTypeQuals=*/0);
+ Address ReductionList =
+ CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
+ auto IPriv = Privates.begin();
+ unsigned Idx = 0;
+ for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
+ Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx,
+ CGF.getPointerSize());
+ CGF.Builder.CreateStore(
+ CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.EmitLValue(RHSExprs[I]).getPointer(), CGF.VoidPtrTy),
+ Elem);
+ if ((*IPriv)->getType()->isVariablyModifiedType()) {
+ // Store array size.
+ ++Idx;
+ Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx,
+ CGF.getPointerSize());
+ llvm::Value *Size = CGF.Builder.CreateIntCast(
+ CGF.getVLASize(
+ CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
+ .NumElts,
+ CGF.SizeTy, /*isSigned=*/false);
+ CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
+ Elem);
+ }
+ }
+
+ llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
+ llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ ReductionList.getPointer(), CGF.VoidPtrTy);
+ llvm::Value *ReductionFn = emitReductionFunction(
+ CGM, Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(),
+ Privates, LHSExprs, RHSExprs, ReductionOps);
+ llvm::Value *ShuffleAndReduceFn = emitShuffleAndReduceFunction(
CGM, Privates, ReductionArrayTy, ReductionFn, Loc);
+ llvm::Value *InterWarpCopyFn =
+ emitInterWarpCopyFunction(CGM, Privates, ReductionArrayTy, Loc);
llvm::Value *Args[] = {ThreadId,
CGF.Builder.getInt32(RHSExprs.size()),
ReductionArrayTySize,
RL,
ShuffleAndReduceFn,
- InterWarpCopyFn,
- ScratchPadCopyFn,
- LoadAndReduceFn};
+ InterWarpCopyFn};
+
+ Res = CGF.EmitRuntimeCall(
+ createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_parallel_reduce_nowait),
+ Args);
+ } else {
+ assert(TeamsReduction && "expected teams reduction.");
+ llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
+ std::string Name = getName({"reduction"});
+ llvm::Value *Lock = getCriticalRegionLock(Name);
+ llvm::Value *Args[] = {RTLoc, ThreadId, Lock};
Res = CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_teams_reduce_nowait),
+ createNVPTXRuntimeFunction(
+ OMPRTL_NVPTX__kmpc_nvptx_teams_reduce_nowait_simple),
Args);
}
- // 5. Build switch(res)
- llvm::BasicBlock *DefaultBB = CGF.createBasicBlock(".omp.reduction.default");
- llvm::SwitchInst *SwInst =
- CGF.Builder.CreateSwitch(Res, DefaultBB, /*NumCases=*/1);
+ // 5. Build if (res == 1)
+ llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.reduction.done");
+ llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.then");
+ llvm::Value *Cond = CGF.Builder.CreateICmpEQ(
+ Res, llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1));
+ CGF.Builder.CreateCondBr(Cond, ThenBB, ExitBB);
- // 6. Build case 1: where we have reduced values in the master
+ // 6. Build then branch: where we have reduced values in the master
// thread in each team.
// __kmpc_end_reduce{_nowait}(<gtid>);
// break;
- llvm::BasicBlock *Case1BB = CGF.createBasicBlock(".omp.reduction.case1");
- SwInst->addCase(CGF.Builder.getInt32(1), Case1BB);
- CGF.EmitBlock(Case1BB);
+ CGF.EmitBlock(ThenBB);
// Add emission of __kmpc_end_reduce{_nowait}(<gtid>);
- llvm::Value *EndArgs[] = {ThreadId};
auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps,
this](CodeGenFunction &CGF, PrePostActionTy &Action) {
auto IPriv = Privates.begin();
@@ -3765,15 +3913,33 @@ void CGOpenMPRuntimeNVPTX::emitReduction(
++IRHS;
}
};
- RegionCodeGenTy RCG(CodeGen);
- NVPTXActionTy Action(
- nullptr, llvm::None,
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_reduce_nowait),
- EndArgs);
- RCG.setAction(Action);
- RCG(CGF);
- CGF.EmitBranch(DefaultBB);
- CGF.EmitBlock(DefaultBB, /*IsFinished=*/true);
+ if (ParallelReduction) {
+ llvm::Value *EndArgs[] = {ThreadId};
+ RegionCodeGenTy RCG(CodeGen);
+ NVPTXActionTy Action(
+ nullptr, llvm::None,
+ createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_reduce_nowait),
+ EndArgs);
+ RCG.setAction(Action);
+ RCG(CGF);
+ } else {
+ assert(TeamsReduction && "expected teams reduction.");
+ llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
+ std::string Name = getName({"reduction"});
+ llvm::Value *Lock = getCriticalRegionLock(Name);
+ llvm::Value *EndArgs[] = {RTLoc, ThreadId, Lock};
+ RegionCodeGenTy RCG(CodeGen);
+ NVPTXActionTy Action(
+ nullptr, llvm::None,
+ createNVPTXRuntimeFunction(
+ OMPRTL_NVPTX__kmpc_nvptx_teams_end_reduce_nowait_simple),
+ EndArgs);
+ RCG.setAction(Action);
+ RCG(CGF);
+ }
+ // There is no need to emit line number for unconditional branch.
+ (void)ApplyDebugLocation::CreateEmpty(CGF);
+ CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
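// For the teams path above, the generated code is roughly the following
// sketch (assuming the simple teams-reduction entry points named in this
// function):
//
//   if (__kmpc_nvptx_teams_reduce_nowait_simple(&loc, gtid, &lck) == 1) {
//     <lhs> = <lhs> op <rhs>;  // reduction ops, serialized under the lock
//     __kmpc_nvptx_teams_end_reduce_nowait_simple(&loc, gtid, &lck);
//   }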
const VarDecl *
@@ -4000,6 +4166,8 @@ void CGOpenMPRuntimeNVPTX::emitFunctionProlog(CodeGenFunction &CGF,
assert(D && "Expected function or captured|block decl.");
assert(FunctionGlobalizedDecls.count(CGF.CurFn) == 0 &&
"Function is registered already.");
+ assert((!TeamAndReductions.first || TeamAndReductions.first == D) &&
+ "Team is set but not processed.");
const Stmt *Body = nullptr;
bool NeedToDelayGlobalization = false;
if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
@@ -4015,9 +4183,12 @@ void CGOpenMPRuntimeNVPTX::emitFunctionProlog(CodeGenFunction &CGF,
}
if (!Body)
return;
- CheckVarsEscapingDeclContext VarChecker(CGF);
+ CheckVarsEscapingDeclContext VarChecker(CGF, TeamAndReductions.second);
VarChecker.Visit(Body);
- const RecordDecl *GlobalizedVarsRecord = VarChecker.getGlobalizedRecord();
+ const RecordDecl *GlobalizedVarsRecord =
+ VarChecker.getGlobalizedRecord(IsInTTDRegion);
+ TeamAndReductions.first = nullptr;
+ TeamAndReductions.second.clear();
ArrayRef<const ValueDecl *> EscapedVariableLengthDecls =
VarChecker.getEscapedVariableLengthDecls();
if (!GlobalizedVarsRecord && EscapedVariableLengthDecls.empty())
@@ -4035,7 +4206,21 @@ void CGOpenMPRuntimeNVPTX::emitFunctionProlog(CodeGenFunction &CGF,
for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
assert(VD->isCanonicalDecl() && "Expected canonical declaration");
const FieldDecl *FD = VarChecker.getFieldForGlobalizedVar(VD);
- Data.insert(std::make_pair(VD, std::make_pair(FD, Address::invalid())));
+ Data.insert(std::make_pair(VD, MappedVarData(FD, IsInTTDRegion)));
+ }
+ if (!IsInTTDRegion && !NeedToDelayGlobalization && !IsInParallelRegion) {
+ CheckVarsEscapingDeclContext VarChecker(CGF, llvm::None);
+ VarChecker.Visit(Body);
+ I->getSecond().SecondaryGlobalRecord =
+ VarChecker.getGlobalizedRecord(/*IsInTTDRegion=*/true);
+ I->getSecond().SecondaryLocalVarData.emplace();
+ DeclToAddrMapTy &Data = I->getSecond().SecondaryLocalVarData.getValue();
+ for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
+ assert(VD->isCanonicalDecl() && "Expected canonical declaration");
+ const FieldDecl *FD = VarChecker.getFieldForGlobalizedVar(VD);
+ Data.insert(
+ std::make_pair(VD, MappedVarData(FD, /*IsInTTDRegion=*/true)));
+ }
}
if (!NeedToDelayGlobalization) {
emitGenericVarsProlog(CGF, D->getBeginLoc(), /*WithSPMDCheck=*/true);
@@ -4062,7 +4247,7 @@ Address CGOpenMPRuntimeNVPTX::getAddressOfLocalVariable(CodeGenFunction &CGF,
return Address::invalid();
auto VDI = I->getSecond().LocalVarData.find(VD);
if (VDI != I->getSecond().LocalVarData.end())
- return VDI->second.second;
+ return VDI->second.PrivateAddr;
if (VD->hasAttrs()) {
for (specific_attr_iterator<OMPReferencedVarAttr> IT(VD->attr_begin()),
E(VD->attr_end());
@@ -4071,7 +4256,7 @@ Address CGOpenMPRuntimeNVPTX::getAddressOfLocalVariable(CodeGenFunction &CGF,
cast<VarDecl>(cast<DeclRefExpr>(IT->getRef())->getDecl())
->getCanonicalDecl());
if (VDI != I->getSecond().LocalVarData.end())
- return VDI->second.second;
+ return VDI->second.PrivateAddr;
}
}
return Address::invalid();
@@ -4091,16 +4276,285 @@ void CGOpenMPRuntimeNVPTX::getDefaultDistScheduleAndChunk(
Chunk = CGF.EmitScalarConversion(getNVPTXNumThreads(CGF),
CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
S.getIterationVariable()->getType(), S.getBeginLoc());
+ return;
}
+ CGOpenMPRuntime::getDefaultDistScheduleAndChunk(
+ CGF, S, ScheduleKind, Chunk);
}
void CGOpenMPRuntimeNVPTX::getDefaultScheduleAndChunk(
CodeGenFunction &CGF, const OMPLoopDirective &S,
OpenMPScheduleClauseKind &ScheduleKind,
- llvm::Value *&Chunk) const {
- if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD) {
- ScheduleKind = OMPC_SCHEDULE_static;
- Chunk = CGF.Builder.getIntN(CGF.getContext().getTypeSize(
- S.getIterationVariable()->getType()), 1);
+ const Expr *&ChunkExpr) const {
+ ScheduleKind = OMPC_SCHEDULE_static;
+ // Chunk size is 1 in this case.
+ llvm::APInt ChunkSize(32, 1);
+ ChunkExpr = IntegerLiteral::Create(CGF.getContext(), ChunkSize,
+ CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
+ SourceLocation());
+}
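// In effect, worksharing loops without an explicit schedule behave as if the
// user had written the following (illustrative only):
//
//   #pragma omp for schedule(static, 1)
//   for (int i = 0; i < N; ++i) { ... }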
+
+void CGOpenMPRuntimeNVPTX::adjustTargetSpecificDataForLambdas(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D) const {
+ assert(isOpenMPTargetExecutionDirective(D.getDirectiveKind()) &&
+ " Expected target-based directive.");
+ const CapturedStmt *CS = D.getCapturedStmt(OMPD_target);
+ for (const CapturedStmt::Capture &C : CS->captures()) {
+ // Capture variables captured by reference in lambdas for target-based
+ // directives.
+ if (!C.capturesVariable())
+ continue;
+ const VarDecl *VD = C.getCapturedVar();
+ const auto *RD = VD->getType()
+ .getCanonicalType()
+ .getNonReferenceType()
+ ->getAsCXXRecordDecl();
+ if (!RD || !RD->isLambda())
+ continue;
+ Address VDAddr = CGF.GetAddrOfLocalVar(VD);
+ LValue VDLVal;
+ if (VD->getType().getCanonicalType()->isReferenceType())
+ VDLVal = CGF.EmitLoadOfReferenceLValue(VDAddr, VD->getType());
+ else
+ VDLVal = CGF.MakeAddrLValue(
+ VDAddr, VD->getType().getCanonicalType().getNonReferenceType());
+ llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
+ FieldDecl *ThisCapture = nullptr;
+ RD->getCaptureFields(Captures, ThisCapture);
+ if (ThisCapture && CGF.CapturedStmtInfo->isCXXThisExprCaptured()) {
+ LValue ThisLVal =
+ CGF.EmitLValueForFieldInitialization(VDLVal, ThisCapture);
+ llvm::Value *CXXThis = CGF.LoadCXXThis();
+ CGF.EmitStoreOfScalar(CXXThis, ThisLVal);
+ }
+ for (const LambdaCapture &LC : RD->captures()) {
+ if (LC.getCaptureKind() != LCK_ByRef)
+ continue;
+ const VarDecl *VD = LC.getCapturedVar();
+ if (!CS->capturesVariable(VD))
+ continue;
+ auto It = Captures.find(VD);
+ assert(It != Captures.end() && "Found lambda capture without field.");
+ LValue VarLVal = CGF.EmitLValueForFieldInitialization(VDLVal, It->second);
+ Address VDAddr = CGF.GetAddrOfLocalVar(VD);
+ if (VD->getType().getCanonicalType()->isReferenceType())
+ VDAddr = CGF.EmitLoadOfReferenceLValue(VDAddr,
+ VD->getType().getCanonicalType())
+ .getAddress();
+ CGF.EmitStoreOfScalar(VDAddr.getPointer(), VarLVal);
+ }
+ }
+}
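// Editor's sketch (not part of this patch): the adjustment above is aimed at
// code such as
//
//   int x = 0;
//   auto l = [&x]() { x += 1; };
//   #pragma omp target
//   l();
//
// where the lambda's by-reference capture of 'x' has to be re-pointed at the
// device copy of 'x' before the captured statement of the target region is
// emitted. The example is illustrative only; the exact set of handled
// directives is whatever isOpenMPTargetExecutionDirective() accepts.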
+
+// Get current CudaArch and ignore any unknown values
+static CudaArch getCudaArch(CodeGenModule &CGM) {
+ if (!CGM.getTarget().hasFeature("ptx"))
+ return CudaArch::UNKNOWN;
+ llvm::StringMap<bool> Features;
+ CGM.getTarget().initFeatureMap(Features, CGM.getDiags(),
+ CGM.getTarget().getTargetOpts().CPU,
+ CGM.getTarget().getTargetOpts().Features);
+ for (const auto &Feature : Features) {
+ if (Feature.getValue()) {
+ CudaArch Arch = StringToCudaArch(Feature.getKey());
+ if (Arch != CudaArch::UNKNOWN)
+ return Arch;
+ }
+ }
+ return CudaArch::UNKNOWN;
+}
+
+/// Check whether the target architecture supports unified addressing, which
+/// is required by the OpenMP requires clause "unified_shared_memory".
+void CGOpenMPRuntimeNVPTX::checkArchForUnifiedAddressing(
+ CodeGenModule &CGM, const OMPRequiresDecl *D) const {
+ for (const OMPClause *Clause : D->clauselists()) {
+ if (Clause->getClauseKind() == OMPC_unified_shared_memory) {
+ switch (getCudaArch(CGM)) {
+ case CudaArch::SM_20:
+ case CudaArch::SM_21:
+ case CudaArch::SM_30:
+ case CudaArch::SM_32:
+ case CudaArch::SM_35:
+ case CudaArch::SM_37:
+ case CudaArch::SM_50:
+ case CudaArch::SM_52:
+ case CudaArch::SM_53:
+ case CudaArch::SM_60:
+ case CudaArch::SM_61:
+ case CudaArch::SM_62:
+ CGM.Error(Clause->getBeginLoc(),
+ "Target architecture does not support unified addressing");
+ return;
+ case CudaArch::SM_70:
+ case CudaArch::SM_72:
+ case CudaArch::SM_75:
+ case CudaArch::GFX600:
+ case CudaArch::GFX601:
+ case CudaArch::GFX700:
+ case CudaArch::GFX701:
+ case CudaArch::GFX702:
+ case CudaArch::GFX703:
+ case CudaArch::GFX704:
+ case CudaArch::GFX801:
+ case CudaArch::GFX802:
+ case CudaArch::GFX803:
+ case CudaArch::GFX810:
+ case CudaArch::GFX900:
+ case CudaArch::GFX902:
+ case CudaArch::GFX904:
+ case CudaArch::GFX906:
+ case CudaArch::GFX909:
+ case CudaArch::UNKNOWN:
+ break;
+ case CudaArch::LAST:
+ llvm_unreachable("Unexpected Cuda arch.");
+ }
+ }
+ }
+}
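// Editor's sketch (not part of this patch): with the check above, a file that
// contains
//
//   #pragma omp requires unified_shared_memory
//
// produces the "does not support unified addressing" error when the detected
// CUDA architecture is older than sm_70, while sm_70/sm_72/sm_75 (and the
// unknown/GFX cases) compile without the diagnostic.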
+
+/// Get number of SMs and number of blocks per SM.
+static std::pair<unsigned, unsigned> getSMsBlocksPerSM(CodeGenModule &CGM) {
+ std::pair<unsigned, unsigned> Data;
+ if (CGM.getLangOpts().OpenMPCUDANumSMs)
+ Data.first = CGM.getLangOpts().OpenMPCUDANumSMs;
+ if (CGM.getLangOpts().OpenMPCUDABlocksPerSM)
+ Data.second = CGM.getLangOpts().OpenMPCUDABlocksPerSM;
+ if (Data.first && Data.second)
+ return Data;
+ switch (getCudaArch(CGM)) {
+ case CudaArch::SM_20:
+ case CudaArch::SM_21:
+ case CudaArch::SM_30:
+ case CudaArch::SM_32:
+ case CudaArch::SM_35:
+ case CudaArch::SM_37:
+ case CudaArch::SM_50:
+ case CudaArch::SM_52:
+ case CudaArch::SM_53:
+ return {16, 16};
+ case CudaArch::SM_60:
+ case CudaArch::SM_61:
+ case CudaArch::SM_62:
+ return {56, 32};
+ case CudaArch::SM_70:
+ case CudaArch::SM_72:
+ case CudaArch::SM_75:
+ return {84, 32};
+ case CudaArch::GFX600:
+ case CudaArch::GFX601:
+ case CudaArch::GFX700:
+ case CudaArch::GFX701:
+ case CudaArch::GFX702:
+ case CudaArch::GFX703:
+ case CudaArch::GFX704:
+ case CudaArch::GFX801:
+ case CudaArch::GFX802:
+ case CudaArch::GFX803:
+ case CudaArch::GFX810:
+ case CudaArch::GFX900:
+ case CudaArch::GFX902:
+ case CudaArch::GFX904:
+ case CudaArch::GFX906:
+ case CudaArch::GFX909:
+ case CudaArch::UNKNOWN:
+ break;
+ case CudaArch::LAST:
+ llvm_unreachable("Unexpected Cuda arch.");
+ }
+ llvm_unreachable("Unexpected NVPTX target without ptx feature.");
+}
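// Editor's note (illustrative, not part of this patch): the pair returned
// above is consumed by CGOpenMPRuntimeNVPTX::clear() below, which sizes the
// statically allocated global buffer as a two-dimensional array
// [NumSMs][BlocksPerSM] of the merged record union. For sm_70 that is
// 84 * 32 = 2688 per-team slots; if the OpenMPCUDANumSMs and
// OpenMPCUDABlocksPerSM language options are both set, they override these
// architecture-based defaults.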
+
+void CGOpenMPRuntimeNVPTX::clear() {
+ if (!GlobalizedRecords.empty()) {
+ ASTContext &C = CGM.getContext();
+ llvm::SmallVector<const GlobalPtrSizeRecsTy *, 4> GlobalRecs;
+ llvm::SmallVector<const GlobalPtrSizeRecsTy *, 4> SharedRecs;
+ RecordDecl *StaticRD = C.buildImplicitRecord(
+ "_openmp_static_memory_type_$_", RecordDecl::TagKind::TTK_Union);
+ StaticRD->startDefinition();
+ RecordDecl *SharedStaticRD = C.buildImplicitRecord(
+ "_shared_openmp_static_memory_type_$_", RecordDecl::TagKind::TTK_Union);
+ SharedStaticRD->startDefinition();
+ for (const GlobalPtrSizeRecsTy &Records : GlobalizedRecords) {
+ if (Records.Records.empty())
+ continue;
+ unsigned Size = 0;
+ unsigned RecAlignment = 0;
+ for (const RecordDecl *RD : Records.Records) {
+ QualType RDTy = C.getRecordType(RD);
+ unsigned Alignment = C.getTypeAlignInChars(RDTy).getQuantity();
+ RecAlignment = std::max(RecAlignment, Alignment);
+ unsigned RecSize = C.getTypeSizeInChars(RDTy).getQuantity();
+ Size =
+ llvm::alignTo(llvm::alignTo(Size, Alignment) + RecSize, Alignment);
+ }
+ Size = llvm::alignTo(Size, RecAlignment);
+ llvm::APInt ArySize(/*numBits=*/64, Size);
+ QualType SubTy = C.getConstantArrayType(
+ C.CharTy, ArySize, ArrayType::Normal, /*IndexTypeQuals=*/0);
+ const bool UseSharedMemory = Size <= SharedMemorySize;
+ auto *Field =
+ FieldDecl::Create(C, UseSharedMemory ? SharedStaticRD : StaticRD,
+ SourceLocation(), SourceLocation(), nullptr, SubTy,
+ C.getTrivialTypeSourceInfo(SubTy, SourceLocation()),
+ /*BW=*/nullptr, /*Mutable=*/false,
+ /*InitStyle=*/ICIS_NoInit);
+ Field->setAccess(AS_public);
+ if (UseSharedMemory) {
+ SharedStaticRD->addDecl(Field);
+ SharedRecs.push_back(&Records);
+ } else {
+ StaticRD->addDecl(Field);
+ GlobalRecs.push_back(&Records);
+ }
+ Records.RecSize->setInitializer(llvm::ConstantInt::get(CGM.SizeTy, Size));
+ Records.UseSharedMemory->setInitializer(
+ llvm::ConstantInt::get(CGM.Int16Ty, UseSharedMemory ? 1 : 0));
+ }
+ SharedStaticRD->completeDefinition();
+ if (!SharedStaticRD->field_empty()) {
+ QualType StaticTy = C.getRecordType(SharedStaticRD);
+ llvm::Type *LLVMStaticTy = CGM.getTypes().ConvertTypeForMem(StaticTy);
+ auto *GV = new llvm::GlobalVariable(
+ CGM.getModule(), LLVMStaticTy,
+ /*isConstant=*/false, llvm::GlobalValue::CommonLinkage,
+ llvm::Constant::getNullValue(LLVMStaticTy),
+ "_openmp_shared_static_glob_rd_$_", /*InsertBefore=*/nullptr,
+ llvm::GlobalValue::NotThreadLocal,
+ C.getTargetAddressSpace(LangAS::cuda_shared));
+ auto *Replacement = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
+ GV, CGM.VoidPtrTy);
+ for (const GlobalPtrSizeRecsTy *Rec : SharedRecs) {
+ Rec->Buffer->replaceAllUsesWith(Replacement);
+ Rec->Buffer->eraseFromParent();
+ }
+ }
+ StaticRD->completeDefinition();
+ if (!StaticRD->field_empty()) {
+ QualType StaticTy = C.getRecordType(StaticRD);
+ std::pair<unsigned, unsigned> SMsBlockPerSM = getSMsBlocksPerSM(CGM);
+ llvm::APInt Size1(32, SMsBlockPerSM.second);
+ QualType Arr1Ty =
+ C.getConstantArrayType(StaticTy, Size1, ArrayType::Normal,
+ /*IndexTypeQuals=*/0);
+ llvm::APInt Size2(32, SMsBlockPerSM.first);
+ QualType Arr2Ty = C.getConstantArrayType(Arr1Ty, Size2, ArrayType::Normal,
+ /*IndexTypeQuals=*/0);
+ llvm::Type *LLVMArr2Ty = CGM.getTypes().ConvertTypeForMem(Arr2Ty);
+ auto *GV = new llvm::GlobalVariable(
+ CGM.getModule(), LLVMArr2Ty,
+ /*isConstant=*/false, llvm::GlobalValue::CommonLinkage,
+ llvm::Constant::getNullValue(LLVMArr2Ty),
+ "_openmp_static_glob_rd_$_");
+ auto *Replacement = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
+ GV, CGM.VoidPtrTy);
+ for (const GlobalPtrSizeRecsTy *Rec : GlobalRecs) {
+ Rec->Buffer->replaceAllUsesWith(Replacement);
+ Rec->Buffer->eraseFromParent();
+ }
+ }
}
+ CGOpenMPRuntime::clear();
}
diff --git a/lib/CodeGen/CGOpenMPRuntimeNVPTX.h b/lib/CodeGen/CGOpenMPRuntimeNVPTX.h
index fc8cd2467b..8fb3b0a061 100644
--- a/lib/CodeGen/CGOpenMPRuntimeNVPTX.h
+++ b/lib/CodeGen/CGOpenMPRuntimeNVPTX.h
@@ -56,6 +56,8 @@ private:
ExecutionMode getExecutionMode() const;
+ bool requiresFullRuntime() const { return RequiresFullRuntime; }
+
/// Emit the worker function for the current target region.
void emitWorkerFunction(WorkerFunctionState &WST);
@@ -180,8 +182,19 @@ protected:
return "__omp_outlined__";
}
+  /// Check if the default location must be constant.
+  /// It is constant for NVPTX for better optimization.
+ bool isDefaultLocationConstant() const override { return true; }
+
+  /// Returns additional flags that can be stored in the reserved_2 field of
+  /// the default location.
+  /// For the NVPTX target it contains data about the SPMD/non-SPMD execution
+  /// mode and the full/lightweight runtime mode, used for better optimization.
+ unsigned getDefaultLocationReserved2Flags() const override;
+
public:
explicit CGOpenMPRuntimeNVPTX(CodeGenModule &CGM);
+ void clear() override;
/// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
/// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
@@ -261,6 +274,18 @@ public:
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond) override;
+ /// Emit an implicit/explicit barrier for OpenMP threads.
+ /// \param Kind Directive for which this implicit barrier call must be
+ /// generated. Must be OMPD_barrier for explicit barrier generation.
+  /// \param EmitChecks true if checks for cancellation barriers must be
+  /// emitted.
+  /// \param ForceSimpleCall true if a simple barrier call must be emitted,
+  /// false if the runtime class decides which one to emit (simple or with
+  /// cancellation checks).
+ ///
+ void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
+ OpenMPDirectiveKind Kind, bool EmitChecks = true,
+ bool ForceSimpleCall = false) override;
+
/// Emits a critical region.
/// \param CriticalName Name of the critical region.
/// \param CriticalOpGen Generator for the statement associated with the given
@@ -348,7 +373,17 @@ public:
/// Choose a default value for the schedule clause.
void getDefaultScheduleAndChunk(CodeGenFunction &CGF,
const OMPLoopDirective &S, OpenMPScheduleClauseKind &ScheduleKind,
- llvm::Value *&Chunk) const override;
+ const Expr *&ChunkExpr) const override;
+
+ /// Adjust some parameters for the target-based directives, like addresses of
+ /// the variables captured by reference in lambdas.
+ void adjustTargetSpecificDataForLambdas(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D) const override;
+
+  /// Perform a check on the requires decl to ensure that the target
+  /// architecture supports unified addressing.
+ void checkArchForUnifiedAddressing(CodeGenModule &CGM,
+ const OMPRequiresDecl *D) const override;
private:
/// Track the execution mode when codegening directives within a target
@@ -357,9 +392,15 @@ private:
/// to emit optimized code.
ExecutionMode CurrentExecutionMode = EM_Unknown;
+ /// Check if the full runtime is required (default - yes).
+ bool RequiresFullRuntime = true;
+
/// true if we're emitting the code for the target region and next parallel
/// region is L0 for sure.
bool IsInTargetMasterThreadRegion = false;
+  /// true if currently emitting code for a target/teams/distribute region,
+  /// false otherwise.
+ bool IsInTTDRegion = false;
/// true if we're definitely in the parallel region.
bool IsInParallelRegion = false;
@@ -373,17 +414,31 @@ private:
llvm::Function *createParallelDataSharingWrapper(
llvm::Function *OutlinedParallelFn, const OMPExecutableDirective &D);
+ /// The data for the single globalized variable.
+ struct MappedVarData {
+ /// Corresponding field in the global record.
+ const FieldDecl *FD = nullptr;
+ /// Corresponding address.
+ Address PrivateAddr = Address::invalid();
+    /// true, if only one element is required (for lastprivates in SPMD mode),
+    /// false, if it must be created based on the warp size.
+ bool IsOnePerTeam = false;
+ MappedVarData() = delete;
+ MappedVarData(const FieldDecl *FD, bool IsOnePerTeam = false)
+ : FD(FD), IsOnePerTeam(IsOnePerTeam) {}
+ };
/// The map of local variables to their addresses in the global memory.
- using DeclToAddrMapTy = llvm::MapVector<const Decl *,
- std::pair<const FieldDecl *, Address>>;
+ using DeclToAddrMapTy = llvm::MapVector<const Decl *, MappedVarData>;
/// Set of the parameters passed by value escaping OpenMP context.
using EscapedParamsTy = llvm::SmallPtrSet<const Decl *, 4>;
struct FunctionData {
DeclToAddrMapTy LocalVarData;
+ llvm::Optional<DeclToAddrMapTy> SecondaryLocalVarData = llvm::None;
EscapedParamsTy EscapedParameters;
llvm::SmallVector<const ValueDecl*, 4> EscapedVariableLengthDecls;
llvm::SmallVector<llvm::Value *, 4> EscapedVariableLengthDeclsAddrs;
const RecordDecl *GlobalRecord = nullptr;
+ llvm::Optional<const RecordDecl *> SecondaryGlobalRecord = llvm::None;
llvm::Value *GlobalRecordAddr = nullptr;
llvm::Value *IsInSPMDModeFlag = nullptr;
std::unique_ptr<CodeGenFunction::OMPMapVars> MappedParams;
@@ -391,6 +446,27 @@ private:
/// Maps the function to the list of the globalized variables with their
/// addresses.
llvm::SmallDenseMap<llvm::Function *, FunctionData> FunctionGlobalizedDecls;
+  /// List of records for the globalized variables in target/teams/distribute
+  /// contexts. Inner records are joined into a single record, and the
+  /// resulting records are joined into a single union. This union (one per
+  /// CU) is the entry point for the static memory management runtime
+  /// functions.
+ struct GlobalPtrSizeRecsTy {
+ llvm::GlobalVariable *UseSharedMemory = nullptr;
+ llvm::GlobalVariable *RecSize = nullptr;
+ llvm::GlobalVariable *Buffer = nullptr;
+ SourceLocation Loc;
+ llvm::SmallVector<const RecordDecl *, 2> Records;
+ unsigned RegionCounter = 0;
+ };
+ llvm::SmallVector<GlobalPtrSizeRecsTy, 8> GlobalizedRecords;
+  /// Shared pointer into the global memory buffer used for the given kernel.
+ llvm::GlobalVariable *KernelStaticGlobalized = nullptr;
+ /// Pair of the Non-SPMD team and all reductions variables in this team
+ /// region.
+ std::pair<const Decl *, llvm::SmallVector<const ValueDecl *, 4>>
+ TeamAndReductions;
};
} // CodeGen namespace.
diff --git a/lib/CodeGen/CGRecordLayoutBuilder.cpp b/lib/CodeGen/CGRecordLayoutBuilder.cpp
index 58aaae6925..c754541ac1 100644
--- a/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -20,7 +20,7 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
-#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
index 39a2cc145f..bc7a18af1e 100644
--- a/lib/CodeGen/CGStmt.cpp
+++ b/lib/CodeGen/CGStmt.cpp
@@ -19,8 +19,6 @@
#include "clang/Basic/Builtins.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/TargetInfo.h"
-#include "clang/Sema/LoopHint.h"
-#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
@@ -1047,10 +1045,9 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
// exception to our over-conservative rules about not jumping to
// statements following block literals with non-trivial cleanups.
RunCleanupsScope cleanupScope(*this);
- if (const ExprWithCleanups *cleanups =
- dyn_cast_or_null<ExprWithCleanups>(RV)) {
- enterFullExpression(cleanups);
- RV = cleanups->getSubExpr();
+ if (const FullExpr *fe = dyn_cast_or_null<FullExpr>(RV)) {
+ enterFullExpression(fe);
+ RV = fe->getSubExpr();
}
// FIXME: Clean this up by using an LValue for ReturnTemp,
@@ -1823,9 +1820,9 @@ llvm::Value* CodeGenFunction::EmitAsmInput(
// If this can't be a register or memory, i.e., has to be a constant
// (immediate or symbolic), try to emit it as such.
if (!Info.allowsRegister() && !Info.allowsMemory()) {
- llvm::APSInt Result;
+ Expr::EvalResult Result;
if (InputExpr->EvaluateAsInt(Result, getContext()))
- return llvm::ConstantInt::get(getLLVMContext(), Result);
+ return llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt());
assert(!Info.requiresImmediateConstant() &&
"Required-immediate inlineasm arg isn't constant?");
}
diff --git a/lib/CodeGen/CGStmtOpenMP.cpp b/lib/CodeGen/CGStmtOpenMP.cpp
index 79ffa7c8e9..4f635efe71 100644
--- a/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/lib/CodeGen/CGStmtOpenMP.cpp
@@ -385,12 +385,12 @@ static llvm::Function *emitOutlinedFunctionPrologue(
FunctionDecl *DebugFunctionDecl = nullptr;
if (!FO.UIntPtrCastRequired) {
FunctionProtoType::ExtProtoInfo EPI;
+ QualType FunctionTy = Ctx.getFunctionType(Ctx.VoidTy, llvm::None, EPI);
DebugFunctionDecl = FunctionDecl::Create(
Ctx, Ctx.getTranslationUnitDecl(), FO.S->getBeginLoc(),
- SourceLocation(), DeclarationName(), Ctx.VoidTy,
- Ctx.getTrivialTypeSourceInfo(
- Ctx.getFunctionType(Ctx.VoidTy, llvm::None, EPI)),
- SC_Static, /*isInlineSpecified=*/false, /*hasWrittenPrototype=*/false);
+ SourceLocation(), DeclarationName(), FunctionTy,
+ Ctx.getTrivialTypeSourceInfo(FunctionTy), SC_Static,
+ /*isInlineSpecified=*/false, /*hasWrittenPrototype=*/false);
}
for (const FieldDecl *FD : RD->fields()) {
QualType ArgType = FD->getType();
@@ -1738,6 +1738,8 @@ static void emitOMPSimdRegion(CodeGenFunction &CGF, const OMPLoopDirective &S,
CGF.EmitOMPReductionClauseInit(S, LoopScope);
bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
(void)LoopScope.Privatize();
+ if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
+ CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
CGF.EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
S.getInc(),
[&S](CodeGenFunction &CGF) {
@@ -2006,7 +2008,7 @@ void CodeGenFunction::EmitOMPDistributeOuterLoop(
RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind, StaticInit);
// for combined 'distribute' and 'for' the increment expression of distribute
- // is store in DistInc. For 'distribute' alone, it is in Inc.
+ // is stored in DistInc. For 'distribute' alone, it is in Inc.
Expr *IncExpr;
if (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()))
IncExpr = S.getDistInc();
@@ -2296,24 +2298,34 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
EmitOMPPrivateLoopCounters(S, LoopScope);
EmitOMPLinearClause(S, LoopScope);
(void)LoopScope.Privatize();
+ if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
+ CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S);
// Detect the loop schedule kind and chunk.
- llvm::Value *Chunk = nullptr;
+ const Expr *ChunkExpr = nullptr;
OpenMPScheduleTy ScheduleKind;
if (const auto *C = S.getSingleClause<OMPScheduleClause>()) {
ScheduleKind.Schedule = C->getScheduleKind();
ScheduleKind.M1 = C->getFirstScheduleModifier();
ScheduleKind.M2 = C->getSecondScheduleModifier();
- if (const Expr *Ch = C->getChunkSize()) {
- Chunk = EmitScalarExpr(Ch);
- Chunk = EmitScalarConversion(Chunk, Ch->getType(),
- S.getIterationVariable()->getType(),
- S.getBeginLoc());
- }
+ ChunkExpr = C->getChunkSize();
} else {
// Default behaviour for schedule clause.
CGM.getOpenMPRuntime().getDefaultScheduleAndChunk(
- *this, S, ScheduleKind.Schedule, Chunk);
+ *this, S, ScheduleKind.Schedule, ChunkExpr);
+ }
+ bool HasChunkSizeOne = false;
+ llvm::Value *Chunk = nullptr;
+ if (ChunkExpr) {
+ Chunk = EmitScalarExpr(ChunkExpr);
+ Chunk = EmitScalarConversion(Chunk, ChunkExpr->getType(),
+ S.getIterationVariable()->getType(),
+ S.getBeginLoc());
+ Expr::EvalResult Result;
+ if (ChunkExpr->EvaluateAsInt(Result, getContext())) {
+ llvm::APSInt EvaluatedChunk = Result.Val.getInt();
+ HasChunkSizeOne = (EvaluatedChunk.getLimitedValue() == 1);
+ }
}
const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
@@ -2321,8 +2333,12 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
// If the static schedule kind is specified or if the ordered clause is
// specified, and if no monotonic modifier is specified, the effect will
// be as if the monotonic modifier was specified.
- if (RT.isStaticNonchunked(ScheduleKind.Schedule,
- /* Chunked */ Chunk != nullptr) &&
+ bool StaticChunkedOne = RT.isStaticChunked(ScheduleKind.Schedule,
+ /* Chunked */ Chunk != nullptr) && HasChunkSizeOne &&
+ isOpenMPLoopBoundSharingDirective(S.getDirectiveKind());
+ if ((RT.isStaticNonchunked(ScheduleKind.Schedule,
+ /* Chunked */ Chunk != nullptr) ||
+ StaticChunkedOne) &&
!Ordered) {
if (isOpenMPSimdDirective(S.getDirectiveKind()))
EmitOMPSimdInit(S, /*IsMonotonic=*/true);
@@ -2333,23 +2349,38 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
// unspecified in this case.
CGOpenMPRuntime::StaticRTInput StaticInit(
IVSize, IVSigned, Ordered, IL.getAddress(), LB.getAddress(),
- UB.getAddress(), ST.getAddress());
+ UB.getAddress(), ST.getAddress(),
+ StaticChunkedOne ? Chunk : nullptr);
RT.emitForStaticInit(*this, S.getBeginLoc(), S.getDirectiveKind(),
ScheduleKind, StaticInit);
JumpDest LoopExit =
getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
// UB = min(UB, GlobalUB);
- EmitIgnoredExpr(S.getEnsureUpperBound());
+ if (!StaticChunkedOne)
+ EmitIgnoredExpr(S.getEnsureUpperBound());
// IV = LB;
EmitIgnoredExpr(S.getInit());
- // while (idx <= UB) { BODY; ++idx; }
- EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
- S.getInc(),
- [&S, LoopExit](CodeGenFunction &CGF) {
- CGF.EmitOMPLoopBody(S, LoopExit);
- CGF.EmitStopPoint(&S);
- },
- [](CodeGenFunction &) {});
+ // For unchunked static schedule generate:
+ //
+ // while (idx <= UB) {
+ // BODY;
+ // ++idx;
+ // }
+ //
+ // For static schedule with chunk one:
+ //
+ // while (IV <= PrevUB) {
+ // BODY;
+ // IV += ST;
+ // }
+ EmitOMPInnerLoop(S, LoopScope.requiresCleanups(),
+ StaticChunkedOne ? S.getCombinedParForInDistCond() : S.getCond(),
+ StaticChunkedOne ? S.getDistInc() : S.getInc(),
+ [&S, LoopExit](CodeGenFunction &CGF) {
+ CGF.EmitOMPLoopBody(S, LoopExit);
+ CGF.EmitStopPoint(&S);
+ },
+ [](CodeGenFunction &) {});
EmitBlock(LoopExit.getBlock());
// Tell the runtime we are done.
auto &&CodeGen = [&S](CodeGenFunction &CGF) {
@@ -2564,6 +2595,8 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
CGF.EmitOMPReductionClauseInit(S, LoopScope);
(void)LoopScope.Privatize();
+ if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
+ CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
// Emit static non-chunked loop.
OpenMPScheduleTy ScheduleKind;
@@ -2922,7 +2955,7 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
RedCG.emitAggregateType(CGF, Cnt);
// FIXME: This must removed once the runtime library is fixed.
// Emit required threadprivate variables for
- // initilizer/combiner/finalizer.
+ // initializer/combiner/finalizer.
CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
RedCG, Cnt);
Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
@@ -2968,10 +3001,10 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
RedCG.emitSharedLValue(CGF, Cnt);
RedCG.emitAggregateType(CGF, Cnt);
// The taskgroup descriptor variable is always implicit firstprivate and
- // privatized already during procoessing of the firstprivates.
+ // privatized already during processing of the firstprivates.
// FIXME: This must removed once the runtime library is fixed.
// Emit required threadprivate variables for
- // initilizer/combiner/finalizer.
+ // initializer/combiner/finalizer.
CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
RedCG, Cnt);
llvm::Value *ReductionsPtr =
@@ -3317,6 +3350,8 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
EmitOMPPrivateLoopCounters(S, LoopScope);
(void)LoopScope.Privatize();
+ if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
+ CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S);
// Detect the distribute schedule kind and chunk.
llvm::Value *Chunk = nullptr;
@@ -3345,13 +3380,18 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
// iteration space is divided into chunks that are approximately equal
// in size, and at most one chunk is distributed to each team of the
// league. The size of the chunks is unspecified in this case.
+ bool StaticChunked = RT.isStaticChunked(
+ ScheduleKind, /* Chunked */ Chunk != nullptr) &&
+ isOpenMPLoopBoundSharingDirective(S.getDirectiveKind());
if (RT.isStaticNonchunked(ScheduleKind,
- /* Chunked */ Chunk != nullptr)) {
+ /* Chunked */ Chunk != nullptr) ||
+ StaticChunked) {
if (isOpenMPSimdDirective(S.getDirectiveKind()))
EmitOMPSimdInit(S, /*IsMonotonic=*/true);
CGOpenMPRuntime::StaticRTInput StaticInit(
IVSize, IVSigned, /* Ordered = */ false, IL.getAddress(),
- LB.getAddress(), UB.getAddress(), ST.getAddress());
+ LB.getAddress(), UB.getAddress(), ST.getAddress(),
+ StaticChunked ? Chunk : nullptr);
RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind,
StaticInit);
JumpDest LoopExit =
@@ -3370,15 +3410,45 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
? S.getCombinedCond()
: S.getCond();
- // for distribute alone, codegen
- // while (idx <= UB) { BODY; ++idx; }
- // when combined with 'for' (e.g. as in 'distribute parallel for')
- // while (idx <= UB) { <CodeGen rest of pragma>; idx += ST; }
+ if (StaticChunked)
+ Cond = S.getCombinedDistCond();
+
+ // For static unchunked schedules generate:
+ //
+ // 1. For distribute alone, codegen
+ // while (idx <= UB) {
+ // BODY;
+ // ++idx;
+ // }
+ //
+ // 2. When combined with 'for' (e.g. as in 'distribute parallel for')
+ // while (idx <= UB) {
+ // <CodeGen rest of pragma>(LB, UB);
+ // idx += ST;
+ // }
+ //
+ // For static chunk one schedule generate:
+ //
+ // while (IV <= GlobalUB) {
+ // <CodeGen rest of pragma>(LB, UB);
+ // LB += ST;
+ // UB += ST;
+ // UB = min(UB, GlobalUB);
+ // IV = LB;
+ // }
+ //
EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), Cond, IncExpr,
[&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) {
CodeGenLoop(CGF, S, LoopExit);
},
- [](CodeGenFunction &) {});
+ [&S, StaticChunked](CodeGenFunction &CGF) {
+ if (StaticChunked) {
+ CGF.EmitIgnoredExpr(S.getCombinedNextLowerBound());
+ CGF.EmitIgnoredExpr(S.getCombinedNextUpperBound());
+ CGF.EmitIgnoredExpr(S.getCombinedEnsureUpperBound());
+ CGF.EmitIgnoredExpr(S.getCombinedInit());
+ }
+ });
EmitBlock(LoopExit.getBlock());
// Tell the runtime we are done.
RT.emitForStaticFinish(*this, S.getBeginLoc(), S.getDirectiveKind());
@@ -3400,20 +3470,7 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
if (isOpenMPSimdDirective(S.getDirectiveKind()) &&
!isOpenMPParallelDirective(S.getDirectiveKind()) &&
!isOpenMPTeamsDirective(S.getDirectiveKind())) {
- OpenMPDirectiveKind ReductionKind = OMPD_unknown;
- if (isOpenMPParallelDirective(S.getDirectiveKind()) &&
- isOpenMPSimdDirective(S.getDirectiveKind())) {
- ReductionKind = OMPD_parallel_for_simd;
- } else if (isOpenMPParallelDirective(S.getDirectiveKind())) {
- ReductionKind = OMPD_parallel_for;
- } else if (isOpenMPSimdDirective(S.getDirectiveKind())) {
- ReductionKind = OMPD_simd;
- } else if (!isOpenMPTeamsDirective(S.getDirectiveKind()) &&
- S.hasClausesOfKind<OMPReductionClause>()) {
- llvm_unreachable(
- "No reduction clauses is allowed in distribute directive.");
- }
- EmitOMPReductionClauseFinal(S, ReductionKind);
+ EmitOMPReductionClauseFinal(S, OMPD_simd);
// Emit post-update of the reduction variables if IsLastIter != 0.
emitPostUpdateForReductionClause(
*this, S, [IL, &S](CodeGenFunction &CGF) {
@@ -3912,6 +3969,10 @@ static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
case OMPC_use_device_ptr:
case OMPC_is_device_ptr:
case OMPC_unified_address:
+ case OMPC_unified_shared_memory:
+ case OMPC_reverse_offload:
+ case OMPC_dynamic_allocators:
+ case OMPC_atomic_default_mem_order:
llvm_unreachable("Clause is not allowed in 'omp atomic'.");
}
}
@@ -3928,13 +3989,13 @@ void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
}
const Stmt *CS = S.getInnermostCapturedStmt()->IgnoreContainers();
- if (const auto *EWC = dyn_cast<ExprWithCleanups>(CS))
- enterFullExpression(EWC);
+ if (const auto *FE = dyn_cast<FullExpr>(CS))
+ enterFullExpression(FE);
// Processing for statements under 'atomic capture'.
if (const auto *Compound = dyn_cast<CompoundStmt>(CS)) {
for (const Stmt *C : Compound->body()) {
- if (const auto *EWC = dyn_cast<ExprWithCleanups>(C))
- enterFullExpression(EWC);
+ if (const auto *FE = dyn_cast<FullExpr>(C))
+ enterFullExpression(FE);
}
}
@@ -4021,6 +4082,8 @@ static void emitTargetRegion(CodeGenFunction &CGF, const OMPTargetDirective &S,
(void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
CGF.EmitOMPPrivateClause(S, PrivateScope);
(void)PrivateScope.Privatize();
+ if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
+ CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
CGF.EmitStmt(S.getCapturedStmt(OMPD_target)->getCapturedStmt());
}
@@ -4101,6 +4164,8 @@ static void emitTargetTeamsRegion(CodeGenFunction &CGF, PrePostActionTy &Action,
CGF.EmitOMPPrivateClause(S, PrivateScope);
CGF.EmitOMPReductionClauseInit(S, PrivateScope);
(void)PrivateScope.Privatize();
+ if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
+ CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
CGF.EmitStmt(CS->getCapturedStmt());
CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
};
@@ -4659,6 +4724,8 @@ static void emitTargetParallelRegion(CodeGenFunction &CGF,
CGF.EmitOMPPrivateClause(S, PrivateScope);
CGF.EmitOMPReductionClauseInit(S, PrivateScope);
(void)PrivateScope.Privatize();
+ if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
+ CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
// TODO: Add support for clauses.
CGF.EmitStmt(CS->getCapturedStmt());
CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
@@ -4959,10 +5026,16 @@ void CodeGenFunction::EmitSimpleOMPExecutableDirective(
if (isOpenMPSimdDirective(D.getDirectiveKind())) {
emitOMPSimdRegion(CGF, cast<OMPLoopDirective>(D), Action);
} else {
+ OMPPrivateScope LoopGlobals(CGF);
if (const auto *LD = dyn_cast<OMPLoopDirective>(&D)) {
for (const Expr *E : LD->counters()) {
- if (const auto *VD = dyn_cast<OMPCapturedExprDecl>(
- cast<DeclRefExpr>(E)->getDecl())) {
+ const auto *VD = dyn_cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
+ if (!VD->hasLocalStorage() && !CGF.LocalDeclMap.count(VD)) {
+ LValue GlobLVal = CGF.EmitLValue(E);
+ LoopGlobals.addPrivate(
+ VD, [&GlobLVal]() { return GlobLVal.getAddress(); });
+ }
+ if (isa<OMPCapturedExprDecl>(VD)) {
// Emit only those that were not explicitly referenced in clauses.
if (!CGF.LocalDeclMap.count(VD))
CGF.EmitVarDecl(*VD);
@@ -4983,6 +5056,7 @@ void CodeGenFunction::EmitSimpleOMPExecutableDirective(
}
}
}
+ LoopGlobals.Privatize();
CGF.EmitStmt(D.getInnermostCapturedStmt()->getCapturedStmt());
}
};
diff --git a/lib/CodeGen/CGVTables.cpp b/lib/CodeGen/CGVTables.cpp
index e29a035e31..09535900b5 100644
--- a/lib/CodeGen/CGVTables.cpp
+++ b/lib/CodeGen/CGVTables.cpp
@@ -16,9 +16,9 @@
#include "CodeGenModule.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Format.h"
#include "llvm/Transforms/Utils/Cloning.h"
@@ -304,7 +304,7 @@ void CodeGenFunction::EmitCallAndReturnForThunk(llvm::Constant *CalleePtr,
CGM.ErrorUnsupported(
MD, "non-trivial argument copy for return-adjusting thunk");
}
- EmitMustTailThunk(MD, AdjustedThisPtr, CalleePtr);
+ EmitMustTailThunk(CurGD, AdjustedThisPtr, CalleePtr);
return;
}
@@ -350,13 +350,12 @@ void CodeGenFunction::EmitCallAndReturnForThunk(llvm::Constant *CalleePtr,
: FPT->getReturnType();
ReturnValueSlot Slot;
if (!ResultType->isVoidType() &&
- CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
- !hasScalarEvaluationKind(CurFnInfo->getReturnType()))
+ CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect)
Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified());
// Now emit our call.
llvm::Instruction *CallOrInvoke;
- CGCallee Callee = CGCallee::forDirect(CalleePtr, MD);
+ CGCallee Callee = CGCallee::forDirect(CalleePtr, CurGD);
RValue RV = EmitCall(*CurFnInfo, Callee, Slot, CallArgs, &CallOrInvoke);
// Consider return adjustment if we have ThunkInfo.
@@ -375,7 +374,7 @@ void CodeGenFunction::EmitCallAndReturnForThunk(llvm::Constant *CalleePtr,
FinishThunk();
}
-void CodeGenFunction::EmitMustTailThunk(const CXXMethodDecl *MD,
+void CodeGenFunction::EmitMustTailThunk(GlobalDecl GD,
llvm::Value *AdjustedThisPtr,
llvm::Value *CalleePtr) {
// Emitting a musttail call thunk doesn't use any of the CGCall.cpp machinery
@@ -412,7 +411,7 @@ void CodeGenFunction::EmitMustTailThunk(const CXXMethodDecl *MD,
// Apply the standard set of call attributes.
unsigned CallingConv;
llvm::AttributeList Attrs;
- CGM.ConstructAttributeList(CalleePtr->getName(), *CurFnInfo, MD, Attrs,
+ CGM.ConstructAttributeList(CalleePtr->getName(), *CurFnInfo, GD, Attrs,
CallingConv, /*AttrOnCallSite=*/true);
Call->setAttributes(Attrs);
Call->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
diff --git a/lib/CodeGen/CGValue.h b/lib/CodeGen/CGValue.h
index 0dcbea423a..da8a8efb84 100644
--- a/lib/CodeGen/CGValue.h
+++ b/lib/CodeGen/CGValue.h
@@ -562,7 +562,10 @@ public:
}
void setVolatile(bool flag) {
- Quals.setVolatile(flag);
+ if (flag)
+ Quals.addVolatile();
+ else
+ Quals.removeVolatile();
}
Qualifiers::ObjCLifetime getObjCLifetime() const {
diff --git a/lib/CodeGen/CMakeLists.txt b/lib/CodeGen/CMakeLists.txt
index 2a0f4f0e83..29c6793c60 100644
--- a/lib/CodeGen/CMakeLists.txt
+++ b/lib/CodeGen/CMakeLists.txt
@@ -102,4 +102,5 @@ add_clang_library(clangCodeGen
clangBasic
clangFrontend
clangLex
+ clangSerialization
)
diff --git a/lib/CodeGen/CodeGenABITypes.cpp b/lib/CodeGen/CodeGenABITypes.cpp
index c152291b15..27f5d53ffe 100644
--- a/lib/CodeGen/CodeGenABITypes.cpp
+++ b/lib/CodeGen/CodeGenABITypes.cpp
@@ -20,7 +20,6 @@
#include "CGRecordLayout.h"
#include "CodeGenModule.h"
#include "clang/CodeGen/CGFunctionInfo.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Lex/PreprocessorOptions.h"
diff --git a/lib/CodeGen/CodeGenAction.cpp b/lib/CodeGen/CodeGenAction.cpp
index 1a2b0616dc..fd4506f2d1 100644
--- a/lib/CodeGen/CodeGenAction.cpp
+++ b/lib/CodeGen/CodeGenAction.cpp
@@ -549,12 +549,16 @@ const FullSourceLoc BackendConsumer::getBestLocationFromDebugLoc(
SourceLocation DILoc;
if (D.isLocationAvailable()) {
- D.getLocation(&Filename, &Line, &Column);
- const FileEntry *FE = FileMgr.getFile(Filename);
- if (FE && Line > 0) {
- // If -gcolumn-info was not used, Column will be 0. This upsets the
- // source manager, so pass 1 if Column is not set.
- DILoc = SourceMgr.translateFileLineCol(FE, Line, Column ? Column : 1);
+ D.getLocation(Filename, Line, Column);
+ if (Line > 0) {
+ const FileEntry *FE = FileMgr.getFile(Filename);
+ if (!FE)
+ FE = FileMgr.getFile(D.getAbsolutePath());
+ if (FE) {
+ // If -gcolumn-info was not used, Column will be 0. This upsets the
+ // source manager, so pass 1 if Column is not set.
+ DILoc = SourceMgr.translateFileLineCol(FE, Line, Column ? Column : 1);
+ }
}
BadDebugInfo = DILoc.isInvalid();
}
diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp
index 77f978f687..f012384f3d 100644
--- a/lib/CodeGen/CodeGenFunction.cpp
+++ b/lib/CodeGen/CodeGenFunction.cpp
@@ -28,10 +28,10 @@
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Builtins.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
-#include "clang/Frontend/CodeGenOptions.h"
-#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Intrinsics.h"
@@ -430,10 +430,25 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
NormalCleanupDest = Address::invalid();
}
- // Add the required-vector-width attribute.
- if (LargestVectorWidth != 0)
- CurFn->addFnAttr("min-legal-vector-width",
- llvm::utostr(LargestVectorWidth));
+ // Scan function arguments for vector width.
+ for (llvm::Argument &A : CurFn->args())
+ if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
+ LargestVectorWidth = std::max(LargestVectorWidth,
+ VT->getPrimitiveSizeInBits());
+
+ // Update vector width based on return type.
+ if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
+ LargestVectorWidth = std::max(LargestVectorWidth,
+ VT->getPrimitiveSizeInBits());
+
+ // Add the required-vector-width attribute. This contains the max width from:
+ // 1. min-vector-width attribute used in the source program.
+ // 2. Any builtins used that have a vector width specified.
+ // 3. Values passed in and out of inline assembly.
+ // 4. Width of vector arguments and return types for this function.
+  // 5. Width of vector arguments and return types for functions called by this
+ // function.
+ CurFn->addFnAttr("min-legal-vector-width", llvm::utostr(LargestVectorWidth));
}
/// ShouldInstrumentFunction - Return true if the current function should be
@@ -1058,9 +1073,8 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
// Count the implicit return.
if (!endsWithReturn(D))
++NumReturnExprs;
- } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
- !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
- // Indirect aggregate return; emit returned value directly into sret slot.
+ } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
+ // Indirect return; emit returned value directly into sret slot.
// This reduces code size, and affects correctness in C++.
auto AI = CurFn->arg_begin();
if (CurFnInfo->getReturnInfo().isSRetAfterThis())
@@ -1188,8 +1202,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
LargestVectorWidth = VecWidth->getVectorWidth();
}
-void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args,
- const Stmt *Body) {
+void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
incrementProfileCounter(Body);
if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
EmitCompoundStmtWithoutScope(*S);
@@ -1357,7 +1370,7 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
// copy-constructors.
emitImplicitAssignmentOperatorBody(Args);
} else if (Body) {
- EmitFunctionBody(Args, Body);
+ EmitFunctionBody(Body);
} else
llvm_unreachable("no definition for emitted function");
@@ -1498,10 +1511,11 @@ bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
bool AllowLabels) {
// FIXME: Rename and handle conversion of other evaluatable things
// to bool.
- llvm::APSInt Int;
- if (!Cond->EvaluateAsInt(Int, getContext()))
+ Expr::EvalResult Result;
+ if (!Cond->EvaluateAsInt(Result, getContext()))
return false; // Not foldable, not integer or not fully evaluatable.
+ llvm::APSInt Int = Result.Val.getInt();
if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond))
return false; // Contains a label.
@@ -1686,7 +1700,7 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
// create metadata that specifies that the branch is unpredictable.
// Don't bother if not optimizing because that metadata would not be used.
llvm::MDNode *Unpredictable = nullptr;
- auto *Call = dyn_cast<CallExpr>(Cond);
+ auto *Call = dyn_cast<CallExpr>(Cond->IgnoreImpCasts());
if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
@@ -2276,7 +2290,7 @@ static bool hasRequiredFeatures(const SmallVectorImpl<StringRef> &ReqFeatures,
// Now build up the set of caller features and verify that all the required
// features are there.
llvm::StringMap<bool> CallerFeatureMap;
- CGM.getFunctionFeatureMap(CallerFeatureMap, FD);
+ CGM.getFunctionFeatureMap(CallerFeatureMap, GlobalDecl().getWithDecl(FD));
// If we have at least one of the features in the feature list return
// true, otherwise return false.
@@ -2284,14 +2298,13 @@ static bool hasRequiredFeatures(const SmallVectorImpl<StringRef> &ReqFeatures,
ReqFeatures.begin(), ReqFeatures.end(), [&](StringRef Feature) {
SmallVector<StringRef, 1> OrFeatures;
Feature.split(OrFeatures, '|');
- return std::any_of(OrFeatures.begin(), OrFeatures.end(),
- [&](StringRef Feature) {
- if (!CallerFeatureMap.lookup(Feature)) {
- FirstMissing = Feature.str();
- return false;
- }
- return true;
- });
+ return llvm::any_of(OrFeatures, [&](StringRef Feature) {
+ if (!CallerFeatureMap.lookup(Feature)) {
+ FirstMissing = Feature.str();
+ return false;
+ }
+ return true;
+ });
});
}
@@ -2378,6 +2391,29 @@ CodeGenFunction::FormResolverCondition(const MultiVersionResolverOption &RO) {
return Condition;
}
+static void CreateMultiVersionResolverReturn(CodeGenModule &CGM,
+ llvm::Function *Resolver,
+ CGBuilderTy &Builder,
+ llvm::Function *FuncToReturn,
+ bool SupportsIFunc) {
+ if (SupportsIFunc) {
+ Builder.CreateRet(FuncToReturn);
+ return;
+ }
+
+ llvm::SmallVector<llvm::Value *, 10> Args;
+ llvm::for_each(Resolver->args(),
+ [&](llvm::Argument &Arg) { Args.push_back(&Arg); });
+
+ llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args);
+ Result->setTailCallKind(llvm::CallInst::TCK_MustTail);
+
+ if (Resolver->getReturnType()->isVoidTy())
+ Builder.CreateRetVoid();
+ else
+ Builder.CreateRet(Result);
+}
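// Editor's sketch (not part of this patch): the helper above gives the
// resolver body one of two rough shapes.
//
//   // With ifunc support: return the selected function pointer.
//   resolver_return:
//     ret @foo.<variant>
//
//   // Without ifunc support: forward the resolver's own arguments to the
//   // selected variant as a musttail call and return its result (or void).
//   resolver_return:
//     %r = musttail call @foo.<variant>(<forwarded args>)
//     ret %r
//
// The block names match the createBasicBlock() calls in
// EmitMultiVersionResolver below.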
+
void CodeGenFunction::EmitMultiVersionResolver(
llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
assert((getContext().getTargetInfo().getTriple().getArch() ==
@@ -2385,6 +2421,9 @@ void CodeGenFunction::EmitMultiVersionResolver(
getContext().getTargetInfo().getTriple().getArch() ==
llvm::Triple::x86_64) &&
"Only implemented for x86 targets");
+
+ bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
+
// Main function's basic block.
llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
Builder.SetInsertPoint(CurBlock);
@@ -2398,13 +2437,15 @@ void CodeGenFunction::EmitMultiVersionResolver(
if (!Condition) {
assert(&RO == Options.end() - 1 &&
"Default or Generic case must be last");
- Builder.CreateRet(RO.Function);
+ CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
+ SupportsIFunc);
return;
}
llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
- llvm::IRBuilder<> RetBuilder(RetBlock);
- RetBuilder.CreateRet(RO.Function);
+ CGBuilderTy RetBuilder(*this, RetBlock);
+ CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
+ SupportsIFunc);
CurBlock = createBasicBlock("resolver_else", Resolver);
Builder.CreateCondBr(Condition, RetBlock, CurBlock);
}
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index 6ea2d75b31..8971accdcd 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -29,9 +29,9 @@
#include "clang/AST/Type.h"
#include "clang/Basic/ABI.h"
#include "clang/Basic/CapturedStmt.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/TargetInfo.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
@@ -1197,6 +1197,8 @@ public:
private:
CGDebugInfo *DebugInfo;
+ /// Used to create unique names for artificial VLA size debug info variables.
+ unsigned VLAExprCounter = 0;
bool DisableDebugInfo = false;
/// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
@@ -1787,7 +1789,7 @@ public:
llvm::Value *ptr);
Address LoadBlockStruct();
- Address GetAddrOfBlockDecl(const VarDecl *var, bool ByRef);
+ Address GetAddrOfBlockDecl(const VarDecl *var);
/// BuildBlockByrefAddress - Computes the location of the
/// data in a variable which is declared as __block.
@@ -1825,7 +1827,7 @@ public:
void EmitConstructorBody(FunctionArgList &Args);
void EmitDestructorBody(FunctionArgList &Args);
void emitImplicitAssignmentOperatorBody(FunctionArgList &Args);
- void EmitFunctionBody(FunctionArgList &Args, const Stmt *Body);
+ void EmitFunctionBody(const Stmt *Body);
void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S);
void EmitForwardingCallToLambda(const CXXMethodDecl *LambdaCallOperator,
@@ -1854,7 +1856,7 @@ public:
void FinishThunk();
/// Emit a musttail call for a thunk with a potentially adjusted this pointer.
- void EmitMustTailThunk(const CXXMethodDecl *MD, llvm::Value *AdjustedThisPtr,
+ void EmitMustTailThunk(GlobalDecl GD, llvm::Value *AdjustedThisPtr,
llvm::Value *Callee);
/// Generate a thunk for the given method.
@@ -2683,8 +2685,9 @@ public:
llvm::Value *NRVOFlag;
- /// True if the variable is a __block variable.
- bool IsByRef;
+ /// True if the variable is a __block variable that is captured by an
+ /// escaping block.
+ bool IsEscapingByRef;
/// True if the variable is of aggregate type and has a constant
/// initializer.
@@ -2704,7 +2707,7 @@ public:
AutoVarEmission(const VarDecl &variable)
: Variable(&variable), Addr(Address::invalid()), NRVOFlag(nullptr),
- IsByRef(false), IsConstantAggregate(false),
+ IsEscapingByRef(false), IsConstantAggregate(false),
SizeForLifetimeMarkers(nullptr), AllocaAddr(Address::invalid()) {}
bool wasEmittedAsGlobal() const { return !Addr.isValid(); }
@@ -2734,7 +2737,7 @@ public:
/// Note that this does not chase the forwarding pointer for
/// __block decls.
Address getObjectAddress(CodeGenFunction &CGF) const {
- if (!IsByRef) return Addr;
+ if (!IsEscapingByRef) return Addr;
return CGF.emitBlockByrefAddress(Addr, Variable, /*forward*/ false);
}
@@ -3523,6 +3526,7 @@ public:
ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr);
ConstantEmission tryEmitAsConstant(const MemberExpr *ME);
+ llvm::Value *emitScalarConstant(const ConstantEmission &Constant, Expr *E);
RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e,
AggValueSlot slot = AggValueSlot::ignored());
@@ -3677,9 +3681,8 @@ public:
RValue EmitNVPTXDevicePrintfCallExpr(const CallExpr *E,
ReturnValueSlot ReturnValue);
- RValue EmitBuiltinExpr(const FunctionDecl *FD,
- unsigned BuiltinID, const CallExpr *E,
- ReturnValueSlot ReturnValue);
+ RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
+ const CallExpr *E, ReturnValueSlot ReturnValue);
RValue emitRotate(const CallExpr *E, bool IsRotateRight);
@@ -3802,6 +3805,10 @@ public:
std::pair<LValue,llvm::Value*>
EmitARCStoreUnsafeUnretained(const BinaryOperator *e, bool ignored);
+ llvm::Value *EmitObjCAlloc(llvm::Value *value,
+ llvm::Type *returnType);
+ llvm::Value *EmitObjCAllocWithZone(llvm::Value *value,
+ llvm::Type *returnType);
llvm::Value *EmitObjCThrowOperand(const Expr *expr);
llvm::Value *EmitObjCConsumeObject(QualType T, llvm::Value *Ptr);
llvm::Value *EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr);
@@ -3891,6 +3898,8 @@ public:
AddInitializerToStaticVarDecl(const VarDecl &D,
llvm::GlobalVariable *GV);
+ // Emit an @llvm.invariant.start call for the given memory region.
+ void EmitInvariantStart(llvm::Constant *Addr, CharUnits Size);
/// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
/// variable with global storage.
@@ -3926,9 +3935,10 @@ public:
/// GenerateCXXGlobalInitFunc - Generates code for initializing global
/// variables.
- void GenerateCXXGlobalInitFunc(llvm::Function *Fn,
- ArrayRef<llvm::Function *> CXXThreadLocals,
- Address Guard = Address::invalid());
+ void
+ GenerateCXXGlobalInitFunc(llvm::Function *Fn,
+ ArrayRef<llvm::Function *> CXXThreadLocals,
+ ConstantAddress Guard = ConstantAddress::invalid());
/// GenerateCXXGlobalDtorsFunc - Generates code for destroying global
/// variables.
@@ -3946,11 +3956,13 @@ public:
void EmitSynthesizedCXXCopyCtor(Address Dest, Address Src, const Expr *Exp);
- void enterFullExpression(const ExprWithCleanups *E) {
- if (E->getNumObjects() == 0) return;
+ void enterFullExpression(const FullExpr *E) {
+ if (const auto *EWC = dyn_cast<ExprWithCleanups>(E))
+ if (EWC->getNumObjects() == 0)
+ return;
enterNonTrivialFullExpression(E);
}
- void enterNonTrivialFullExpression(const ExprWithCleanups *E);
+ void enterNonTrivialFullExpression(const FullExpr *E);
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint = true);
@@ -4273,6 +4285,7 @@ public:
struct MultiVersionResolverOption {
llvm::Function *Function;
+ FunctionDecl *FD;
struct Conds {
StringRef Architecture;
llvm::SmallVector<StringRef, 8> Features;
@@ -4292,22 +4305,7 @@ public:
void EmitMultiVersionResolver(llvm::Function *Resolver,
ArrayRef<MultiVersionResolverOption> Options);
- struct CPUDispatchMultiVersionResolverOption {
- llvm::Function *Function;
- // Note: EmitX86CPUSupports only has 32 bits available, so we store the mask
- // as 32 bits here. When 64-bit support is added to __builtin_cpu_supports,
- // this can be extended to 64 bits.
- uint32_t FeatureMask;
- CPUDispatchMultiVersionResolverOption(llvm::Function *F, uint64_t Mask)
- : Function(F), FeatureMask(static_cast<uint32_t>(Mask)) {}
- bool operator>(const CPUDispatchMultiVersionResolverOption &Other) const {
- return FeatureMask > Other.FeatureMask;
- }
- };
- void EmitCPUDispatchMultiVersionResolver(
- llvm::Function *Resolver,
- ArrayRef<CPUDispatchMultiVersionResolverOption> Options);
- static uint32_t GetX86CpuSupportsMask(ArrayRef<StringRef> FeatureStrs);
+ static uint64_t GetX86CpuSupportsMask(ArrayRef<StringRef> FeatureStrs);
private:
QualType getVarArgType(const Expr *Arg);
@@ -4324,7 +4322,7 @@ private:
llvm::Value *EmitX86CpuIs(StringRef CPUStr);
llvm::Value *EmitX86CpuSupports(const CallExpr *E);
llvm::Value *EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs);
- llvm::Value *EmitX86CpuSupports(uint32_t Mask);
+ llvm::Value *EmitX86CpuSupports(uint64_t Mask);
llvm::Value *EmitX86CpuInit();
llvm::Value *FormResolverCondition(const MultiVersionResolverOption &RO);
};
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
index c2fb4797cc..df814d6386 100644
--- a/lib/CodeGen/CodeGenModule.cpp
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -36,14 +36,14 @@
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/Version.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
-#include "clang/Frontend/CodeGenOptions.h"
-#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
@@ -126,7 +126,7 @@ CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO,
RuntimeCC = getTargetCodeGenInfo().getABIInfo().getRuntimeCC();
- if (LangOpts.ObjC1)
+ if (LangOpts.ObjC)
createObjCRuntime();
if (LangOpts.OpenCL)
createOpenCLRuntime();
@@ -149,12 +149,12 @@ CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO,
Block.GlobalUniqueCount = 0;
- if (C.getLangOpts().ObjC1)
+ if (C.getLangOpts().ObjC)
ObjCData.reset(new ObjCEntrypoints());
if (CodeGenOpts.hasProfileClangUse()) {
auto ReaderOrErr = llvm::IndexedInstrProfReader::create(
- CodeGenOpts.ProfileInstrumentUsePath);
+ CodeGenOpts.ProfileInstrumentUsePath, CodeGenOpts.ProfileRemappingFile);
if (auto E = ReaderOrErr.takeError()) {
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
"Could not read profile %0: %1");
@@ -322,8 +322,6 @@ void CodeGenModule::checkAliases() {
assert(FTy);
if (!FTy->getReturnType()->isPointerTy())
Diags.Report(Location, diag::err_ifunc_resolver_return);
- if (FTy->getNumParams())
- Diags.Report(Location, diag::err_ifunc_resolver_params);
}
llvm::Constant *Aliasee = Alias->getIndirectSymbol();
@@ -460,6 +458,9 @@ void CodeGenModule::Release() {
// Indicate that we want CodeView in the metadata.
getModule().addModuleFlag(llvm::Module::Warning, "CodeView", 1);
}
+ if (CodeGenOpts.CodeViewGHash) {
+ getModule().addModuleFlag(llvm::Module::Warning, "CodeViewGHash", 1);
+ }
if (CodeGenOpts.ControlFlowGuard) {
// We want function ID tables for Control Flow Guard.
getModule().addModuleFlag(llvm::Module::Warning, "cfguardtable", 1);
@@ -589,6 +590,9 @@ void CodeGenModule::Release() {
if (getCodeGenOpts().EmitVersionIdentMetadata)
EmitVersionIdentMetadata();
+ if (!getCodeGenOpts().RecordCommandLine.empty())
+ EmitCommandLineMetadata();
+
EmitTargetMetadata();
}
@@ -893,11 +897,13 @@ static std::string getCPUSpecificMangling(const CodeGenModule &CGM,
static void AppendCPUSpecificCPUDispatchMangling(const CodeGenModule &CGM,
const CPUSpecificAttr *Attr,
+ unsigned CPUIndex,
raw_ostream &Out) {
- // cpu_specific gets the current name, dispatch gets the resolver.
+ // cpu_specific gets the current name, dispatch gets the resolver if IFunc is
+ // supported.
if (Attr)
- Out << getCPUSpecificMangling(CGM, Attr->getCurCPUName()->getName());
- else
+ Out << getCPUSpecificMangling(CGM, Attr->getCPUName(CPUIndex)->getName());
+ else if (CGM.getTarget().supportsIFunc())
Out << ".resolver";
}
@@ -963,11 +969,19 @@ static std::string getMangledNameImpl(const CodeGenModule &CGM, GlobalDecl GD,
if (const auto *FD = dyn_cast<FunctionDecl>(ND))
if (FD->isMultiVersion() && !OmitMultiVersionMangling) {
- if (FD->isCPUDispatchMultiVersion() || FD->isCPUSpecificMultiVersion())
- AppendCPUSpecificCPUDispatchMangling(
- CGM, FD->getAttr<CPUSpecificAttr>(), Out);
- else
+ switch (FD->getMultiVersionKind()) {
+ case MultiVersionKind::CPUDispatch:
+ case MultiVersionKind::CPUSpecific:
+ AppendCPUSpecificCPUDispatchMangling(CGM,
+ FD->getAttr<CPUSpecificAttr>(),
+ GD.getMultiVersionIndex(), Out);
+ break;
+ case MultiVersionKind::Target:
AppendTargetMangling(CGM, FD->getAttr<TargetAttr>(), Out);
+ break;
+ case MultiVersionKind::None:
+ llvm_unreachable("None multiversion type isn't valid here");
+ }
}
return Out.str();
@@ -992,8 +1006,10 @@ void CodeGenModule::UpdateMultiVersionNames(GlobalDecl GD,
"Other GD should now be a multiversioned function");
// OtherFD is the version of this function that was mangled BEFORE
// becoming a MultiVersion function. It potentially needs to be updated.
- const FunctionDecl *OtherFD =
- OtherGD.getCanonicalDecl().getDecl()->getAsFunction();
+ const FunctionDecl *OtherFD = OtherGD.getCanonicalDecl()
+ .getDecl()
+ ->getAsFunction()
+ ->getMostRecentDecl();
std::string OtherName = getMangledNameImpl(*this, OtherGD, OtherFD);
// This is so that if the initial version was already the 'default'
// version, we don't try to update it.
@@ -1025,26 +1041,6 @@ StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
}
}
- const auto *FD = dyn_cast<FunctionDecl>(GD.getDecl());
- // Since CPUSpecific can require multiple emits per decl, store the manglings
- // separately.
- if (FD &&
- (FD->isCPUDispatchMultiVersion() || FD->isCPUSpecificMultiVersion())) {
- const auto *SD = FD->getAttr<CPUSpecificAttr>();
-
- std::pair<GlobalDecl, unsigned> SpecCanonicalGD{
- CanonicalGD,
- SD ? SD->ActiveArgIndex : std::numeric_limits<unsigned>::max()};
-
- auto FoundName = CPUSpecificMangledDeclNames.find(SpecCanonicalGD);
- if (FoundName != CPUSpecificMangledDeclNames.end())
- return FoundName->second;
-
- auto Result = CPUSpecificManglings.insert(
- std::make_pair(getMangledNameImpl(*this, GD, FD), SpecCanonicalGD));
- return CPUSpecificMangledDeclNames[SpecCanonicalGD] = Result.first->first();
- }
-
auto FoundName = MangledDeclNames.find(CanonicalGD);
if (FoundName != MangledDeclNames.end())
return FoundName->second;
@@ -1106,11 +1102,12 @@ void CodeGenModule::EmitCtorList(CtorList &Fns, const char *GlobalName) {
// Ctor function type is void()*.
llvm::FunctionType* CtorFTy = llvm::FunctionType::get(VoidTy, false);
- llvm::Type *CtorPFTy = llvm::PointerType::getUnqual(CtorFTy);
+ llvm::Type *CtorPFTy = llvm::PointerType::get(CtorFTy,
+ TheModule.getDataLayout().getProgramAddressSpace());
// Get the type of a ctor entry, { i32, void ()*, i8* }.
llvm::StructType *CtorStructTy = llvm::StructType::get(
- Int32Ty, llvm::PointerType::getUnqual(CtorFTy), VoidPtrTy);
+ Int32Ty, CtorPFTy, VoidPtrTy);
// Construct the constructor and destructor arrays.
ConstantInitBuilder builder(*this);
@@ -1166,12 +1163,12 @@ llvm::ConstantInt *CodeGenModule::CreateCrossDsoCfiTypeId(llvm::Metadata *MD) {
return llvm::ConstantInt::get(Int64Ty, llvm::MD5Hash(MDS->getString()));
}
-void CodeGenModule::SetLLVMFunctionAttributes(const Decl *D,
+void CodeGenModule::SetLLVMFunctionAttributes(GlobalDecl GD,
const CGFunctionInfo &Info,
llvm::Function *F) {
unsigned CallingConv;
llvm::AttributeList PAL;
- ConstructAttributeList(F->getName(), Info, D, PAL, CallingConv, false);
+ ConstructAttributeList(F->getName(), Info, GD, PAL, CallingConv, false);
F->setAttributes(PAL);
F->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
}
@@ -1301,9 +1298,19 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
// Otherwise, propagate the inline hint attribute and potentially use its
// absence to mark things as noinline.
if (auto *FD = dyn_cast<FunctionDecl>(D)) {
- if (any_of(FD->redecls(), [&](const FunctionDecl *Redecl) {
- return Redecl->isInlineSpecified();
- })) {
+ // Search function and template pattern redeclarations for inline.
+ auto CheckForInline = [](const FunctionDecl *FD) {
+ auto CheckRedeclForInline = [](const FunctionDecl *Redecl) {
+ return Redecl->isInlineSpecified();
+ };
+ if (any_of(FD->redecls(), CheckRedeclForInline))
+ return true;
+ const FunctionDecl *Pattern = FD->getTemplateInstantiationPattern();
+ if (!Pattern)
+ return false;
+ return any_of(Pattern->redecls(), CheckRedeclForInline);
+ };
+ if (CheckForInline(FD)) {
B.addAttribute(llvm::Attribute::InlineHint);
} else if (CodeGenOpts.getInlining() ==
CodeGenOptions::OnlyHintInlining &&
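
The new CheckForInline lambda also consults the template pattern's redeclarations, since the inline keyword may be visible only on the pattern and not on the instantiated declaration's own redecl chain. A plausible minimal case (illustrative, not taken from the patch):

    // The 'inline' lives on the pattern; the instantiation below has no
    // redeclaration of its own that repeats the keyword.
    template <typename T> inline void helper(T) {}
    template void helper<int>(int);   // explicit instantiation definition

Checking only FD->redecls() could miss the hint in a case like this; walking Pattern->redecls() recovers it.
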
@@ -1377,26 +1384,27 @@ void CodeGenModule::SetCommonAttributes(GlobalDecl GD, llvm::GlobalValue *GV) {
if (CodeGenOpts.KeepStaticConsts && D && isa<VarDecl>(D)) {
const auto *VD = cast<VarDecl>(D);
- if (VD->getType().isConstQualified() && VD->getStorageClass() == SC_Static)
+ if (VD->getType().isConstQualified() &&
+ VD->getStorageDuration() == SD_Static)
addUsedGlobal(GV);
}
}
-bool CodeGenModule::GetCPUAndFeaturesAttributes(const Decl *D,
+bool CodeGenModule::GetCPUAndFeaturesAttributes(GlobalDecl GD,
llvm::AttrBuilder &Attrs) {
// Add target-cpu and target-features attributes to functions. If
// we have a decl for the function and it has a target attribute then
// parse that and add it to the feature set.
StringRef TargetCPU = getTarget().getTargetOpts().CPU;
std::vector<std::string> Features;
- const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
+ const auto *FD = dyn_cast_or_null<FunctionDecl>(GD.getDecl());
FD = FD ? FD->getMostRecentDecl() : FD;
const auto *TD = FD ? FD->getAttr<TargetAttr>() : nullptr;
const auto *SD = FD ? FD->getAttr<CPUSpecificAttr>() : nullptr;
bool AddedAttr = false;
if (TD || SD) {
llvm::StringMap<bool> FeatureMap;
- getFunctionFeatureMap(FeatureMap, FD);
+ getFunctionFeatureMap(FeatureMap, GD);
// Produce the canonical string for this set of features.
for (const llvm::StringMap<bool>::value_type &Entry : FeatureMap)
@@ -1452,7 +1460,7 @@ void CodeGenModule::setNonAliasAttributes(GlobalDecl GD,
F->addFnAttr("implicit-section-name", SA->getName());
llvm::AttrBuilder Attrs;
- if (GetCPUAndFeaturesAttributes(D, Attrs)) {
+ if (GetCPUAndFeaturesAttributes(GD, Attrs)) {
// We know that GetCPUAndFeaturesAttributes will always have the
// newest set, since it has the newest possible FunctionDecl, so the
// new ones should replace the old.
@@ -1475,7 +1483,7 @@ void CodeGenModule::SetInternalFunctionAttributes(GlobalDecl GD,
llvm::Function *F,
const CGFunctionInfo &FI) {
const Decl *D = GD.getDecl();
- SetLLVMFunctionAttributes(D, FI, F);
+ SetLLVMFunctionAttributes(GD, FI, F);
SetLLVMFunctionAttributesForDefinition(D, F);
F->setLinkage(llvm::Function::InternalLinkage);
@@ -1537,7 +1545,7 @@ void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
const auto *FD = cast<FunctionDecl>(GD.getDecl());
if (!IsIncompleteFunction) {
- SetLLVMFunctionAttributes(FD, getTypes().arrangeGlobalDeclaration(GD), F);
+ SetLLVMFunctionAttributes(GD, getTypes().arrangeGlobalDeclaration(GD), F);
// Setup target-specific attributes.
if (F->isDeclaration())
getTargetCodeGenInfo().setTargetAttributes(FD, F, *this);
@@ -2015,7 +2023,7 @@ bool CodeGenModule::MustBeEmitted(const ValueDecl *Global) {
if (CodeGenOpts.KeepStaticConsts) {
const auto *VD = dyn_cast<VarDecl>(Global);
if (VD && VD->getType().isConstQualified() &&
- VD->getStorageClass() == SC_Static)
+ VD->getStorageDuration() == SD_Static)
return true;
}
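
Switching the KeepStaticConsts checks from the storage class (SC_Static) to the storage duration (SD_Static) widens them to constants that are implicitly internal but never spelled `static`. For example:

    // Now kept: static storage duration, but getStorageClass() is SC_None
    // (namespace-scope const has implicit internal linkage in C++).
    namespace cfg { const char BuildTag[] = "v1.2.3"; }
    // Kept before and after: explicit 'static' storage class.
    static const int Limit = 64;

Both satisfy getStorageDuration() == SD_Static; only the second has SC_Static.
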
@@ -2413,6 +2421,19 @@ bool CodeGenModule::shouldOpportunisticallyEmitVTables() {
return CodeGenOpts.OptimizationLevel > 0;
}
+void CodeGenModule::EmitMultiVersionFunctionDefinition(GlobalDecl GD,
+ llvm::GlobalValue *GV) {
+ const auto *FD = cast<FunctionDecl>(GD.getDecl());
+
+ if (FD->isCPUSpecificMultiVersion()) {
+ auto *Spec = FD->getAttr<CPUSpecificAttr>();
+ for (unsigned I = 0; I < Spec->cpus_size(); ++I)
+ EmitGlobalFunctionDefinition(GD.getWithMultiVersionIndex(I), nullptr);
+ // Requires multiple emits.
+ } else
+ EmitGlobalFunctionDefinition(GD, GV);
+}
+
void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) {
const auto *D = cast<ValueDecl>(GD.getDecl());
@@ -2420,7 +2441,7 @@ void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) {
Context.getSourceManager(),
"Generating code for declaration");
- if (isa<FunctionDecl>(D)) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
// At -O0, don't generate IR for functions with available_externally
// linkage.
if (!shouldEmitFunction(GD))
@@ -2433,6 +2454,8 @@ void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) {
ABI->emitCXXStructor(CD, getFromCtorType(GD.getCtorType()));
else if (const auto *DD = dyn_cast<CXXDestructorDecl>(Method))
ABI->emitCXXStructor(DD, getFromDtorType(GD.getDtorType()));
+ else if (FD->isMultiVersion())
+ EmitMultiVersionFunctionDefinition(GD, GV);
else
EmitGlobalFunctionDefinition(GD, GV);
@@ -2442,6 +2465,8 @@ void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) {
return;
}
+ if (FD->isMultiVersion())
+ return EmitMultiVersionFunctionDefinition(GD, GV);
return EmitGlobalFunctionDefinition(GD, GV);
}
@@ -2499,13 +2524,19 @@ void CodeGenModule::emitMultiVersionFunctions() {
TA->getArchitecture(), Feats);
});
- llvm::Function *ResolverFunc = cast<llvm::Function>(
- GetGlobalValue((getMangledName(GD) + ".resolver").str()));
+ llvm::Function *ResolverFunc;
+ const TargetInfo &TI = getTarget();
+
+ if (TI.supportsIFunc() || FD->isTargetMultiVersion())
+ ResolverFunc = cast<llvm::Function>(
+ GetGlobalValue((getMangledName(GD) + ".resolver").str()));
+ else
+ ResolverFunc = cast<llvm::Function>(GetGlobalValue(getMangledName(GD)));
+
if (supportsCOMDAT())
ResolverFunc->setComdat(
getModule().getOrInsertComdat(ResolverFunc->getName()));
- const TargetInfo &TI = getTarget();
std::stable_sort(
Options.begin(), Options.end(),
[&TI](const CodeGenFunction::MultiVersionResolverOption &LHS,
@@ -2522,26 +2553,58 @@ void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
assert(FD && "Not a FunctionDecl?");
const auto *DD = FD->getAttr<CPUDispatchAttr>();
assert(DD && "Not a cpu_dispatch Function?");
- llvm::Type *DeclTy = getTypes().ConvertTypeForMem(FD->getType());
+ QualType CanonTy = Context.getCanonicalType(FD->getType());
+ llvm::Type *DeclTy = getTypes().ConvertFunctionType(CanonTy, FD);
+
+ if (const auto *CXXFD = dyn_cast<CXXMethodDecl>(FD)) {
+ const CGFunctionInfo &FInfo = getTypes().arrangeCXXMethodDeclaration(CXXFD);
+ DeclTy = getTypes().GetFunctionType(FInfo);
+ }
StringRef ResolverName = getMangledName(GD);
- llvm::Type *ResolverType = llvm::FunctionType::get(
- llvm::PointerType::get(DeclTy,
- Context.getTargetAddressSpace(FD->getType())),
- false);
- auto *ResolverFunc = cast<llvm::Function>(
- GetOrCreateLLVMFunction(ResolverName, ResolverType, GlobalDecl{},
- /*ForVTable=*/false));
+
+ llvm::Type *ResolverType;
+ GlobalDecl ResolverGD;
+ if (getTarget().supportsIFunc())
+ ResolverType = llvm::FunctionType::get(
+ llvm::PointerType::get(DeclTy,
+ Context.getTargetAddressSpace(FD->getType())),
+ false);
+ else {
+ ResolverType = DeclTy;
+ ResolverGD = GD;
+ }
+
+ auto *ResolverFunc = cast<llvm::Function>(GetOrCreateLLVMFunction(
+ ResolverName, ResolverType, ResolverGD, /*ForVTable=*/false));
SmallVector<CodeGenFunction::MultiVersionResolverOption, 10> Options;
const TargetInfo &Target = getTarget();
+ unsigned Index = 0;
for (const IdentifierInfo *II : DD->cpus()) {
// Get the name of the target function so we can look it up/create it.
std::string MangledName = getMangledNameImpl(*this, GD, FD, true) +
getCPUSpecificMangling(*this, II->getName());
- llvm::Constant *Func = GetOrCreateLLVMFunction(
- MangledName, DeclTy, GD, /*ForVTable=*/false, /*DontDefer=*/false,
- /*IsThunk=*/false, llvm::AttributeList(), ForDefinition);
+
+ llvm::Constant *Func = GetGlobalValue(MangledName);
+
+ if (!Func) {
+ GlobalDecl ExistingDecl = Manglings.lookup(MangledName);
+ if (ExistingDecl.getDecl() &&
+ ExistingDecl.getDecl()->getAsFunction()->isDefined()) {
+ EmitGlobalFunctionDefinition(ExistingDecl, nullptr);
+ Func = GetGlobalValue(MangledName);
+ } else {
+ if (!ExistingDecl.getDecl())
+ ExistingDecl = GD.getWithMultiVersionIndex(Index);
+
+ Func = GetOrCreateLLVMFunction(
+ MangledName, DeclTy, ExistingDecl,
+ /*ForVTable=*/false, /*DontDefer=*/true,
+ /*IsThunk=*/false, llvm::AttributeList(), ForDefinition);
+ }
+ }
+
llvm::SmallVector<StringRef, 32> Features;
Target.getCPUSpecificCPUDispatchFeatures(II->getName(), Features);
llvm::transform(Features, Features.begin(),
@@ -2551,29 +2614,53 @@ void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
return !Target.validateCpuSupports(Feat);
}), Features.end());
Options.emplace_back(cast<llvm::Function>(Func), StringRef{}, Features);
+ ++Index;
}
llvm::sort(
- Options.begin(), Options.end(),
- [](const CodeGenFunction::MultiVersionResolverOption &LHS,
- const CodeGenFunction::MultiVersionResolverOption &RHS) {
+ Options, [](const CodeGenFunction::MultiVersionResolverOption &LHS,
+ const CodeGenFunction::MultiVersionResolverOption &RHS) {
return CodeGenFunction::GetX86CpuSupportsMask(LHS.Conditions.Features) >
CodeGenFunction::GetX86CpuSupportsMask(RHS.Conditions.Features);
});
+
+ // If the list contains multiple 'default' versions, such as when it contains
+ // 'pentium' and 'generic', don't emit the call to the generic one (since we
+ // always run on at least a 'pentium'). We do this by deleting the 'least
+ // advanced' (read, lowest mangling letter).
+ while (Options.size() > 1 &&
+ CodeGenFunction::GetX86CpuSupportsMask(
+ (Options.end() - 2)->Conditions.Features) == 0) {
+ StringRef LHSName = (Options.end() - 2)->Function->getName();
+ StringRef RHSName = (Options.end() - 1)->Function->getName();
+ if (LHSName.compare(RHSName) < 0)
+ Options.erase(Options.end() - 2);
+ else
+ Options.erase(Options.end() - 1);
+ }
+
CodeGenFunction CGF(*this);
CGF.EmitMultiVersionResolver(ResolverFunc, Options);
}
-/// If an ifunc for the specified mangled name is not in the module, create and
-/// return an llvm IFunc Function with the specified type.
-llvm::Constant *
-CodeGenModule::GetOrCreateMultiVersionIFunc(GlobalDecl GD, llvm::Type *DeclTy,
- const FunctionDecl *FD) {
+/// If a dispatcher for the specified mangled name is not in the module, create
+/// and return an llvm Function with the specified type.
+llvm::Constant *CodeGenModule::GetOrCreateMultiVersionResolver(
+ GlobalDecl GD, llvm::Type *DeclTy, const FunctionDecl *FD) {
std::string MangledName =
getMangledNameImpl(*this, GD, FD, /*OmitMultiVersionMangling=*/true);
- std::string IFuncName = MangledName + ".ifunc";
- if (llvm::GlobalValue *IFuncGV = GetGlobalValue(IFuncName))
- return IFuncGV;
+
+ // Holds the name of the resolver, in ifunc mode this is the ifunc (which has
+ // a separate resolver).
+ std::string ResolverName = MangledName;
+ if (getTarget().supportsIFunc())
+ ResolverName += ".ifunc";
+ else if (FD->isTargetMultiVersion())
+ ResolverName += ".resolver";
+
+ // If this already exists, just return that one.
+ if (llvm::GlobalValue *ResolverGV = GetGlobalValue(ResolverName))
+ return ResolverGV;
// Since this is the first time we've created this IFunc, make sure
// that we put this multiversioned function into the list to be
@@ -2581,20 +2668,28 @@ CodeGenModule::GetOrCreateMultiVersionIFunc(GlobalDecl GD, llvm::Type *DeclTy,
if (!FD->isCPUDispatchMultiVersion() && !FD->isCPUSpecificMultiVersion())
MultiVersionFuncs.push_back(GD);
- std::string ResolverName = MangledName + ".resolver";
- llvm::Type *ResolverType = llvm::FunctionType::get(
- llvm::PointerType::get(DeclTy,
- Context.getTargetAddressSpace(FD->getType())),
- false);
- llvm::Constant *Resolver =
- GetOrCreateLLVMFunction(ResolverName, ResolverType, GlobalDecl{},
- /*ForVTable=*/false);
- llvm::GlobalIFunc *GIF = llvm::GlobalIFunc::create(
- DeclTy, 0, llvm::Function::ExternalLinkage, "", Resolver, &getModule());
- GIF->setName(IFuncName);
- SetCommonAttributes(FD, GIF);
+ if (getTarget().supportsIFunc()) {
+ llvm::Type *ResolverType = llvm::FunctionType::get(
+ llvm::PointerType::get(
+ DeclTy, getContext().getTargetAddressSpace(FD->getType())),
+ false);
+ llvm::Constant *Resolver = GetOrCreateLLVMFunction(
+ MangledName + ".resolver", ResolverType, GlobalDecl{},
+ /*ForVTable=*/false);
+ llvm::GlobalIFunc *GIF = llvm::GlobalIFunc::create(
+ DeclTy, 0, llvm::Function::ExternalLinkage, "", Resolver, &getModule());
+ GIF->setName(ResolverName);
+ SetCommonAttributes(FD, GIF);
- return GIF;
+ return GIF;
+ }
+
+ llvm::Constant *Resolver = GetOrCreateLLVMFunction(
+ ResolverName, DeclTy, GlobalDecl{}, /*ForVTable=*/false);
+ assert(isa<llvm::GlobalValue>(Resolver) &&
+ "Resolver should be created for the first time");
+ SetCommonAttributes(FD, cast<llvm::GlobalValue>(Resolver));
+ return Resolver;
}
/// GetOrCreateLLVMFunction - If the specified mangled name is not in the
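
With this change the dispatch point depends on target capability: where ifuncs are supported, callers bind to `<name>.ifunc`, whose `<name>.resolver` picks a version once; without ifunc support the resolver is emitted as an ordinary callable function (under the plain mangled name for cpu_dispatch, or `<name>.resolver` for target multiversioning) that selects a version on each call. A rough C-level sketch of the non-ifunc shape — illustrative only, with placeholder names; the real body is produced by EmitMultiVersionResolver:

    int  cpu_has_feature_set_A(void);   // placeholder for the feature test
    void work_A(void);                  // placeholder specific version
    void work_default(void);            // placeholder default version
    void work(void) {                   // the dispatcher itself carries the name
      if (cpu_has_feature_set_A())      // most advanced version checked first
        work_A();
      else
        work_default();
    }
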
@@ -2634,7 +2729,7 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
if (TA && TA->isDefaultVersion())
UpdateMultiVersionNames(GD, FD);
if (!IsForDefinition)
- return GetOrCreateMultiVersionIFunc(GD, Ty, FD);
+ return GetOrCreateMultiVersionResolver(GD, Ty, FD);
}
}
@@ -3393,8 +3488,15 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
// CUDA E.2.4.1 "__shared__ variables cannot have an initialization
// as part of their declaration." Sema has already checked for
// error cases, so we just need to set Init to UndefValue.
- if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
- D->hasAttr<CUDASharedAttr>())
+ bool IsCUDASharedVar =
+ getLangOpts().CUDAIsDevice && D->hasAttr<CUDASharedAttr>();
+ // Shadows of initialized device-side global variables are also left
+ // undefined.
+ bool IsCUDAShadowVar =
+ !getLangOpts().CUDAIsDevice &&
+ (D->hasAttr<CUDAConstantAttr>() || D->hasAttr<CUDADeviceAttr>() ||
+ D->hasAttr<CUDASharedAttr>());
+ if (getLangOpts().CUDA && (IsCUDASharedVar || IsCUDAShadowVar))
Init = llvm::UndefValue::get(getTypes().ConvertType(ASTTy));
else if (!InitExpr) {
// This is a tentative definition; tentative definitions are
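
The new shadow case matters for the host-side half of a CUDA compilation, where the host sees `__device__`/`__constant__`/`__shared__` variables only so the runtime can register them. A small illustration:

    // In a .cu file:
    __device__ int counter = 42;
    // Device compilation: 'counter' gets the real initializer (42).
    // Host compilation: the shadow 'counter' is now emitted with an undef
    // initializer; it exists only so the symbol can be registered with the
    // CUDA runtime, so its host-side initializer is irrelevant.
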
@@ -3672,6 +3774,10 @@ llvm::GlobalValue::LinkageTypes CodeGenModule::getLLVMLinkageForDeclarator(
return llvm::GlobalVariable::WeakAnyLinkage;
}
+ if (const auto *FD = D->getAsFunction())
+ if (FD->isMultiVersion() && Linkage == GVA_AvailableExternally)
+ return llvm::GlobalVariable::LinkOnceAnyLinkage;
+
// We are guaranteed to have a strong definition somewhere else,
// so we can use available_externally linkage.
if (Linkage == GVA_AvailableExternally)
@@ -3908,15 +4014,6 @@ void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD,
AddGlobalDtor(Fn, DA->getPriority());
if (D->hasAttr<AnnotateAttr>())
AddGlobalAnnotations(D, Fn);
-
- if (D->isCPUSpecificMultiVersion()) {
- auto *Spec = D->getAttr<CPUSpecificAttr>();
- // If there is another specific version we need to emit, do so here.
- if (Spec->ActiveArgIndex + 1 < Spec->cpus_size()) {
- ++Spec->ActiveArgIndex;
- EmitGlobalFunctionDefinition(GD, nullptr);
- }
- }
}
void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
@@ -4109,51 +4206,82 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
llvm::Constant *Zero = llvm::Constant::getNullValue(Int32Ty);
llvm::Constant *Zeros[] = { Zero, Zero };
-
+
+ const ASTContext &Context = getContext();
+ const llvm::Triple &Triple = getTriple();
+
+ const auto CFRuntime = getLangOpts().CFRuntime;
+ const bool IsSwiftABI =
+ static_cast<unsigned>(CFRuntime) >=
+ static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift);
+ const bool IsSwift4_1 = CFRuntime == LangOptions::CoreFoundationABI::Swift4_1;
+
// If we don't already have it, get __CFConstantStringClassReference.
if (!CFConstantStringClassRef) {
+ const char *CFConstantStringClassName = "__CFConstantStringClassReference";
llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
Ty = llvm::ArrayType::get(Ty, 0);
- llvm::Constant *C =
- CreateRuntimeVariable(Ty, "__CFConstantStringClassReference");
-
- if (getTriple().isOSBinFormatELF() || getTriple().isOSBinFormatCOFF()) {
+
+ switch (CFRuntime) {
+ default: break;
+ case LangOptions::CoreFoundationABI::Swift: LLVM_FALLTHROUGH;
+ case LangOptions::CoreFoundationABI::Swift5_0:
+ CFConstantStringClassName =
+ Triple.isOSDarwin() ? "$s15SwiftFoundation19_NSCFConstantStringCN"
+ : "$s10Foundation19_NSCFConstantStringCN";
+ Ty = IntPtrTy;
+ break;
+ case LangOptions::CoreFoundationABI::Swift4_2:
+ CFConstantStringClassName =
+ Triple.isOSDarwin() ? "$S15SwiftFoundation19_NSCFConstantStringCN"
+ : "$S10Foundation19_NSCFConstantStringCN";
+ Ty = IntPtrTy;
+ break;
+ case LangOptions::CoreFoundationABI::Swift4_1:
+ CFConstantStringClassName =
+ Triple.isOSDarwin() ? "__T015SwiftFoundation19_NSCFConstantStringCN"
+ : "__T010Foundation19_NSCFConstantStringCN";
+ Ty = IntPtrTy;
+ break;
+ }
+
+ llvm::Constant *C = CreateRuntimeVariable(Ty, CFConstantStringClassName);
+
+ if (Triple.isOSBinFormatELF() || Triple.isOSBinFormatCOFF()) {
llvm::GlobalValue *GV = nullptr;
-
+
if ((GV = dyn_cast<llvm::GlobalValue>(C))) {
- IdentifierInfo &II = getContext().Idents.get(GV->getName());
- TranslationUnitDecl *TUDecl = getContext().getTranslationUnitDecl();
+ IdentifierInfo &II = Context.Idents.get(GV->getName());
+ TranslationUnitDecl *TUDecl = Context.getTranslationUnitDecl();
DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl);
const VarDecl *VD = nullptr;
for (const auto &Result : DC->lookup(&II))
if ((VD = dyn_cast<VarDecl>(Result)))
break;
-
- if (getTriple().isOSBinFormatELF()) {
+
+ if (Triple.isOSBinFormatELF()) {
if (!VD)
GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
- }
- else {
- if (!VD || !VD->hasAttr<DLLExportAttr>()) {
+ } else {
+ GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ if (!VD || !VD->hasAttr<DLLExportAttr>())
GV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
- GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
- } else {
+ else
GV->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
- GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
- }
}
-
+
setDSOLocal(GV);
}
}
-
+
// Decay array -> ptr
CFConstantStringClassRef =
- llvm::ConstantExpr::getGetElementPtr(Ty, C, Zeros);
+ IsSwiftABI ? llvm::ConstantExpr::getPtrToInt(C, Ty)
+ : llvm::ConstantExpr::getGetElementPtr(Ty, C, Zeros);
}
- QualType CFTy = getContext().getCFConstantStringType();
+ QualType CFTy = Context.getCFConstantStringType();
auto *STy = cast<llvm::StructType>(getTypes().ConvertType(CFTy));
@@ -4164,7 +4292,12 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
Fields.add(cast<llvm::ConstantExpr>(CFConstantStringClassRef));
// Flags.
- Fields.addInt(IntTy, isUTF16 ? 0x07d0 : 0x07C8);
+ if (IsSwiftABI) {
+ Fields.addInt(IntPtrTy, IsSwift4_1 ? 0x05 : 0x01);
+ Fields.addInt(Int64Ty, isUTF16 ? 0x07d0 : 0x07c8);
+ } else {
+ Fields.addInt(IntTy, isUTF16 ? 0x07d0 : 0x07C8);
+ }
// String pointer.
llvm::Constant *C = nullptr;
@@ -4185,22 +4318,21 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
// Don't enforce the target's minimum global alignment, since the only use
// of the string is via this class initializer.
- CharUnits Align = isUTF16
- ? getContext().getTypeAlignInChars(getContext().ShortTy)
- : getContext().getTypeAlignInChars(getContext().CharTy);
+ CharUnits Align = isUTF16 ? Context.getTypeAlignInChars(Context.ShortTy)
+ : Context.getTypeAlignInChars(Context.CharTy);
GV->setAlignment(Align.getQuantity());
// FIXME: We set the section explicitly to avoid a bug in ld64 224.1.
// Without it LLVM can merge the string with a non unnamed_addr one during
// LTO. Doing that changes the section it ends in, which surprises ld64.
- if (getTriple().isOSBinFormatMachO())
+ if (Triple.isOSBinFormatMachO())
GV->setSection(isUTF16 ? "__TEXT,__ustring"
: "__TEXT,__cstring,cstring_literals");
// Make sure the literal ends up in .rodata to allow for safe ICF and for
// the static linker to adjust permissions to read-only later on.
- else if (getTriple().isOSBinFormatELF())
+ else if (Triple.isOSBinFormatELF())
GV->setSection(".rodata");
-
+
// String.
llvm::Constant *Str =
llvm::ConstantExpr::getGetElementPtr(GV->getValueType(), GV, Zeros);
@@ -4211,8 +4343,17 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
Fields.add(Str);
// String length.
- auto Ty = getTypes().ConvertType(getContext().LongTy);
- Fields.addInt(cast<llvm::IntegerType>(Ty), StringLength);
+ llvm::IntegerType *LengthTy =
+ llvm::IntegerType::get(getModule().getContext(),
+ Context.getTargetInfo().getLongWidth());
+ if (IsSwiftABI) {
+ if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 ||
+ CFRuntime == LangOptions::CoreFoundationABI::Swift4_2)
+ LengthTy = Int32Ty;
+ else
+ LengthTy = IntPtrTy;
+ }
+ Fields.addInt(LengthTy, StringLength);
CharUnits Alignment = getPointerAlign();
@@ -4220,7 +4361,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
GV = Fields.finishAndCreateGlobal("_unnamed_cfstring_", Alignment,
/*isConstant=*/false,
llvm::GlobalVariable::PrivateLinkage);
- switch (getTriple().getObjectFormat()) {
+ switch (Triple.getObjectFormat()) {
case llvm::Triple::UnknownObjectFormat:
llvm_unreachable("unknown file format");
case llvm::Triple::COFF:
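
For the Swift CoreFoundation ABIs the constant-string literal gains an extra pointer-sized field and pointer-sized widths. A sketch of the layout, reconstructed from the code above rather than from any header, so treat it as illustrative:

    #include <stdint.h>
    // Emitted constant-string shape under the Swift CF runtime ABIs.
    struct SwiftCFConstantString {
      uintptr_t isa;     // ptrtoint of the _NSCFConstantString class symbol
      uintptr_t cfinfo;  // 0x01, or 0x05 for the Swift 4.1 ABI
      uint64_t  flags;   // 0x07c8, or 0x07d0 for UTF-16 literals
      const char *data;
      uintptr_t length;  // int32_t for the Swift 4.1/4.2 ABIs
    };

The classic (non-Swift) layout keeps the original four fields: a GEP'd class reference, an i32 flags word, the string pointer, and a `long` length.
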
@@ -4713,6 +4854,7 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
case Decl::TypeAliasTemplate:
case Decl::Block:
case Decl::Empty:
+ case Decl::Binding:
break;
case Decl::Using: // using X; [C++]
if (CGDebugInfo *DI = getModuleDebugInfo())
@@ -4879,7 +5021,7 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
case Decl::OMPDeclareReduction:
EmitOMPDeclareReduction(cast<OMPDeclareReductionDecl>(D));
break;
-
+
case Decl::OMPRequires:
EmitOMPRequiresDecl(cast<OMPRequiresDecl>(D));
break;
@@ -5078,6 +5220,16 @@ void CodeGenModule::EmitVersionIdentMetadata() {
IdentMetadata->addOperand(llvm::MDNode::get(Ctx, IdentNode));
}
+void CodeGenModule::EmitCommandLineMetadata() {
+ llvm::NamedMDNode *CommandLineMetadata =
+ TheModule.getOrInsertNamedMetadata("llvm.commandline");
+ std::string CommandLine = getCodeGenOpts().RecordCommandLine;
+ llvm::LLVMContext &Ctx = TheModule.getContext();
+
+ llvm::Metadata *CommandLineNode[] = {llvm::MDString::get(Ctx, CommandLine)};
+ CommandLineMetadata->addOperand(llvm::MDNode::get(Ctx, CommandLineNode));
+}
+
void CodeGenModule::EmitTargetMetadata() {
// Warning, new MangledDeclNames may be appended within this loop.
// We rely on MapVector insertions adding new elements to the end
@@ -5293,8 +5445,9 @@ TargetAttr::ParsedTargetAttr CodeGenModule::filterFunctionTargetAttrs(const Targ
// Fills in the supplied string map with the set of target features for the
// passed in function.
void CodeGenModule::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
- const FunctionDecl *FD) {
+ GlobalDecl GD) {
StringRef TargetCPU = Target.getTargetOpts().CPU;
+ const FunctionDecl *FD = GD.getDecl()->getAsFunction();
if (const auto *TD = FD->getAttr<TargetAttr>()) {
TargetAttr::ParsedTargetAttr ParsedAttr = filterFunctionTargetAttrs(TD);
@@ -5316,8 +5469,8 @@ void CodeGenModule::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
ParsedAttr.Features);
} else if (const auto *SD = FD->getAttr<CPUSpecificAttr>()) {
llvm::SmallVector<StringRef, 32> FeaturesTmp;
- Target.getCPUSpecificCPUDispatchFeatures(SD->getCurCPUName()->getName(),
- FeaturesTmp);
+ Target.getCPUSpecificCPUDispatchFeatures(
+ SD->getCPUName(GD.getMultiVersionIndex())->getName(), FeaturesTmp);
std::vector<std::string> Features(FeaturesTmp.begin(), FeaturesTmp.end());
Target.initFeatureMap(FeatureMap, getDiags(), TargetCPU, Features);
} else {
diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h
index baf3619ca8..0f6c3bec9e 100644
--- a/lib/CodeGen/CodeGenModule.h
+++ b/lib/CodeGen/CodeGenModule.h
@@ -119,7 +119,13 @@ struct OrderGlobalInits {
struct ObjCEntrypoints {
ObjCEntrypoints() { memset(this, 0, sizeof(*this)); }
- /// void objc_autoreleasePoolPop(void*);
+ /// void objc_alloc(id);
+ llvm::Constant *objc_alloc;
+
+ /// void objc_allocWithZone(id);
+ llvm::Constant *objc_allocWithZone;
+
+ /// void objc_autoreleasePoolPop(void*);
llvm::Constant *objc_autoreleasePoolPop;
/// void *objc_autoreleasePoolPush(void);
@@ -1043,8 +1049,7 @@ public:
const CGFunctionInfo &FI);
/// Set the LLVM function attributes (sext, zext, etc).
- void SetLLVMFunctionAttributes(const Decl *D,
- const CGFunctionInfo &Info,
+ void SetLLVMFunctionAttributes(GlobalDecl GD, const CGFunctionInfo &Info,
llvm::Function *F);
/// Set the LLVM function attributes which only apply to a function
@@ -1104,8 +1109,7 @@ public:
// Fills in the supplied string map with the set of target features for the
// passed in function.
- void getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
- const FunctionDecl *FD);
+ void getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap, GlobalDecl GD);
StringRef getMangledName(GlobalDecl GD);
StringRef getBlockMangledName(GlobalDecl GD, const BlockDecl *BD);
@@ -1293,9 +1297,9 @@ private:
llvm::AttributeList ExtraAttrs = llvm::AttributeList(),
ForDefinition_t IsForDefinition = NotForDefinition);
- llvm::Constant *GetOrCreateMultiVersionIFunc(GlobalDecl GD,
- llvm::Type *DeclTy,
- const FunctionDecl *FD);
+ llvm::Constant *GetOrCreateMultiVersionResolver(GlobalDecl GD,
+ llvm::Type *DeclTy,
+ const FunctionDecl *FD);
void UpdateMultiVersionNames(GlobalDecl GD, const FunctionDecl *FD);
llvm::Constant *GetOrCreateLLVMGlobal(StringRef MangledName,
@@ -1304,7 +1308,7 @@ private:
ForDefinition_t IsForDefinition
= NotForDefinition);
- bool GetCPUAndFeaturesAttributes(const Decl *D,
+ bool GetCPUAndFeaturesAttributes(GlobalDecl GD,
llvm::AttrBuilder &AttrBuilder);
void setNonAliasAttributes(GlobalDecl GD, llvm::GlobalObject *GO);
@@ -1315,6 +1319,8 @@ private:
void EmitGlobalDefinition(GlobalDecl D, llvm::GlobalValue *GV = nullptr);
void EmitGlobalFunctionDefinition(GlobalDecl GD, llvm::GlobalValue *GV);
+ void EmitMultiVersionFunctionDefinition(GlobalDecl GD, llvm::GlobalValue *GV);
+
void EmitGlobalVarDefinition(const VarDecl *D, bool IsTentative = false);
void EmitAliasDefinition(GlobalDecl GD);
void emitIFuncDefinition(GlobalDecl GD);
@@ -1402,6 +1408,9 @@ private:
/// Emit the Clang version as llvm.ident metadata.
void EmitVersionIdentMetadata();
+ /// Emit the Clang commandline as llvm.commandline metadata.
+ void EmitCommandLineMetadata();
+
/// Emits target specific Metadata for global declarations.
void EmitTargetMetadata();
diff --git a/lib/CodeGen/CodeGenPGO.h b/lib/CodeGen/CodeGenPGO.h
index 0759e65388..120ab651a4 100644
--- a/lib/CodeGen/CodeGenPGO.h
+++ b/lib/CodeGen/CodeGenPGO.h
@@ -17,7 +17,6 @@
#include "CGBuilder.h"
#include "CodeGenModule.h"
#include "CodeGenTypes.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ProfileData/InstrProfReader.h"
#include <array>
#include <memory>
diff --git a/lib/CodeGen/CodeGenTBAA.cpp b/lib/CodeGen/CodeGenTBAA.cpp
index ec48231e52..27d39716d2 100644
--- a/lib/CodeGen/CodeGenTBAA.cpp
+++ b/lib/CodeGen/CodeGenTBAA.cpp
@@ -20,7 +20,7 @@
#include "clang/AST/Attr.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/RecordLayout.h"
-#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/LLVMContext.h"
diff --git a/lib/CodeGen/CodeGenTypes.cpp b/lib/CodeGen/CodeGenTypes.cpp
index 1a1395e6ae..2acf1ac161 100644
--- a/lib/CodeGen/CodeGenTypes.cpp
+++ b/lib/CodeGen/CodeGenTypes.cpp
@@ -503,6 +503,9 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id:
+#include "clang/Basic/OpenCLExtensionTypes.def"
case BuiltinType::OCLSampler:
case BuiltinType::OCLEvent:
case BuiltinType::OCLClkEvent:
diff --git a/lib/CodeGen/CodeGenTypes.h b/lib/CodeGen/CodeGenTypes.h
index 626869f000..8e344e91b8 100644
--- a/lib/CodeGen/CodeGenTypes.h
+++ b/lib/CodeGen/CodeGenTypes.h
@@ -17,7 +17,6 @@
#include "CGCall.h"
#include "clang/Basic/ABI.h"
#include "clang/CodeGen/CGFunctionInfo.h"
-#include "clang/Sema/Sema.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/Module.h"
diff --git a/lib/CodeGen/ConstantEmitter.h b/lib/CodeGen/ConstantEmitter.h
index b4d1b65743..7ad8e5d37c 100644
--- a/lib/CodeGen/ConstantEmitter.h
+++ b/lib/CodeGen/ConstantEmitter.h
@@ -38,6 +38,9 @@ private:
/// Whether the constant-emission failed.
bool Failed = false;
+ /// Whether we're in a constant context.
+ bool InConstantContext = false;
+
/// The AST address space where this (non-abstract) initializer is going.
/// Used for generating appropriate placeholders.
LangAS DestAddressSpace;
diff --git a/lib/CodeGen/CoverageMappingGen.cpp b/lib/CodeGen/CoverageMappingGen.cpp
index e75f2c8091..35962c73d9 100644
--- a/lib/CodeGen/CoverageMappingGen.cpp
+++ b/lib/CodeGen/CoverageMappingGen.cpp
@@ -552,6 +552,15 @@ struct CounterCoverageMappingBuilder
completeDeferred(Count, DeferredEndLoc);
}
+ size_t locationDepth(SourceLocation Loc) {
+ size_t Depth = 0;
+ while (Loc.isValid()) {
+ Loc = getIncludeOrExpansionLoc(Loc);
+ Depth++;
+ }
+ return Depth;
+ }
+
/// Pop regions from the stack into the function's list of regions.
///
/// Adds all regions from \c ParentIndex to the top of the stack to the
@@ -566,19 +575,41 @@ struct CounterCoverageMappingBuilder
SourceLocation EndLoc = Region.hasEndLoc()
? Region.getEndLoc()
: RegionStack[ParentIndex].getEndLoc();
+ size_t StartDepth = locationDepth(StartLoc);
+ size_t EndDepth = locationDepth(EndLoc);
while (!SM.isWrittenInSameFile(StartLoc, EndLoc)) {
- // The region ends in a nested file or macro expansion. Create a
- // separate region for each expansion.
- SourceLocation NestedLoc = getStartOfFileOrMacro(EndLoc);
- assert(SM.isWrittenInSameFile(NestedLoc, EndLoc));
-
- if (!isRegionAlreadyAdded(NestedLoc, EndLoc))
- SourceRegions.emplace_back(Region.getCounter(), NestedLoc, EndLoc);
-
- EndLoc = getPreciseTokenLocEnd(getIncludeOrExpansionLoc(EndLoc));
- if (EndLoc.isInvalid())
- llvm::report_fatal_error("File exit not handled before popRegions");
+ bool UnnestStart = StartDepth >= EndDepth;
+ bool UnnestEnd = EndDepth >= StartDepth;
+ if (UnnestEnd) {
+ // The region ends in a nested file or macro expansion. Create a
+ // separate region for each expansion.
+ SourceLocation NestedLoc = getStartOfFileOrMacro(EndLoc);
+ assert(SM.isWrittenInSameFile(NestedLoc, EndLoc));
+
+ if (!isRegionAlreadyAdded(NestedLoc, EndLoc))
+ SourceRegions.emplace_back(Region.getCounter(), NestedLoc, EndLoc);
+
+ EndLoc = getPreciseTokenLocEnd(getIncludeOrExpansionLoc(EndLoc));
+ if (EndLoc.isInvalid())
+ llvm::report_fatal_error("File exit not handled before popRegions");
+ EndDepth--;
+ }
+ if (UnnestStart) {
+ // The region begins in a nested file or macro expansion. Create a
+ // separate region for each expansion.
+ SourceLocation NestedLoc = getEndOfFileOrMacro(StartLoc);
+ assert(SM.isWrittenInSameFile(StartLoc, NestedLoc));
+
+ if (!isRegionAlreadyAdded(StartLoc, NestedLoc))
+ SourceRegions.emplace_back(Region.getCounter(), StartLoc, NestedLoc);
+
+ StartLoc = getIncludeOrExpansionLoc(StartLoc);
+ if (StartLoc.isInvalid())
+ llvm::report_fatal_error("File exit not handled before popRegions");
+ StartDepth--;
+ }
}
+ Region.setStartLoc(StartLoc);
Region.setEndLoc(EndLoc);
MostRecentLocation = EndLoc;
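
The loop now tracks the include/expansion depth of both endpoints and unnests whichever side is deeper, so a region may also *start* inside a nested file or macro expansion instead of only ending in one. A plausible case the start-side handling covers (illustrative):

    bool enabled();
    void doWork();
    #define BEGIN_BLOCK if (enabled()) {
    void run() {
      BEGIN_BLOCK          // the region's start is inside this macro expansion...
        doWork();
      }                    // ...but its end is written in the main file
    }

A sub-region is emitted for the expansion that contains the start, and the start location is then hoisted to its include/expansion location until both endpoints are in the same file.
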
@@ -625,12 +656,15 @@ struct CounterCoverageMappingBuilder
return RegionStack.back();
}
- /// Propagate counts through the children of \c S.
- Counter propagateCounts(Counter TopCount, const Stmt *S) {
+ /// Propagate counts through the children of \p S if \p VisitChildren is true.
+ /// Otherwise, only emit a count for \p S itself.
+ Counter propagateCounts(Counter TopCount, const Stmt *S,
+ bool VisitChildren = true) {
SourceLocation StartLoc = getStart(S);
SourceLocation EndLoc = getEnd(S);
size_t Index = pushRegion(TopCount, StartLoc, EndLoc);
- Visit(S);
+ if (VisitChildren)
+ Visit(S);
Counter ExitCount = getRegion().getCounter();
popRegions(Index);
@@ -843,7 +877,16 @@ struct CounterCoverageMappingBuilder
if (Body && SM.isInSystemHeader(SM.getSpellingLoc(getStart(Body))))
return;
- propagateCounts(getRegionCounter(Body), Body);
+ // Do not visit the artificial children nodes of defaulted methods. The
+ // lexer may not be able to report back precise token end locations for
+ // these children nodes (llvm.org/PR39822), and moreover users will not be
+ // able to see coverage for them.
+ bool Defaulted = false;
+ if (auto *Method = dyn_cast<CXXMethodDecl>(D))
+ Defaulted = Method->isDefaulted();
+
+ propagateCounts(getRegionCounter(Body), Body,
+ /*VisitChildren=*/!Defaulted);
assert(RegionStack.empty() && "Regions entered but never exited");
// Discard the last uncompleted deferred region in a decl, if one exists.
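
"Defaulted" here means defaulted special members (e.g. `= default`), whose synthesized bodies have no user-written tokens to attribute coverage to. For example:

    struct Widget {
      Widget() = default;                // isDefaulted(): the children of the
      Widget(const Widget &) = default;  // synthesized body are no longer visited
    };                                   // when building coverage regions
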
diff --git a/lib/CodeGen/CoverageMappingGen.h b/lib/CodeGen/CoverageMappingGen.h
index b08ad896d7..c62db09695 100644
--- a/lib/CodeGen/CoverageMappingGen.h
+++ b/lib/CodeGen/CoverageMappingGen.h
@@ -16,7 +16,6 @@
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "clang/Lex/PPCallbacks.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/GlobalValue.h"
diff --git a/lib/CodeGen/ItaniumCXXABI.cpp b/lib/CodeGen/ItaniumCXXABI.cpp
index a7e3c8d58b..b53304528c 100644
--- a/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/lib/CodeGen/ItaniumCXXABI.cpp
@@ -287,6 +287,7 @@ public:
void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
+ bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const;
void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
bool ReturnAdjustment) override {
@@ -1562,9 +1563,8 @@ void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
Type != Dtor_Base && DD->isVirtual())
Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
else
- Callee =
- CGCallee::forDirect(CGM.getAddrOfCXXStructor(DD, getFromDtorType(Type)),
- DD);
+ Callee = CGCallee::forDirect(
+ CGM.getAddrOfCXXStructor(DD, getFromDtorType(Type)), GD);
CGF.EmitCXXMemberOrOperatorCall(DD, Callee, ReturnValueSlot(),
This.getPointer(), VTT, VTTTy,
@@ -1750,7 +1750,7 @@ CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
VFunc = VFuncLoad;
}
- CGCallee Callee(MethodDecl->getCanonicalDecl(), VFunc);
+ CGCallee Callee(GD, VFunc);
return Callee;
}
@@ -1778,7 +1778,8 @@ void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
}
-bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
+bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass(
+ const CXXRecordDecl *RD) const {
// We don't emit available_externally vtables if we are in -fapple-kext mode
// because kext mode does not permit devirtualization.
if (CGM.getLangOpts().AppleKext)
@@ -1796,7 +1797,43 @@ bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
// to emit an available_externally copy of vtable.
// FIXME we can still emit a copy of the vtable if we
// can emit definition of the inline functions.
- return !hasAnyUnusedVirtualInlineFunction(RD);
+ if (hasAnyUnusedVirtualInlineFunction(RD))
+ return false;
+
+ // For a class with virtual bases, we must also be able to speculatively
+ // emit the VTT, because CodeGen doesn't have separate notions of "can emit
+ // the vtable" and "can emit the VTT". For a base subobject, this means we
+ // need to be able to emit non-virtual base vtables.
+ if (RD->getNumVBases()) {
+ for (const auto &B : RD->bases()) {
+ auto *BRD = B.getType()->getAsCXXRecordDecl();
+ assert(BRD && "no class for base specifier");
+ if (B.isVirtual() || !BRD->isDynamicClass())
+ continue;
+ if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
+ if (!canSpeculativelyEmitVTableAsBaseClass(RD))
+ return false;
+
+ // For a complete-object vtable (or more specifically, for the VTT), we need
+ // to be able to speculatively emit the vtables of all dynamic virtual bases.
+ for (const auto &B : RD->vbases()) {
+ auto *BRD = B.getType()->getAsCXXRecordDecl();
+ assert(BRD && "no class for base specifier");
+ if (!BRD->isDynamicClass())
+ continue;
+ if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
+ return false;
+ }
+
+ return true;
}
static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
Address InitialPtr,
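
The split into canSpeculativelyEmitVTableAsBaseClass captures the point made in the comments above: emitting an available_externally vtable for a class with virtual bases drags in its VTT, which references non-virtual base construction vtables, so those bases must pass the same check. A shape that exercises both new loops (illustrative):

    struct VBase  { virtual void f(); };
    struct NVBase { virtual void g(); };        // non-virtual dynamic base
    struct Derived : virtual VBase, NVBase {    // has a virtual base => needs a VTT
      void f() override;
      void g() override;
    };
    // Speculatively emitting Derived's vtable now also requires that NVBase's
    // base-subobject vtable and VBase's vtable can be emitted speculatively.
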
@@ -1916,7 +1953,7 @@ Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
// Handle the array cookie specially in ASan.
if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
(expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
- CGM.getCodeGenOpts().SanitizeAddressPoisonClassMemberArrayNewCookie)) {
+ CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
// The store to the CookiePtr does not need to be instrumented.
CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
llvm::FunctionType *FTy =
@@ -2315,11 +2352,13 @@ void CodeGenModule::registerGlobalDtorsWithAtExit() {
FTy, GlobalInitFnName, getTypes().arrangeNullaryFunction(),
SourceLocation());
ASTContext &Ctx = getContext();
+ QualType ReturnTy = Ctx.VoidTy;
+ QualType FunctionTy = Ctx.getFunctionType(ReturnTy, llvm::None, {});
FunctionDecl *FD = FunctionDecl::Create(
Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
- &Ctx.Idents.get(GlobalInitFnName), Ctx.VoidTy, nullptr, SC_Static,
+ &Ctx.Idents.get(GlobalInitFnName), FunctionTy, nullptr, SC_Static,
false, false);
- CGF.StartFunction(GlobalDecl(FD), getContext().VoidTy, GlobalInitFn,
+ CGF.StartFunction(GlobalDecl(FD), ReturnTy, GlobalInitFn,
getTypes().arrangeNullaryFunction(), FunctionArgList(),
SourceLocation(), SourceLocation());
@@ -2418,7 +2457,7 @@ ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
WrapperName.str(), &CGM.getModule());
- CGM.SetLLVMFunctionAttributes(nullptr, FI, Wrapper);
+ CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Wrapper);
if (VD->hasDefinition())
CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper);
@@ -2472,8 +2511,8 @@ void ItaniumCXXABI::EmitThreadLocalInitFuncs(
CharUnits GuardAlign = CharUnits::One();
Guard->setAlignment(GuardAlign.getQuantity());
- CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(InitFunc, OrderedInits,
- Address(Guard, GuardAlign));
+ CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(
+ InitFunc, OrderedInits, ConstantAddress(Guard, GuardAlign));
// On Darwin platforms, use CXX_FAST_TLS calling convention.
if (CGM.getTarget().getTriple().isOSDarwin()) {
InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
@@ -2525,7 +2564,8 @@ void ItaniumCXXABI::EmitThreadLocalInitFuncs(
llvm::GlobalVariable::ExternalWeakLinkage,
InitFnName.str(), &CGM.getModule());
const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
- CGM.SetLLVMFunctionAttributes(nullptr, FI, cast<llvm::Function>(Init));
+ CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI,
+ cast<llvm::Function>(Init));
}
if (Init) {
@@ -2812,6 +2852,9 @@ static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id:
+#include "clang/Basic/OpenCLExtensionTypes.def"
case BuiltinType::OCLSampler:
case BuiltinType::OCLEvent:
case BuiltinType::OCLClkEvent:
@@ -3088,7 +3131,7 @@ void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
}
assert(isa<ObjCInterfaceType>(Ty));
- // Fall through.
+ LLVM_FALLTHROUGH;
case Type::ObjCInterface:
if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
@@ -4025,7 +4068,7 @@ static void InitCatchParam(CodeGenFunction &CGF,
switch (CatchType.getQualifiers().getObjCLifetime()) {
case Qualifiers::OCL_Strong:
CastExn = CGF.EmitARCRetainNonBlock(CastExn);
- // fallthrough
+ LLVM_FALLTHROUGH;
case Qualifiers::OCL_None:
case Qualifiers::OCL_ExplicitNone:
diff --git a/lib/CodeGen/MacroPPCallbacks.cpp b/lib/CodeGen/MacroPPCallbacks.cpp
index 48dea7d54b..013ca15e23 100644
--- a/lib/CodeGen/MacroPPCallbacks.cpp
+++ b/lib/CodeGen/MacroPPCallbacks.cpp
@@ -14,7 +14,8 @@
#include "MacroPPCallbacks.h"
#include "CGDebugInfo.h"
#include "clang/CodeGen/ModuleBuilder.h"
-#include "clang/Parse/Parser.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/Preprocessor.h"
using namespace clang;
@@ -88,16 +89,6 @@ SourceLocation MacroPPCallbacks::getCorrectLocation(SourceLocation Loc) {
return SourceLocation();
}
-static bool isBuiltinFile(SourceManager &SM, SourceLocation Loc) {
- StringRef Filename(SM.getPresumedLoc(Loc).getFilename());
- return Filename.equals("<built-in>");
-}
-
-static bool isCommandLineFile(SourceManager &SM, SourceLocation Loc) {
- StringRef Filename(SM.getPresumedLoc(Loc).getFilename());
- return Filename.equals("<command line>");
-}
-
void MacroPPCallbacks::updateStatusToNextScope() {
switch (Status) {
case NoScope:
@@ -127,7 +118,7 @@ void MacroPPCallbacks::FileEntered(SourceLocation Loc) {
updateStatusToNextScope();
return;
case BuiltinScope:
- if (isCommandLineFile(PP.getSourceManager(), Loc))
+ if (PP.getSourceManager().isWrittenInCommandLineFile(Loc))
return;
updateStatusToNextScope();
LLVM_FALLTHROUGH;
@@ -147,7 +138,7 @@ void MacroPPCallbacks::FileExited(SourceLocation Loc) {
default:
llvm_unreachable("Do not expect to exit a file from current scope");
case BuiltinScope:
- if (!isBuiltinFile(PP.getSourceManager(), Loc))
+ if (!PP.getSourceManager().isWrittenInBuiltinFile(Loc))
// Skip next scope and change status to MainFileScope.
Status = MainFileScope;
return;
diff --git a/lib/CodeGen/MicrosoftCXXABI.cpp b/lib/CodeGen/MicrosoftCXXABI.cpp
index 3b894fcdc1..0ad19ad5ab 100644
--- a/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -1552,9 +1552,9 @@ void MicrosoftCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
if (Type == Dtor_Complete && DD->getParent()->getNumVBases() == 0)
Type = Dtor_Base;
- CGCallee Callee = CGCallee::forDirect(
- CGM.getAddrOfCXXStructor(DD, getFromDtorType(Type)),
- DD);
+ CGCallee Callee =
+ CGCallee::forDirect(CGM.getAddrOfCXXStructor(DD, getFromDtorType(Type)),
+ GlobalDecl(DD, Type));
if (DD->isVirtual()) {
assert(Type != CXXDtorType::Dtor_Deleting &&
@@ -1872,7 +1872,7 @@ CGCallee MicrosoftCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
VFunc = Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());
}
- CGCallee Callee(MethodDecl->getCanonicalDecl(), VFunc);
+ CGCallee Callee(GD, VFunc);
return Callee;
}
@@ -3956,7 +3956,8 @@ MicrosoftCXXABI::getAddrOfCXXCtorClosure(const CXXConstructorDecl *CD,
// Call the destructor with our arguments.
llvm::Constant *CalleePtr =
CGM.getAddrOfCXXStructor(CD, StructorType::Complete);
- CGCallee Callee = CGCallee::forDirect(CalleePtr, CD);
+ CGCallee Callee =
+ CGCallee::forDirect(CalleePtr, GlobalDecl(CD, Ctor_Complete));
const CGFunctionInfo &CalleeInfo = CGM.getTypes().arrangeCXXConstructorCall(
Args, CD, Ctor_Complete, ExtraArgs.Prefix, ExtraArgs.Suffix);
CGF.EmitCall(CalleeInfo, Callee, ReturnValueSlot(), Args);
diff --git a/lib/CodeGen/ModuleBuilder.cpp b/lib/CodeGen/ModuleBuilder.cpp
index 511cf75d6a..1264893ec1 100644
--- a/lib/CodeGen/ModuleBuilder.cpp
+++ b/lib/CodeGen/ModuleBuilder.cpp
@@ -17,9 +17,9 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/TargetInfo.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LLVMContext.h"
diff --git a/lib/CodeGen/ObjectFilePCHContainerOperations.cpp b/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
index c164cec5d9..6f00c836f9 100644
--- a/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
+++ b/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
@@ -14,14 +14,13 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/BackendUtil.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/Preprocessor.h"
-#include "clang/Serialization/ASTWriter.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Bitcode/BitstreamReader.h"
#include "llvm/DebugInfo/DWARF/DWARFContext.h"
@@ -156,6 +155,8 @@ public:
LangOpts.CurrentModule.empty() ? MainFileName : LangOpts.CurrentModule;
CodeGenOpts.setDebugInfo(codegenoptions::FullDebugInfo);
CodeGenOpts.setDebuggerTuning(CI.getCodeGenOpts().getDebuggerTuning());
+ CodeGenOpts.DebugPrefixMap =
+ CI.getInvocation().getCodeGenOpts().DebugPrefixMap;
}
~PCHContainerGenerator() override = default;
diff --git a/lib/CodeGen/SwiftCallingConv.cpp b/lib/CodeGen/SwiftCallingConv.cpp
index b411a501ea..75a0fa5ce1 100644
--- a/lib/CodeGen/SwiftCallingConv.cpp
+++ b/lib/CodeGen/SwiftCallingConv.cpp
@@ -415,6 +415,40 @@ static bool areBytesInSameUnit(CharUnits first, CharUnits second,
== getOffsetAtStartOfUnit(second, chunkSize);
}
+static bool isMergeableEntryType(llvm::Type *type) {
+ // Opaquely-typed memory is always mergeable.
+ if (type == nullptr) return true;
+
+ // Pointers and integers are always mergeable. In theory we should not
+ // merge pointers, but (1) it doesn't currently matter in practice because
+ // the chunk size is never greater than the size of a pointer and (2)
+ // Swift IRGen uses integer types for a lot of things that are "really"
+ // just storing pointers (like Optional<SomePointer>). If we ever have a
+ // target that would otherwise combine pointers, we should put some effort
+ // into fixing those cases in Swift IRGen and then call out pointer types
+ // here.
+
+ // Floating-point and vector types should never be merged.
+ // Most such types are too large and highly-aligned to ever trigger merging
+ // in practice, but it's important for the rule to cover at least 'half'
+ // and 'float', as well as things like small vectors of 'i1' or 'i8'.
+ return (!type->isFloatingPointTy() && !type->isVectorTy());
+}
+
+bool SwiftAggLowering::shouldMergeEntries(const StorageEntry &first,
+ const StorageEntry &second,
+ CharUnits chunkSize) {
+ // Only merge entries that overlap the same chunk. We test this first
+ // despite being a bit more expensive because this is the condition that
+ // tends to prevent merging.
+ if (!areBytesInSameUnit(first.End - CharUnits::One(), second.Begin,
+ chunkSize))
+ return false;
+
+ return (isMergeableEntryType(first.Type) &&
+ isMergeableEntryType(second.Type));
+}
+
void SwiftAggLowering::finish() {
if (Entries.empty()) {
Finished = true;
@@ -425,12 +459,12 @@ void SwiftAggLowering::finish() {
// which is generally the size of a pointer.
const CharUnits chunkSize = getMaximumVoluntaryIntegerSize(CGM);
- // First pass: if two entries share a chunk, make them both opaque
+ // First pass: if two entries should be merged, make them both opaque
// and stretch one to meet the next.
+ // Also, remember if there are any opaque entries.
bool hasOpaqueEntries = (Entries[0].Type == nullptr);
for (size_t i = 1, e = Entries.size(); i != e; ++i) {
- if (areBytesInSameUnit(Entries[i - 1].End - CharUnits::One(),
- Entries[i].Begin, chunkSize)) {
+ if (shouldMergeEntries(Entries[i - 1], Entries[i], chunkSize)) {
Entries[i - 1].Type = nullptr;
Entries[i].Type = nullptr;
Entries[i - 1].End = Entries[i].Begin;
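
Previously any two entries overlapping the same pointer-sized chunk were collapsed into opaque integer storage; the new isMergeableEntryType check keeps floating-point and vector entries distinct. A plausible case that is now left unmerged — exact lowering depends on the target, so this is only an illustration:

    // Both fields share one 8-byte chunk on a 64-bit target, but the float
    // entry is no longer folded into an opaque i64 with its neighbour.
    struct Mixed {
      int   count;
      float scale;
    };
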
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index ada42fd2ae..ae080f5bbd 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -19,9 +19,9 @@
#include "CGValue.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
@@ -2337,7 +2337,7 @@ static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
bool Quote = (Lib.find(" ") != StringRef::npos);
std::string ArgStr = Quote ? "\"" : "";
ArgStr += Lib;
- if (!Lib.endswith_lower(".lib"))
+ if (!Lib.endswith_lower(".lib") && !Lib.endswith_lower(".a"))
ArgStr += ".lib";
ArgStr += Quote ? "\"" : "";
return ArgStr;
@@ -3944,18 +3944,39 @@ ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
}
- // Bool type is always extended to the ABI, other builtin types are not
- // extended.
- const BuiltinType *BT = Ty->getAs<BuiltinType>();
- if (BT && BT->getKind() == BuiltinType::Bool)
- return ABIArgInfo::getExtend(Ty);
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+ switch (BT->getKind()) {
+ case BuiltinType::Bool:
+ // Bool type is always extended to the ABI, other builtin types are not
+ // extended.
+ return ABIArgInfo::getExtend(Ty);
- // Mingw64 GCC uses the old 80 bit extended precision floating point unit. It
- // passes them indirectly through memory.
- if (IsMingw64 && BT && BT->getKind() == BuiltinType::LongDouble) {
- const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
- if (LDF == &llvm::APFloat::x87DoubleExtended())
- return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
+ case BuiltinType::LongDouble:
+ // Mingw64 GCC uses the old 80 bit extended precision floating point
+ // unit. It passes them indirectly through memory.
+ if (IsMingw64) {
+ const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
+ if (LDF == &llvm::APFloat::x87DoubleExtended())
+ return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
+ }
+ break;
+
+ case BuiltinType::Int128:
+ case BuiltinType::UInt128:
+ // If it's a parameter type, the normal ABI rule is that arguments larger
+ // than 8 bytes are passed indirectly. GCC follows it. We follow it too,
+ // even though it isn't particularly efficient.
+ if (!IsReturnType)
+ return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
+
+ // Mingw64 GCC returns i128 in XMM0. Coerce to v2i64 to handle that.
+ // Clang matches them for compatibility.
+ return ABIArgInfo::getDirect(
+ llvm::VectorType::get(llvm::Type::getInt64Ty(getVMContext()), 2));
+
+ default:
+ break;
+ }
}
return ABIArgInfo::getDirect();
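
The Int128 cases follow the comments above; concretely, for the Windows/MinGW x86-64 ABI:

    __int128 sum(__int128 a, __int128 b) {  // a and b are wider than 8 bytes, so each
      return a + b;                         // is passed indirectly (by pointer, not byval);
    }                                       // the return value is coerced to <2 x i64>
                                            // so it comes back in XMM0, matching GCC
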
@@ -4978,13 +4999,21 @@ public:
llvm::Function *Fn = cast<llvm::Function>(GV);
auto Kind = CGM.getCodeGenOpts().getSignReturnAddress();
- if (Kind == CodeGenOptions::SignReturnAddressScope::None)
- return;
+ if (Kind != CodeGenOptions::SignReturnAddressScope::None) {
+ Fn->addFnAttr("sign-return-address",
+ Kind == CodeGenOptions::SignReturnAddressScope::All
+ ? "all"
+ : "non-leaf");
- Fn->addFnAttr("sign-return-address",
- Kind == CodeGenOptions::SignReturnAddressScope::All
- ? "all"
- : "non-leaf");
+ auto Key = CGM.getCodeGenOpts().getSignReturnAddressKey();
+ Fn->addFnAttr("sign-return-address-key",
+ Key == CodeGenOptions::SignReturnAddressKeyValue::AKey
+ ? "a_key"
+ : "b_key");
+ }
+
+ if (CGM.getCodeGenOpts().BranchTargetEnforcement)
+ Fn->addFnAttr("branch-target-enforcement");
}
};
@@ -4993,6 +5022,9 @@ public:
WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind K)
: AArch64TargetCodeGenInfo(CGT, K) {}
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const override;
+
void getDependentLibraryOption(llvm::StringRef Lib,
llvm::SmallString<24> &Opt) const override {
Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
@@ -5003,6 +5035,14 @@ public:
Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
}
};
+
+void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
+ const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
+ AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
+ if (GV->isDeclaration())
+ return;
+ addStackProbeTargetAttributes(D, GV, CGM);
+}
}
ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
@@ -8209,6 +8249,137 @@ SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
return false;
}
+// ARC ABI implementation.
+namespace {
+
+class ARCABIInfo : public DefaultABIInfo {
+public:
+ using DefaultABIInfo::DefaultABIInfo;
+
+private:
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+
+ void updateState(const ABIArgInfo &Info, QualType Ty, CCState &State) const {
+ if (!State.FreeRegs)
+ return;
+ if (Info.isIndirect() && Info.getInReg())
+ State.FreeRegs--;
+ else if (Info.isDirect() && Info.getInReg()) {
+ unsigned sz = (getContext().getTypeSize(Ty) + 31) / 32;
+ if (sz < State.FreeRegs)
+ State.FreeRegs -= sz;
+ else
+ State.FreeRegs = 0;
+ }
+ }
+
+ void computeInfo(CGFunctionInfo &FI) const override {
+ CCState State(FI.getCallingConvention());
+ // ARC uses 8 registers to pass arguments.
+ State.FreeRegs = 8;
+
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ updateState(FI.getReturnInfo(), FI.getReturnType(), State);
+ for (auto &I : FI.arguments()) {
+ I.info = classifyArgumentType(I.type, State.FreeRegs);
+ updateState(I.info, I.type, State);
+ }
+ }
+
+ ABIArgInfo getIndirectByRef(QualType Ty, bool HasFreeRegs) const;
+ ABIArgInfo getIndirectByValue(QualType Ty) const;
+ ABIArgInfo classifyArgumentType(QualType Ty, uint8_t FreeRegs) const;
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+};
+
+class ARCTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ ARCTargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new ARCABIInfo(CGT)) {}
+};
+
+
+ABIArgInfo ARCABIInfo::getIndirectByRef(QualType Ty, bool HasFreeRegs) const {
+ return HasFreeRegs ? getNaturalAlignIndirectInReg(Ty) :
+ getNaturalAlignIndirect(Ty, false);
+}
+
+ABIArgInfo ARCABIInfo::getIndirectByValue(QualType Ty) const {
+ // Compute the byval alignment.
+ const unsigned MinABIStackAlignInBytes = 4;
+ unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
+ return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
+ TypeAlign > MinABIStackAlignInBytes);
+}
+
+Address ARCABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
+ getContext().getTypeInfoInChars(Ty),
+ CharUnits::fromQuantity(4), true);
+}
+
+ABIArgInfo ARCABIInfo::classifyArgumentType(QualType Ty,
+ uint8_t FreeRegs) const {
+ // Handle the generic C++ ABI.
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (RT) {
+ CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
+ if (RAA == CGCXXABI::RAA_Indirect)
+ return getIndirectByRef(Ty, FreeRegs > 0);
+
+ if (RAA == CGCXXABI::RAA_DirectInMemory)
+ return getIndirectByValue(Ty);
+ }
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ auto SizeInRegs = llvm::alignTo(getContext().getTypeSize(Ty), 32) / 32;
+
+ if (isAggregateTypeForABI(Ty)) {
+ // Structures with flexible arrays are always indirect.
+ if (RT && RT->getDecl()->hasFlexibleArrayMember())
+ return getIndirectByValue(Ty);
+
+ // Ignore empty structs/unions.
+ if (isEmptyRecord(getContext(), Ty, true))
+ return ABIArgInfo::getIgnore();
+
+ llvm::LLVMContext &LLVMContext = getVMContext();
+
+ llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
+ SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
+ llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
+
+ return FreeRegs >= SizeInRegs ?
+ ABIArgInfo::getDirectInReg(Result) :
+ ABIArgInfo::getDirect(Result, 0, nullptr, false);
+ }
+
+ return Ty->isPromotableIntegerType() ?
+ (FreeRegs >= SizeInRegs ? ABIArgInfo::getExtendInReg(Ty) :
+ ABIArgInfo::getExtend(Ty)) :
+ (FreeRegs >= SizeInRegs ? ABIArgInfo::getDirectInReg() :
+ ABIArgInfo::getDirect());
+}
+
+ABIArgInfo ARCABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isAnyComplexType())
+ return ABIArgInfo::getDirectInReg();
+
+ // Return values of size > 4 registers are returned indirectly.
+ auto RetSize = llvm::alignTo(getContext().getTypeSize(RetTy), 32) / 32;
+ if (RetSize > 4)
+ return getIndirectByRef(RetTy, /*HasFreeRegs*/ true);
+
+ return DefaultABIInfo::classifyReturnType(RetTy);
+}
+
+} // End anonymous namespace.
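As a rough illustration of the register accounting above (a sketch derived from
the code; the declarations below are hypothetical and not part of the patch):

// ARC passes arguments in 8 registers; each argument is rounded up to 32-bit
// units via (getTypeSize(Ty) + 31) / 32.
struct Pair { int a; int b; };                   // 64 bits -> SizeInRegs = 2
void callee(struct Pair p, long long x, int y);
// With State.FreeRegs starting at 8 and a void return type:
//   p : getDirectInReg({i32, i32})   FreeRegs 8 -> 6
//   x : getDirectInReg(), 2 regs     FreeRegs 6 -> 4
//   y : getDirectInReg(), 1 reg      FreeRegs 4 -> 3
// Once FreeRegs runs out, the same types fall back to plain getDirect()/getExtend().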
//===----------------------------------------------------------------------===//
// XCore ABI Implementation
@@ -9230,6 +9401,8 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
return SetCGInfo(new SparcV9TargetCodeGenInfo(Types));
case llvm::Triple::xcore:
return SetCGInfo(new XCoreTargetCodeGenInfo(Types));
+ case llvm::Triple::arc:
+ return SetCGInfo(new ARCTargetCodeGenInfo(Types));
case llvm::Triple::spir:
case llvm::Triple::spir64:
return SetCGInfo(new SPIRTargetCodeGenInfo(Types));
diff --git a/lib/CodeGen/VarBypassDetector.cpp b/lib/CodeGen/VarBypassDetector.cpp
index 2f8a591a3e..859cdd4282 100644
--- a/lib/CodeGen/VarBypassDetector.cpp
+++ b/lib/CodeGen/VarBypassDetector.cpp
@@ -78,7 +78,7 @@ bool VarBypassDetector::BuildScopeInformation(const Stmt *S,
return false;
++StmtsToSkip;
}
- // Fall through
+ LLVM_FALLTHROUGH;
case Stmt::GotoStmtClass:
FromScopes.push_back({S, ParentScope});
diff --git a/lib/CrossTU/CrossTranslationUnit.cpp b/lib/CrossTU/CrossTranslationUnit.cpp
index e20ea77022..5286b90f93 100644
--- a/lib/CrossTU/CrossTranslationUnit.cpp
+++ b/lib/CrossTU/CrossTranslationUnit.cpp
@@ -17,10 +17,10 @@
#include "clang/CrossTU/CrossTUDiagnostic.h"
#include "clang/Frontend/ASTUnit.h"
#include "clang/Frontend/CompilerInstance.h"
-#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/TextDiagnosticPrinter.h"
#include "clang/Index/USRGeneration.h"
#include "llvm/ADT/Triple.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/Path.h"
@@ -32,6 +32,47 @@ namespace clang {
namespace cross_tu {
namespace {
+
+#define DEBUG_TYPE "CrossTranslationUnit"
+STATISTIC(NumGetCTUCalled, "The # of getCTUDefinition function called");
+STATISTIC(
+ NumNotInOtherTU,
+ "The # of getCTUDefinition called but the function is not in any other TU");
+STATISTIC(NumGetCTUSuccess,
+ "The # of getCTUDefinition successfully returned the "
+ "requested function's body");
+STATISTIC(NumTripleMismatch, "The # of triple mismatches");
+STATISTIC(NumLangMismatch, "The # of language mismatches");
+
+// Same as Triple's equality operator, but a field is only compared if it is
+// known in both instances.
+bool hasEqualKnownFields(const llvm::Triple &Lhs, const llvm::Triple &Rhs) {
+ using llvm::Triple;
+ if (Lhs.getArch() != Triple::UnknownArch &&
+ Rhs.getArch() != Triple::UnknownArch && Lhs.getArch() != Rhs.getArch())
+ return false;
+ if (Lhs.getSubArch() != Triple::NoSubArch &&
+ Rhs.getSubArch() != Triple::NoSubArch &&
+ Lhs.getSubArch() != Rhs.getSubArch())
+ return false;
+ if (Lhs.getVendor() != Triple::UnknownVendor &&
+ Rhs.getVendor() != Triple::UnknownVendor &&
+ Lhs.getVendor() != Rhs.getVendor())
+ return false;
+ if (!Lhs.isOSUnknown() && !Rhs.isOSUnknown() &&
+ Lhs.getOS() != Rhs.getOS())
+ return false;
+ if (Lhs.getEnvironment() != Triple::UnknownEnvironment &&
+ Rhs.getEnvironment() != Triple::UnknownEnvironment &&
+ Lhs.getEnvironment() != Rhs.getEnvironment())
+ return false;
+ if (Lhs.getObjectFormat() != Triple::UnknownObjectFormat &&
+ Rhs.getObjectFormat() != Triple::UnknownObjectFormat &&
+ Lhs.getObjectFormat() != Rhs.getObjectFormat())
+ return false;
+ return true;
+}
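For example (illustrative values, not taken from the patch's tests), an AST
built for a partially specified triple is still accepted as long as every field
known on both sides matches:

// llvm::Triple To("x86_64-pc-linux-gnu");
// llvm::Triple From("x86_64--linux");    // vendor and environment left unknown
// hasEqualKnownFields(To, From) == true   (arch and OS match; unknowns skipped)
// llvm::Triple Odd("aarch64-pc-linux-gnu");
// hasEqualKnownFields(To, Odd)  == false  (architectures differ)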
+
// FIXME: This class will be removed after the transition to llvm::Error.
class IndexErrorCategory : public std::error_category {
public:
@@ -55,6 +96,10 @@ public:
return "Failed to load external AST source.";
case index_error_code::failed_to_generate_usr:
return "Failed to generate USR.";
+ case index_error_code::triple_mismatch:
+ return "Triple mismatch";
+ case index_error_code::lang_mismatch:
+ return "Language mismatch";
}
llvm_unreachable("Unrecognized index_error_code.");
}
@@ -149,14 +194,17 @@ CrossTranslationUnitContext::findFunctionInDeclContext(const DeclContext *DC,
llvm::Expected<const FunctionDecl *>
CrossTranslationUnitContext::getCrossTUDefinition(const FunctionDecl *FD,
StringRef CrossTUDir,
- StringRef IndexName) {
+ StringRef IndexName,
+ bool DisplayCTUProgress) {
+ assert(FD && "FD is missing, bad call to this function!");
assert(!FD->hasBody() && "FD has a definition in current translation unit!");
+ ++NumGetCTUCalled;
const std::string LookupFnName = getLookupName(FD);
if (LookupFnName.empty())
return llvm::make_error<IndexError>(
index_error_code::failed_to_generate_usr);
llvm::Expected<ASTUnit *> ASTUnitOrError =
- loadExternalAST(LookupFnName, CrossTUDir, IndexName);
+ loadExternalAST(LookupFnName, CrossTUDir, IndexName, DisplayCTUProgress);
if (!ASTUnitOrError)
return ASTUnitOrError.takeError();
ASTUnit *Unit = *ASTUnitOrError;
@@ -166,6 +214,31 @@ CrossTranslationUnitContext::getCrossTUDefinition(const FunctionDecl *FD,
assert(&Unit->getFileManager() ==
&Unit->getASTContext().getSourceManager().getFileManager());
+ const llvm::Triple &TripleTo = Context.getTargetInfo().getTriple();
+ const llvm::Triple &TripleFrom =
+ Unit->getASTContext().getTargetInfo().getTriple();
+ // The imported AST may have been generated for a different target.
+ // Some parts of the triple in the loaded ASTContext can be unknown while the
+ // very same parts in the target ASTContext are known. Thus we check for the
+ // known parts only.
+ if (!hasEqualKnownFields(TripleTo, TripleFrom)) {
+ // TODO: Pass the SourceLocation of the CallExpression for more precise
+ // diagnostics.
+ ++NumTripleMismatch;
+ return llvm::make_error<IndexError>(index_error_code::triple_mismatch,
+ Unit->getMainFileName(), TripleTo.str(),
+ TripleFrom.str());
+ }
+
+ const auto &LangTo = Context.getLangOpts();
+ const auto &LangFrom = Unit->getASTContext().getLangOpts();
+ // FIXME: Currently we do not support CTU across C++ and C and across
+ // different dialects of C++.
+ if (LangTo.CPlusPlus != LangFrom.CPlusPlus) {
+ ++NumLangMismatch;
+ return llvm::make_error<IndexError>(index_error_code::lang_mismatch);
+ }
+
TranslationUnitDecl *TU = Unit->getASTContext().getTranslationUnitDecl();
if (const FunctionDecl *ResultDecl =
findFunctionInDeclContext(TU, LookupFnName))
@@ -176,8 +249,8 @@ CrossTranslationUnitContext::getCrossTUDefinition(const FunctionDecl *FD,
void CrossTranslationUnitContext::emitCrossTUDiagnostics(const IndexError &IE) {
switch (IE.getCode()) {
case index_error_code::missing_index_file:
- Context.getDiagnostics().Report(diag::err_fe_error_opening)
- << IE.getFileName() << "required by the CrossTU functionality";
+ Context.getDiagnostics().Report(diag::err_ctu_error_opening)
+ << IE.getFileName();
break;
case index_error_code::invalid_index_format:
Context.getDiagnostics().Report(diag::err_fnmap_parsing)
@@ -187,13 +260,18 @@ void CrossTranslationUnitContext::emitCrossTUDiagnostics(const IndexError &IE) {
Context.getDiagnostics().Report(diag::err_multiple_def_index)
<< IE.getLineNum();
break;
+ case index_error_code::triple_mismatch:
+ Context.getDiagnostics().Report(diag::warn_ctu_incompat_triple)
+ << IE.getFileName() << IE.getTripleToName() << IE.getTripleFromName();
+ break;
default:
break;
}
}
llvm::Expected<ASTUnit *> CrossTranslationUnitContext::loadExternalAST(
- StringRef LookupName, StringRef CrossTUDir, StringRef IndexName) {
+ StringRef LookupName, StringRef CrossTUDir, StringRef IndexName,
+ bool DisplayCTUProgress) {
// FIXME: The current implementation only supports loading functions with
// a lookup name from a single translation unit. If multiple
// translation units contains functions with the same lookup name an
@@ -216,8 +294,10 @@ llvm::Expected<ASTUnit *> CrossTranslationUnitContext::loadExternalAST(
}
auto It = FunctionFileMap.find(LookupName);
- if (It == FunctionFileMap.end())
+ if (It == FunctionFileMap.end()) {
+ ++NumNotInOtherTU;
return llvm::make_error<IndexError>(index_error_code::missing_definition);
+ }
StringRef ASTFileName = It->second;
auto ASTCacheEntry = FileASTUnitMap.find(ASTFileName);
if (ASTCacheEntry == FileASTUnitMap.end()) {
@@ -233,6 +313,10 @@ llvm::Expected<ASTUnit *> CrossTranslationUnitContext::loadExternalAST(
ASTUnit::LoadEverything, Diags, CI.getFileSystemOpts()));
Unit = LoadedUnit.get();
FileASTUnitMap[ASTFileName] = std::move(LoadedUnit);
+ if (DisplayCTUProgress) {
+ llvm::errs() << "CTU loaded AST file: "
+ << ASTFileName << "\n";
+ }
} else {
Unit = ASTCacheEntry->second.get();
}
@@ -245,11 +329,16 @@ llvm::Expected<ASTUnit *> CrossTranslationUnitContext::loadExternalAST(
llvm::Expected<const FunctionDecl *>
CrossTranslationUnitContext::importDefinition(const FunctionDecl *FD) {
+ assert(FD->hasBody() && "Functions to be imported should have body.");
+
ASTImporter &Importer = getOrCreateASTImporter(FD->getASTContext());
auto *ToDecl =
- cast<FunctionDecl>(Importer.Import(const_cast<FunctionDecl *>(FD)));
+ cast_or_null<FunctionDecl>(Importer.Import(const_cast<FunctionDecl *>(FD)));
+ if (!ToDecl)
+ return llvm::make_error<IndexError>(index_error_code::failed_import);
assert(ToDecl->hasBody());
assert(FD->hasBody() && "Functions already imported should have body.");
+ ++NumGetCTUSuccess;
return ToDecl;
}
diff --git a/lib/Driver/CMakeLists.txt b/lib/Driver/CMakeLists.txt
index 1e6122e98c..bc96098d0a 100644
--- a/lib/Driver/CMakeLists.txt
+++ b/lib/Driver/CMakeLists.txt
@@ -47,6 +47,7 @@ add_clang_library(clangDriver
ToolChains/Haiku.cpp
ToolChains/HIP.cpp
ToolChains/Hexagon.cpp
+ ToolChains/Hurd.cpp
ToolChains/Linux.cpp
ToolChains/MipsLinux.cpp
ToolChains/MinGW.cpp
diff --git a/lib/Driver/Compilation.cpp b/lib/Driver/Compilation.cpp
index ca2525dd07..982d7ecad9 100644
--- a/lib/Driver/Compilation.cpp
+++ b/lib/Driver/Compilation.cpp
@@ -127,7 +127,7 @@ bool Compilation::CleanupFile(const char *File, bool IssueErrors) const {
return true;
}
-bool Compilation::CleanupFileList(const ArgStringList &Files,
+bool Compilation::CleanupFileList(const llvm::opt::ArgStringList &Files,
bool IssueErrors) const {
bool Success = true;
for (const auto &File: Files)
diff --git a/lib/Driver/Distro.cpp b/lib/Driver/Distro.cpp
index 2c4d44faf8..eb537c87e7 100644
--- a/lib/Driver/Distro.cpp
+++ b/lib/Driver/Distro.cpp
@@ -8,6 +8,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Driver/Distro.h"
+#include "clang/Basic/LLVM.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
@@ -17,7 +18,7 @@
using namespace clang::driver;
using namespace clang;
-static Distro::DistroType DetectDistro(vfs::FileSystem &VFS) {
+static Distro::DistroType DetectDistro(llvm::vfs::FileSystem &VFS) {
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> File =
VFS.getBufferForFile("/etc/lsb-release");
if (File) {
@@ -50,6 +51,7 @@ static Distro::DistroType DetectDistro(vfs::FileSystem &VFS) {
.Case("artful", Distro::UbuntuArtful)
.Case("bionic", Distro::UbuntuBionic)
.Case("cosmic", Distro::UbuntuCosmic)
+ .Case("disco", Distro::UbuntuDisco)
.Default(Distro::UnknownDistro);
if (Version != Distro::UnknownDistro)
return Version;
@@ -139,4 +141,4 @@ static Distro::DistroType DetectDistro(vfs::FileSystem &VFS) {
return Distro::UnknownDistro;
}
-Distro::Distro(vfs::FileSystem &VFS) : DistroVal(DetectDistro(VFS)) {}
+Distro::Distro(llvm::vfs::FileSystem &VFS) : DistroVal(DetectDistro(VFS)) {}
diff --git a/lib/Driver/Driver.cpp b/lib/Driver/Driver.cpp
index c5aed55eb9..413956eb18 100644
--- a/lib/Driver/Driver.cpp
+++ b/lib/Driver/Driver.cpp
@@ -26,6 +26,7 @@
#include "ToolChains/HIP.h"
#include "ToolChains/Haiku.h"
#include "ToolChains/Hexagon.h"
+#include "ToolChains/Hurd.h"
#include "ToolChains/Lanai.h"
#include "ToolChains/Linux.h"
#include "ToolChains/MSVC.h"
@@ -43,7 +44,6 @@
#include "ToolChains/WebAssembly.h"
#include "ToolChains/XCore.h"
#include "clang/Basic/Version.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Config/config.h"
#include "clang/Driver/Action.h"
#include "clang/Driver/Compilation.h"
@@ -68,18 +68,21 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/Program.h"
#include "llvm/Support/StringSaver.h"
#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include <map>
#include <memory>
#include <utility>
#if LLVM_ON_UNIX
#include <unistd.h> // getpid
+#include <sysexits.h> // EX_IOERR
#endif
using namespace clang::driver;
@@ -88,7 +91,7 @@ using namespace llvm::opt;
Driver::Driver(StringRef ClangExecutable, StringRef TargetTriple,
DiagnosticsEngine &Diags,
- IntrusiveRefCntPtr<vfs::FileSystem> VFS)
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS)
: Opts(createDriverOptTable()), Diags(Diags), VFS(std::move(VFS)),
Mode(GCCMode), SaveTemps(SaveTempsNone), BitcodeEmbed(EmbedNone),
LTOMode(LTOK_None), ClangExecutable(ClangExecutable),
@@ -98,12 +101,11 @@ Driver::Driver(StringRef ClangExecutable, StringRef TargetTriple,
CCPrintOptions(false), CCPrintHeaders(false), CCLogDiagnostics(false),
CCGenDiagnostics(false), TargetTriple(TargetTriple),
CCCGenericGCCName(""), Saver(Alloc), CheckInputsExist(true),
- CCCUsePCH(true), GenReproducer(false),
- SuppressMissingInputWarning(false) {
+ GenReproducer(false), SuppressMissingInputWarning(false) {
// Provide a sane fallback if no VFS is specified.
if (!this->VFS)
- this->VFS = vfs::getRealFileSystem();
+ this->VFS = llvm::vfs::getRealFileSystem();
Name = llvm::sys::path::filename(ClangExecutable);
Dir = llvm::sys::path::parent_path(ClangExecutable);
@@ -164,6 +166,7 @@ void Driver::setDriverModeFromOption(StringRef Opt) {
}
InputArgList Driver::ParseArgStrings(ArrayRef<const char *> ArgStrings,
+ bool IsClCompatMode,
bool &ContainsError) {
llvm::PrettyStackTraceString CrashInfo("Command line argument parsing");
ContainsError = false;
@@ -171,7 +174,7 @@ InputArgList Driver::ParseArgStrings(ArrayRef<const char *> ArgStrings,
unsigned IncludedFlagsBitmask;
unsigned ExcludedFlagsBitmask;
std::tie(IncludedFlagsBitmask, ExcludedFlagsBitmask) =
- getIncludeExcludeOptionFlagMasks();
+ getIncludeExcludeOptionFlagMasks(IsClCompatMode);
unsigned MissingArgIndex, MissingArgCount;
InputArgList Args =
@@ -399,6 +402,13 @@ static llvm::Triple computeTargetTriple(const Driver &D,
llvm::Triple Target(llvm::Triple::normalize(TargetTriple));
+ // GNU/Hurd's triples should have been -hurd-gnu*, but they were historically
+ // made -gnu* only, and we cannot change this, so we have to detect that case
+ // as being the Hurd OS.
+ if (TargetTriple.find("-unknown-gnu") != StringRef::npos ||
+ TargetTriple.find("-pc-gnu") != StringRef::npos)
+ Target.setOSName("hurd");
+
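// For example (hypothetical invocations), both of the following get their OS
// component rewritten here, so the rest of the driver sees llvm::Triple::Hurd:
//   clang --target=i686-unknown-gnu0.3 ...
//   clang --target=i686-pc-gnu ...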
// Handle Apple-specific options available here.
if (Target.isOSBinFormatMachO()) {
// If an explicit Darwin arch name is given, that trumps all.
@@ -484,12 +494,25 @@ static llvm::Triple computeTargetTriple(const Driver &D,
// If target is MIPS adjust the target triple
// accordingly to provided ABI name.
A = Args.getLastArg(options::OPT_mabi_EQ);
- if (A && Target.isMIPS())
- Target = llvm::StringSwitch<llvm::Triple>(A->getValue())
- .Case("32", Target.get32BitArchVariant())
- .Case("n32", Target.get64BitArchVariant())
- .Case("64", Target.get64BitArchVariant())
- .Default(Target);
+ if (A && Target.isMIPS()) {
+ StringRef ABIName = A->getValue();
+ if (ABIName == "32") {
+ Target = Target.get32BitArchVariant();
+ if (Target.getEnvironment() == llvm::Triple::GNUABI64 ||
+ Target.getEnvironment() == llvm::Triple::GNUABIN32)
+ Target.setEnvironment(llvm::Triple::GNU);
+ } else if (ABIName == "n32") {
+ Target = Target.get64BitArchVariant();
+ if (Target.getEnvironment() == llvm::Triple::GNU ||
+ Target.getEnvironment() == llvm::Triple::GNUABI64)
+ Target.setEnvironment(llvm::Triple::GNUABIN32);
+ } else if (ABIName == "64") {
+ Target = Target.get64BitArchVariant();
+ if (Target.getEnvironment() == llvm::Triple::GNU ||
+ Target.getEnvironment() == llvm::Triple::GNUABIN32)
+ Target.setEnvironment(llvm::Triple::GNUABI64);
+ }
+ }
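// The net effect is that the ABI choice also rewrites the triple's environment;
// a sketch of the mapping (example triples, not from the patch):
//   mips64-linux-gnuabi64  + -mabi=n32 -> mips64-linux-gnuabin32
//   mips64-linux-gnuabin32 + -mabi=64  -> mips64-linux-gnuabi64
//   mips64-linux-gnuabi64  + -mabi=32  -> mips-linux-gnu (32-bit arch variant)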
return Target;
}
@@ -715,7 +738,7 @@ bool Driver::readConfigFile(StringRef FileName) {
ConfigFile = CfgFileName.str();
bool ContainErrors;
CfgOptions = llvm::make_unique<InputArgList>(
- ParseArgStrings(NewCfgArgs, ContainErrors));
+ ParseArgStrings(NewCfgArgs, IsCLMode(), ContainErrors));
if (ContainErrors) {
CfgOptions.reset();
return true;
@@ -909,7 +932,7 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
// Arguments specified in command line.
bool ContainsError;
CLOptions = llvm::make_unique<InputArgList>(
- ParseArgStrings(ArgList.slice(1), ContainsError));
+ ParseArgStrings(ArgList.slice(1), IsCLMode(), ContainsError));
// Try parsing configuration file.
if (!ContainsError)
@@ -919,21 +942,47 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
// All arguments, from both config file and command line.
InputArgList Args = std::move(HasConfigFile ? std::move(*CfgOptions)
: std::move(*CLOptions));
- if (HasConfigFile)
- for (auto *Opt : *CLOptions) {
- if (Opt->getOption().matches(options::OPT_config))
- continue;
+
+ auto appendOneArg = [&Args](const Arg *Opt, const Arg *BaseArg) {
unsigned Index = Args.MakeIndex(Opt->getSpelling());
- const Arg *BaseArg = &Opt->getBaseArg();
- if (BaseArg == Opt)
- BaseArg = nullptr;
Arg *Copy = new llvm::opt::Arg(Opt->getOption(), Opt->getSpelling(),
Index, BaseArg);
Copy->getValues() = Opt->getValues();
if (Opt->isClaimed())
Copy->claim();
Args.append(Copy);
+ };
+
+ if (HasConfigFile)
+ for (auto *Opt : *CLOptions) {
+ if (Opt->getOption().matches(options::OPT_config))
+ continue;
+ const Arg *BaseArg = &Opt->getBaseArg();
+ if (BaseArg == Opt)
+ BaseArg = nullptr;
+ appendOneArg(Opt, BaseArg);
+ }
+
+ // In CL mode, look for any pass-through arguments
+ if (IsCLMode() && !ContainsError) {
+ SmallVector<const char *, 16> CLModePassThroughArgList;
+ for (const auto *A : Args.filtered(options::OPT__SLASH_clang)) {
+ A->claim();
+ CLModePassThroughArgList.push_back(A->getValue());
+ }
+
+ if (!CLModePassThroughArgList.empty()) {
+ // Parse any pass through args using default clang processing rather
+ // than clang-cl processing.
+ auto CLModePassThroughOptions = llvm::make_unique<InputArgList>(
+ ParseArgStrings(CLModePassThroughArgList, false, ContainsError));
+
+ if (!ContainsError)
+ for (auto *Opt : *CLModePassThroughOptions) {
+ appendOneArg(Opt, nullptr);
+ }
}
+ }
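// This lets clang-cl forward individual flags to the GCC-style option parser;
// a hypothetical invocation:
//   clang-cl /clang:-fno-strict-aliasing /clang:-Wextra main.cpp
// Each /clang: value is claimed, re-parsed with the default (non-CL) option
// table, and appended to the argument list via appendOneArg above.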
// FIXME: This stuff needs to go into the Compilation, not the driver.
bool CCCPrintPhases;
@@ -957,8 +1006,6 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
CCCPrintBindings = Args.hasArg(options::OPT_ccc_print_bindings);
if (const Arg *A = Args.getLastArg(options::OPT_ccc_gcc_name))
CCCGenericGCCName = A->getValue();
- CCCUsePCH =
- Args.hasFlag(options::OPT_ccc_pch_is_pch, options::OPT_ccc_pch_is_pth);
GenReproducer = Args.hasFlag(options::OPT_gen_reproducer,
options::OPT_fno_crash_diagnostics,
!!::getenv("FORCE_CLANG_DIAGNOSTICS_CRASH"));
@@ -1387,8 +1434,9 @@ int Driver::ExecuteCompilation(
// Otherwise, remove result files and print extra information about abnormal
// failures.
+ int Res = 0;
for (const auto &CmdPair : FailingCommands) {
- int Res = CmdPair.first;
+ int CommandRes = CmdPair.first;
const Command *FailingCommand = CmdPair.second;
// Remove result files if we're not saving temps.
@@ -1397,10 +1445,19 @@ int Driver::ExecuteCompilation(
C.CleanupFileMap(C.getResultFiles(), JA, true);
// Failure result files are valid unless we crashed.
- if (Res < 0)
+ if (CommandRes < 0)
C.CleanupFileMap(C.getFailureResultFiles(), JA, true);
}
+#if LLVM_ON_UNIX
+ // llvm/lib/Support/Unix/Signals.inc will exit with a special return code
+ // for SIGPIPE. Do not print diagnostics for this case.
+ if (CommandRes == EX_IOERR) {
+ Res = CommandRes;
+ continue;
+ }
+#endif
+
// Print extra information about abnormal failures, if possible.
//
// This is ad-hoc, but we don't want to be excessively noisy. If the result
@@ -1410,30 +1467,31 @@ int Driver::ExecuteCompilation(
// diagnostics, so always print the diagnostic there.
const Tool &FailingTool = FailingCommand->getCreator();
- if (!FailingCommand->getCreator().hasGoodDiagnostics() || Res != 1) {
+ if (!FailingCommand->getCreator().hasGoodDiagnostics() || CommandRes != 1) {
// FIXME: See FIXME above regarding result code interpretation.
- if (Res < 0)
+ if (CommandRes < 0)
Diag(clang::diag::err_drv_command_signalled)
<< FailingTool.getShortName();
else
- Diag(clang::diag::err_drv_command_failed) << FailingTool.getShortName()
- << Res;
+ Diag(clang::diag::err_drv_command_failed)
+ << FailingTool.getShortName() << CommandRes;
}
}
- return 0;
+ return Res;
}
void Driver::PrintHelp(bool ShowHidden) const {
unsigned IncludedFlagsBitmask;
unsigned ExcludedFlagsBitmask;
std::tie(IncludedFlagsBitmask, ExcludedFlagsBitmask) =
- getIncludeExcludeOptionFlagMasks();
+ getIncludeExcludeOptionFlagMasks(IsCLMode());
ExcludedFlagsBitmask |= options::NoDriverOption;
if (!ShowHidden)
ExcludedFlagsBitmask |= HelpHidden;
- getOpts().PrintHelp(llvm::outs(), Name.c_str(), DriverTitle.c_str(),
+ std::string Usage = llvm::formatv("{0} [options] file...", Name).str();
+ getOpts().PrintHelp(llvm::outs(), Usage.c_str(), DriverTitle.c_str(),
IncludedFlagsBitmask, ExcludedFlagsBitmask,
/*ShowAllAliases=*/false);
}
@@ -1482,6 +1540,11 @@ void Driver::HandleAutocompletions(StringRef PassedFlags) const {
unsigned short DisableFlags =
options::NoDriverOption | options::Unsupported | options::Ignored;
+ // Distinguish "--autocomplete=-someflag" and "--autocomplete=-someflag,"
+ // because the latter indicates that the user put a space before pressing
+ // tab, which should end up in a file completion.
+ const bool HasSpace = PassedFlags.endswith(",");
+
// Parse PassedFlags by "," as all the command-line flags are passed to this
// function separated by ","
StringRef TargetFlags = PassedFlags;
@@ -1508,7 +1571,19 @@ void Driver::HandleAutocompletions(StringRef PassedFlags) const {
if (SuggestedCompletions.empty())
SuggestedCompletions = Opts->suggestValueCompletions(Cur, "");
- if (SuggestedCompletions.empty()) {
+ // If Flags were empty, it means the user typed `clang [tab]` where we should
+ // list all possible flags. If there was no value completion and the user
+ // pressed tab after a space, we should fall back to a file completion.
+ // We're printing a newline to be consistent with what we print at the end of
+ // this function.
+ if (SuggestedCompletions.empty() && HasSpace && !Flags.empty()) {
+ llvm::outs() << '\n';
+ return;
+ }
+
+ // When the flag ends with '=' and there was no value completion, return an
+ // empty string and fall back to file autocompletion.
+ if (SuggestedCompletions.empty() && !Cur.endswith("=")) {
// If the flag is in the form of "--autocomplete=-foo",
// we were requested to print out all option names that start with "-foo".
// For example, "--autocomplete=-fsyn" is expanded to "-fsyntax-only".
@@ -1902,7 +1977,7 @@ static bool DiagnoseInputExistence(const Driver &D, const DerivedArgList &Args,
}
}
- if (llvm::sys::fs::exists(Twine(Path)))
+ if (D.getVFS().exists(Path))
return true;
if (D.IsCLMode()) {
@@ -2486,11 +2561,13 @@ class OffloadingActionBuilder final {
class HIPActionBuilder final : public CudaActionBuilderBase {
/// The linker inputs obtained for each device arch.
SmallVector<ActionList, 8> DeviceLinkerInputs;
+ bool Relocatable;
public:
HIPActionBuilder(Compilation &C, DerivedArgList &Args,
const Driver::InputList &Inputs)
- : CudaActionBuilderBase(C, Args, Inputs, Action::OFK_HIP) {}
+ : CudaActionBuilderBase(C, Args, Inputs, Action::OFK_HIP),
+ Relocatable(false) {}
bool canUseBundlerUnbundler() const override { return true; }
@@ -2499,23 +2576,70 @@ class OffloadingActionBuilder final {
phases::ID CurPhase, phases::ID FinalPhase,
PhasesTy &Phases) override {
// amdgcn does not support linking of object files, therefore we skip
- // backend and assemble phases to output LLVM IR.
- if (CudaDeviceActions.empty() || CurPhase == phases::Backend ||
+ // backend and assemble phases to output LLVM IR. The exception is when
+ // generating non-relocatable device code, where we generate a fat binary
+ // for the device code and pass it to the host in the Backend phase.
+ if (CudaDeviceActions.empty() ||
+ (CurPhase == phases::Backend && Relocatable) ||
CurPhase == phases::Assemble)
return ABRT_Success;
- assert((CurPhase == phases::Link ||
+ assert(((CurPhase == phases::Link && Relocatable) ||
CudaDeviceActions.size() == GpuArchList.size()) &&
"Expecting one action per GPU architecture.");
assert(!CompileHostOnly &&
"Not expecting CUDA actions in host-only compilation.");
- // Save CudaDeviceActions to DeviceLinkerInputs for each GPU subarch.
- // This happens to each device action originated from each input file.
- // Later on, device actions in DeviceLinkerInputs are used to create
- // device link actions in appendLinkDependences and the created device
- // link actions are passed to the offload action as device dependence.
- if (CurPhase == phases::Link) {
+ if (!Relocatable && CurPhase == phases::Backend) {
+ // If we are in backend phase, we attempt to generate the fat binary.
+ // We compile each arch to IR and use a link action to generate code
+ // object containing ISA. Then we use a special "link" action to create
+ // a fat binary containing all the code objects for different GPU's.
+ // The fat binary is then an input to the host action.
+ for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I) {
+ // Create a link action to link device IR with device library
+ // and generate ISA.
+ ActionList AL;
+ AL.push_back(CudaDeviceActions[I]);
+ CudaDeviceActions[I] =
+ C.MakeAction<LinkJobAction>(AL, types::TY_Image);
+
+ // OffloadingActionBuilder propagates the device arch until an offload
+ // action. Since the next action, which creates the fatbin, does not
+ // have a device arch, whereas the above link action and its input do,
+ // an offload action is needed to stop the null device arch of the next
+ // action from being propagated to the above link action.
+ OffloadAction::DeviceDependences DDep;
+ DDep.add(*CudaDeviceActions[I], *ToolChains.front(),
+ CudaArchToString(GpuArchList[I]), AssociatedOffloadKind);
+ CudaDeviceActions[I] = C.MakeAction<OffloadAction>(
+ DDep, CudaDeviceActions[I]->getType());
+ }
+ // Create HIP fat binary with a special "link" action.
+ CudaFatBinary =
+ C.MakeAction<LinkJobAction>(CudaDeviceActions,
+ types::TY_HIP_FATBIN);
+
+ if (!CompileDeviceOnly) {
+ DA.add(*CudaFatBinary, *ToolChains.front(), /*BoundArch=*/nullptr,
+ AssociatedOffloadKind);
+ // Clear the fat binary, it is already a dependence of a host action.
+ CudaFatBinary = nullptr;
+ }
+
+ // Remove the CUDA actions as they are already connected to a host
+ // action or fat binary.
+ CudaDeviceActions.clear();
+
+ return CompileDeviceOnly ? ABRT_Ignore_Host : ABRT_Success;
+ } else if (CurPhase == phases::Link) {
+ // Save CudaDeviceActions to DeviceLinkerInputs for each GPU subarch.
+ // This happens to each device action originated from each input file.
+ // Later on, device actions in DeviceLinkerInputs are used to create
+ // device link actions in appendLinkDependences and the created device
+ // link actions are passed to the offload action as device dependence.
DeviceLinkerInputs.resize(CudaDeviceActions.size());
auto LI = DeviceLinkerInputs.begin();
for (auto *A : CudaDeviceActions) {
@@ -2548,6 +2672,13 @@ class OffloadingActionBuilder final {
++I;
}
}
+
+ bool initialize() override {
+ Relocatable = Args.hasFlag(options::OPT_fgpu_rdc,
+ options::OPT_fno_gpu_rdc, /*Default=*/false);
+
+ return CudaActionBuilderBase::initialize();
+ }
};
/// OpenMP action builder. The host bitcode is passed to the device frontend
@@ -2890,8 +3021,10 @@ public:
}
// If we can use the bundler, replace the host action by the bundling one in
- // the resulting list. Otherwise, just append the device actions.
- if (CanUseBundler && !OffloadAL.empty()) {
+ // the resulting list. Otherwise, just append the device actions. For
+ // device-only compilation, HostAction is a null pointer, so only do this
+ // when HostAction is not a null pointer.
+ if (CanUseBundler && HostAction && !OffloadAL.empty()) {
// Add the host action to the list in order to create the bundling action.
OffloadAL.push_back(HostAction);
@@ -4221,7 +4354,7 @@ const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA,
}
std::string Driver::GetFilePath(StringRef Name, const ToolChain &TC) const {
- // Seach for Name in a list of paths.
+ // Search for Name in a list of paths.
auto SearchPaths = [&](const llvm::SmallVectorImpl<std::string> &P)
-> llvm::Optional<std::string> {
// Respect a limited subset of the '-Bprefix' functionality in GCC by
@@ -4446,6 +4579,9 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
case llvm::Triple::Contiki:
TC = llvm::make_unique<toolchains::Contiki>(*this, Target, Args);
break;
+ case llvm::Triple::Hurd:
+ TC = llvm::make_unique<toolchains::Hurd>(*this, Target, Args);
+ break;
default:
// Of these targets, Hexagon is the only one that might have
// an OS of Linux, in which case it got handled above already.
@@ -4581,11 +4717,11 @@ bool Driver::GetReleaseVersion(StringRef Str,
return false;
}
-std::pair<unsigned, unsigned> Driver::getIncludeExcludeOptionFlagMasks() const {
+std::pair<unsigned, unsigned> Driver::getIncludeExcludeOptionFlagMasks(bool IsClCompatMode) const {
unsigned IncludedFlagsBitmask = 0;
unsigned ExcludedFlagsBitmask = options::NoDriverOption;
- if (Mode == CLMode) {
+ if (IsClCompatMode) {
// Include CL and Core options.
IncludedFlagsBitmask |= options::CLOption;
IncludedFlagsBitmask |= options::CoreOption;
diff --git a/lib/Driver/Job.cpp b/lib/Driver/Job.cpp
index bd1a9bd8e3..8d1dfbe12d 100644
--- a/lib/Driver/Job.cpp
+++ b/lib/Driver/Job.cpp
@@ -35,7 +35,8 @@ using namespace clang;
using namespace driver;
Command::Command(const Action &Source, const Tool &Creator,
- const char *Executable, const ArgStringList &Arguments,
+ const char *Executable,
+ const llvm::opt::ArgStringList &Arguments,
ArrayRef<InputInfo> Inputs)
: Source(Source), Creator(Creator), Executable(Executable),
Arguments(Arguments) {
@@ -315,6 +316,12 @@ void Command::setEnvironment(llvm::ArrayRef<const char *> NewEnvironment) {
int Command::Execute(ArrayRef<llvm::Optional<StringRef>> Redirects,
std::string *ErrMsg, bool *ExecutionFailed) const {
+ if (PrintInputFilenames) {
+ for (const char *Arg : InputFilenames)
+ llvm::outs() << llvm::sys::path::filename(Arg) << "\n";
+ llvm::outs().flush();
+ }
+
SmallVector<const char*, 128> Argv;
Optional<ArrayRef<StringRef>> Env;
@@ -366,7 +373,7 @@ int Command::Execute(ArrayRef<llvm::Optional<StringRef>> Redirects,
FallbackCommand::FallbackCommand(const Action &Source_, const Tool &Creator_,
const char *Executable_,
- const ArgStringList &Arguments_,
+ const llvm::opt::ArgStringList &Arguments_,
ArrayRef<InputInfo> Inputs,
std::unique_ptr<Command> Fallback_)
: Command(Source_, Creator_, Executable_, Arguments_, Inputs),
@@ -405,11 +412,9 @@ int FallbackCommand::Execute(ArrayRef<llvm::Optional<StringRef>> Redirects,
return SecondaryStatus;
}
-ForceSuccessCommand::ForceSuccessCommand(const Action &Source_,
- const Tool &Creator_,
- const char *Executable_,
- const ArgStringList &Arguments_,
- ArrayRef<InputInfo> Inputs)
+ForceSuccessCommand::ForceSuccessCommand(
+ const Action &Source_, const Tool &Creator_, const char *Executable_,
+ const llvm::opt::ArgStringList &Arguments_, ArrayRef<InputInfo> Inputs)
: Command(Source_, Creator_, Executable_, Arguments_, Inputs) {}
void ForceSuccessCommand::Print(raw_ostream &OS, const char *Terminator,
diff --git a/lib/Driver/SanitizerArgs.cpp b/lib/Driver/SanitizerArgs.cpp
index c2e63ded6c..4e0d7491bb 100644
--- a/lib/Driver/SanitizerArgs.cpp
+++ b/lib/Driver/SanitizerArgs.cpp
@@ -47,7 +47,7 @@ enum : SanitizerMask {
TrappingDefault = CFI,
CFIClasses =
CFIVCall | CFINVCall | CFIMFCall | CFIDerivedCast | CFIUnrelatedCast,
- CompatibleWithMinimalRuntime = TrappingSupported | Scudo,
+ CompatibleWithMinimalRuntime = TrappingSupported | Scudo | ShadowCallStack,
};
enum CoverageFeature {
@@ -376,12 +376,9 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
KernelAddress | Efficiency),
std::make_pair(SafeStack, Address | HWAddress | Leak | Thread | Memory |
KernelAddress | Efficiency),
- std::make_pair(ShadowCallStack, Address | HWAddress | Leak | Thread |
- Memory | KernelAddress | Efficiency |
- SafeStack),
std::make_pair(KernelHWAddress, Address | HWAddress | Leak | Thread |
Memory | KernelAddress | Efficiency |
- SafeStack | ShadowCallStack),
+ SafeStack),
std::make_pair(KernelMemory, Address | HWAddress | Leak | Thread |
Memory | KernelAddress | Efficiency |
Scudo | SafeStack)};
@@ -724,12 +721,22 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
options::OPT_fsanitize_address_use_after_scope,
options::OPT_fno_sanitize_address_use_after_scope, AsanUseAfterScope);
+ AsanPoisonCustomArrayCookie = Args.hasFlag(
+ options::OPT_fsanitize_address_poison_custom_array_cookie,
+ options::OPT_fno_sanitize_address_poison_custom_array_cookie,
+ AsanPoisonCustomArrayCookie);
+
// As a workaround for a bug in gold 2.26 and earlier, dead stripping of
// globals in ASan is disabled by default on ELF targets.
// See https://sourceware.org/bugzilla/show_bug.cgi?id=19002
AsanGlobalsDeadStripping =
!TC.getTriple().isOSBinFormatELF() || TC.getTriple().isOSFuchsia() ||
Args.hasArg(options::OPT_fsanitize_address_globals_dead_stripping);
+
+ AsanUseOdrIndicator =
+ Args.hasFlag(options::OPT_fsanitize_address_use_odr_indicator,
+ options::OPT_fno_sanitize_address_use_odr_indicator,
+ AsanUseOdrIndicator);
} else {
AsanUseAfterScope = false;
}
@@ -897,9 +904,15 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
if (AsanUseAfterScope)
CmdArgs.push_back("-fsanitize-address-use-after-scope");
+ if (AsanPoisonCustomArrayCookie)
+ CmdArgs.push_back("-fsanitize-address-poison-custom-array-cookie");
+
if (AsanGlobalsDeadStripping)
CmdArgs.push_back("-fsanitize-address-globals-dead-stripping");
+ if (AsanUseOdrIndicator)
+ CmdArgs.push_back("-fsanitize-address-use-odr-indicator");
+
// MSan: Workaround for PR16386.
// ASan: This is mainly to help LSan with cases such as
// https://github.com/google/sanitizers/issues/373
diff --git a/lib/Driver/ToolChain.cpp b/lib/Driver/ToolChain.cpp
index 8933e905fc..88a627eab6 100644
--- a/lib/Driver/ToolChain.cpp
+++ b/lib/Driver/ToolChain.cpp
@@ -13,7 +13,6 @@
#include "ToolChains/Clang.h"
#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/Sanitizers.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Config/config.h"
#include "clang/Driver/Action.h"
#include "clang/Driver/Driver.h"
@@ -39,6 +38,7 @@
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/VersionTuple.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include <cassert>
#include <cstddef>
#include <cstring>
@@ -99,7 +99,9 @@ void ToolChain::setTripleEnvironment(llvm::Triple::EnvironmentType Env) {
ToolChain::~ToolChain() = default;
-vfs::FileSystem &ToolChain::getVFS() const { return getDriver().getVFS(); }
+llvm::vfs::FileSystem &ToolChain::getVFS() const {
+ return getDriver().getVFS();
+}
bool ToolChain::useIntegratedAs() const {
return Args.hasFlag(options::OPT_fintegrated_as,
@@ -367,8 +369,10 @@ std::string ToolChain::getCompilerRT(const ArgList &Args, StringRef Component,
TT.isWindowsMSVCEnvironment() || TT.isWindowsItaniumEnvironment();
const char *Prefix = IsITANMSVCWindows ? "" : "lib";
- const char *Suffix = Shared ? (Triple.isOSWindows() ? ".dll" : ".so")
+ const char *Suffix = Shared ? (Triple.isOSWindows() ? ".lib" : ".so")
: (IsITANMSVCWindows ? ".lib" : ".a");
+ if (Shared && Triple.isWindowsGNUEnvironment())
+ Suffix = ".dll.a";
for (const auto &LibPath : getLibraryPaths()) {
SmallString<128> P(LibPath);
@@ -399,19 +403,23 @@ std::string ToolChain::getArchSpecificLibPath() const {
}
bool ToolChain::needsProfileRT(const ArgList &Args) {
- if (Args.hasFlag(options::OPT_fprofile_arcs, options::OPT_fno_profile_arcs,
- false) ||
+ if (needsGCovInstrumentation(Args) ||
Args.hasArg(options::OPT_fprofile_generate) ||
Args.hasArg(options::OPT_fprofile_generate_EQ) ||
Args.hasArg(options::OPT_fprofile_instr_generate) ||
Args.hasArg(options::OPT_fprofile_instr_generate_EQ) ||
- Args.hasArg(options::OPT_fcreate_profile) ||
- Args.hasArg(options::OPT_coverage))
+ Args.hasArg(options::OPT_fcreate_profile))
return true;
return false;
}
+bool ToolChain::needsGCovInstrumentation(const llvm::opt::ArgList &Args) {
+ return Args.hasFlag(options::OPT_fprofile_arcs, options::OPT_fno_profile_arcs,
+ false) ||
+ Args.hasArg(options::OPT_coverage);
+}
+
Tool *ToolChain::SelectTool(const JobAction &JA) const {
if (getDriver().ShouldUseClangCompiler(JA)) return getClang();
Action::ActionClass AC = JA.getKind();
@@ -596,7 +604,7 @@ std::string ToolChain::ComputeLLVMTriple(const ArgList &Args,
// Check to see if an explicit choice to use thumb has been made via
// -mthumb. For assembler files we must check for -mthumb in the options
- // passed to the assember via -Wa or -Xassembler.
+ // passed to the assembler via -Wa or -Xassembler.
bool IsThumb = false;
if (InputType != types::TY_PP_Asm)
IsThumb = Args.hasFlag(options::OPT_mthumb, options::OPT_mno_thumb,
diff --git a/lib/Driver/ToolChains/AMDGPU.cpp b/lib/Driver/ToolChains/AMDGPU.cpp
index 451f31dd52..a421a09891 100644
--- a/lib/Driver/ToolChains/AMDGPU.cpp
+++ b/lib/Driver/ToolChains/AMDGPU.cpp
@@ -104,7 +104,7 @@ void AMDGPUToolChain::addClangTargetOptions(
llvm::opt::ArgStringList &CC1Args,
Action::OffloadKind DeviceOffloadingKind) const {
// Default to "hidden" visibility, as object level linking will not be
- // supported for the forseeable future.
+ // supported for the foreseeable future.
if (!DriverArgs.hasArg(options::OPT_fvisibility_EQ,
options::OPT_fvisibility_ms_compat)) {
CC1Args.push_back("-fvisibility");
diff --git a/lib/Driver/ToolChains/Arch/AArch64.cpp b/lib/Driver/ToolChains/Arch/AArch64.cpp
index c76a31a315..1dc516b963 100644
--- a/lib/Driver/ToolChains/Arch/AArch64.cpp
+++ b/lib/Driver/ToolChains/Arch/AArch64.cpp
@@ -219,6 +219,87 @@ fp16_fml_fallthrough:
Features.push_back("+fullfp16");
}
+ // FIXME: this needs reimplementation too after the TargetParser rewrite
+ //
+ // Context-sensitive meaning of Crypto:
+ // 1) For Arch >= ARMv8.4a: crypto = sm4 + sha3 + sha2 + aes
+ // 2) For Arch <= ARMv8.3a: crypto = sha2 + aes
+ const auto ItBegin = Features.begin();
+ const auto ItEnd = Features.end();
+ const auto ItRBegin = Features.rbegin();
+ const auto ItREnd = Features.rend();
+ const auto ItRCrypto = std::find(ItRBegin, ItREnd, "+crypto");
+ const auto ItRNoCrypto = std::find(ItRBegin, ItREnd, "-crypto");
+ const auto HasCrypto = ItRCrypto != ItREnd;
+ const auto HasNoCrypto = ItRNoCrypto != ItREnd;
+ const ptrdiff_t PosCrypto = ItRCrypto - ItRBegin;
+ const ptrdiff_t PosNoCrypto = ItRNoCrypto - ItRBegin;
+
+ bool NoCrypto = false;
+ if (HasCrypto && HasNoCrypto) {
+ if (PosNoCrypto < PosCrypto)
+ NoCrypto = true;
+ }
+
+ if (std::find(ItBegin, ItEnd, "+v8.4a") != ItEnd) {
+ if (HasCrypto && !NoCrypto) {
+ // Check if we have NOT disabled an algorithm with something like:
+ // +crypto, -algorithm
+ // And if "-algorithm" does not occur, we enable that crypto algorithm.
+ const bool HasSM4 = (std::find(ItBegin, ItEnd, "-sm4") == ItEnd);
+ const bool HasSHA3 = (std::find(ItBegin, ItEnd, "-sha3") == ItEnd);
+ const bool HasSHA2 = (std::find(ItBegin, ItEnd, "-sha2") == ItEnd);
+ const bool HasAES = (std::find(ItBegin, ItEnd, "-aes") == ItEnd);
+ if (HasSM4)
+ Features.push_back("+sm4");
+ if (HasSHA3)
+ Features.push_back("+sha3");
+ if (HasSHA2)
+ Features.push_back("+sha2");
+ if (HasAES)
+ Features.push_back("+aes");
+ } else if (HasNoCrypto) {
+ // Check if we have NOT enabled a crypto algorithm with something like:
+ // -crypto, +algorithm
+ // And if "+algorithm" does not occur, we disable that crypto algorithm.
+ const bool HasSM4 = (std::find(ItBegin, ItEnd, "+sm4") != ItEnd);
+ const bool HasSHA3 = (std::find(ItBegin, ItEnd, "+sha3") != ItEnd);
+ const bool HasSHA2 = (std::find(ItBegin, ItEnd, "+sha2") != ItEnd);
+ const bool HasAES = (std::find(ItBegin, ItEnd, "+aes") != ItEnd);
+ if (!HasSM4)
+ Features.push_back("-sm4");
+ if (!HasSHA3)
+ Features.push_back("-sha3");
+ if (!HasSHA2)
+ Features.push_back("-sha2");
+ if (!HasAES)
+ Features.push_back("-aes");
+ }
+ } else {
+ if (HasCrypto && !NoCrypto) {
+ const bool HasSHA2 = (std::find(ItBegin, ItEnd, "-sha2") == ItEnd);
+ const bool HasAES = (std::find(ItBegin, ItEnd, "-aes") == ItEnd);
+ if (HasSHA2)
+ Features.push_back("+sha2");
+ if (HasAES)
+ Features.push_back("+aes");
+ } else if (HasNoCrypto) {
+ const bool HasSHA2 = (std::find(ItBegin, ItEnd, "+sha2") != ItEnd);
+ const bool HasAES = (std::find(ItBegin, ItEnd, "+aes") != ItEnd);
+ const bool HasV82a = (std::find(ItBegin, ItEnd, "+v8.2a") != ItEnd);
+ const bool HasV83a = (std::find(ItBegin, ItEnd, "+v8.3a") != ItEnd);
+ const bool HasV84a = (std::find(ItBegin, ItEnd, "+v8.4a") != ItEnd);
+ if (!HasSHA2)
+ Features.push_back("-sha2");
+ if (!HasAES)
+ Features.push_back("-aes");
+ if (HasV82a || HasV83a || HasV84a) {
+ Features.push_back("-sm4");
+ Features.push_back("-sha3");
+ }
+ }
+ }
+
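// A few concrete cases of the rules above (illustrative, derived from the code):
//   -march=armv8.4a+crypto         -> +sm4 +sha3 +sha2 +aes
//   -march=armv8.4a+crypto+nosha3  -> +sm4 +sha2 +aes (sha3 stays disabled)
//   -march=armv8.2a+crypto         -> +sha2 +aes
//   -march=armv8.2a+nocrypto       -> -sha2 -aes -sm4 -sha3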
if (Arg *A = Args.getLastArg(options::OPT_mno_unaligned_access,
options::OPT_munaligned_access))
if (A->getOption().matches(options::OPT_mno_unaligned_access))
diff --git a/lib/Driver/ToolChains/Arch/ARM.cpp b/lib/Driver/ToolChains/Arch/ARM.cpp
index e8c8250ed9..f55efc1a22 100644
--- a/lib/Driver/ToolChains/Arch/ARM.cpp
+++ b/lib/Driver/ToolChains/Arch/ARM.cpp
@@ -378,6 +378,13 @@ void arm::getARMTargetFeatures(const ToolChain &TC,
Features);
} else if (FPUArg) {
getARMFPUFeatures(D, FPUArg, Args, FPUArg->getValue(), Features);
+ } else if (Triple.isAndroid() && getARMSubArchVersionNumber(Triple) >= 7) {
+ // Android mandates minimum FPU requirements based on OS version.
+ const char *AndroidFPU =
+ Triple.isAndroidVersionLT(23) ? "vfpv3-d16" : "neon";
+ if (!llvm::ARM::getFPUFeatures(llvm::ARM::parseFPU(AndroidFPU), Features))
+ D.Diag(clang::diag::err_drv_clang_unsupported)
+ << std::string("-mfpu=") + AndroidFPU;
}
// Honor -mhwdiv=. ClangAs gives preference to -Wa,-mhwdiv=.
@@ -444,6 +451,26 @@ fp16_fml_fallthrough:
Features.push_back("-crc");
}
+ // For Arch >= ARMv8.0: crypto = sha2 + aes
+ // FIXME: this needs reimplementation after the TargetParser rewrite
+ if (ArchName.find_lower("armv8a") != StringRef::npos ||
+ ArchName.find_lower("armv8.1a") != StringRef::npos ||
+ ArchName.find_lower("armv8.2a") != StringRef::npos ||
+ ArchName.find_lower("armv8.3a") != StringRef::npos ||
+ ArchName.find_lower("armv8.4a") != StringRef::npos) {
+ if (ArchName.find_lower("+crypto") != StringRef::npos) {
+ if (ArchName.find_lower("+nosha2") == StringRef::npos)
+ Features.push_back("+sha2");
+ if (ArchName.find_lower("+noaes") == StringRef::npos)
+ Features.push_back("+aes");
+ } else if (ArchName.find_lower("-crypto") != StringRef::npos) {
+ if (ArchName.find_lower("+sha2") == StringRef::npos)
+ Features.push_back("-sha2");
+ if (ArchName.find_lower("+aes") == StringRef::npos)
+ Features.push_back("-aes");
+ }
+ }
+
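// For instance (illustrative, derived from the code):
//   -march=armv8a+crypto          -> +sha2 +aes
//   -march=armv8a+crypto+nosha2   -> +aes only (sha2 is not added back)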
// Look for the last occurrence of -mlong-calls or -mno-long-calls. If
// neither options are specified, see if we are compiling for kernel/kext and
// decide whether to pass "+long-calls" based on the OS and its version.
@@ -616,7 +643,7 @@ StringRef arm::getLLVMArchSuffixForARM(StringRef CPU, StringRef Arch,
return llvm::ARM::getSubArch(ArchKind);
}
-void arm::appendEBLinkFlags(const ArgList &Args, ArgStringList &CmdArgs,
+void arm::appendBE8LinkFlag(const ArgList &Args, ArgStringList &CmdArgs,
const llvm::Triple &Triple) {
if (Args.hasArg(options::OPT_r))
return;
diff --git a/lib/Driver/ToolChains/Arch/ARM.h b/lib/Driver/ToolChains/Arch/ARM.h
index c1dc168840..9f0dc4ea2e 100644
--- a/lib/Driver/ToolChains/Arch/ARM.h
+++ b/lib/Driver/ToolChains/Arch/ARM.h
@@ -29,7 +29,7 @@ StringRef getARMCPUForMArch(llvm::StringRef Arch, const llvm::Triple &Triple);
StringRef getLLVMArchSuffixForARM(llvm::StringRef CPU, llvm::StringRef Arch,
const llvm::Triple &Triple);
-void appendEBLinkFlags(const llvm::opt::ArgList &Args,
+void appendBE8LinkFlag(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs,
const llvm::Triple &Triple);
enum class ReadTPMode {
diff --git a/lib/Driver/ToolChains/Arch/Mips.cpp b/lib/Driver/ToolChains/Arch/Mips.cpp
index 6d814631d0..a1591039db 100644
--- a/lib/Driver/ToolChains/Arch/Mips.cpp
+++ b/lib/Driver/ToolChains/Arch/Mips.cpp
@@ -35,6 +35,11 @@ void mips::getMipsCPUAndABI(const ArgList &Args, const llvm::Triple &Triple,
DefMips64CPU = "mips64r6";
}
+ if (Triple.getSubArch() == llvm::Triple::MipsSubArch_r6) {
+ DefMips32CPU = "mips32r6";
+ DefMips64CPU = "mips64r6";
+ }
+
// MIPS64r6 is the default for Android MIPS64 (mips64el-linux-android).
if (Triple.isAndroid()) {
DefMips32CPU = "mips32";
@@ -82,6 +87,9 @@ void mips::getMipsCPUAndABI(const ArgList &Args, const llvm::Triple &Triple,
}
}
+ if (ABIName.empty() && (Triple.getEnvironment() == llvm::Triple::GNUABIN32))
+ ABIName = "n32";
+
if (ABIName.empty() &&
(Triple.getVendor() == llvm::Triple::MipsTechnologies ||
Triple.getVendor() == llvm::Triple::ImaginationTechnologies)) {
diff --git a/lib/Driver/ToolChains/Arch/PPC.cpp b/lib/Driver/ToolChains/Arch/PPC.cpp
index f6a95962ac..791f1206cf 100644
--- a/lib/Driver/ToolChains/Arch/PPC.cpp
+++ b/lib/Driver/ToolChains/Arch/PPC.cpp
@@ -107,15 +107,19 @@ void ppc::getPPCTargetFeatures(const Driver &D, const llvm::Triple &Triple,
if (FloatABI == ppc::FloatABI::Soft)
Features.push_back("-hard-float");
- ppc::ReadGOTPtrMode ReadGOT = ppc::getPPCReadGOTPtrMode(D, Args);
+ ppc::ReadGOTPtrMode ReadGOT = ppc::getPPCReadGOTPtrMode(D, Triple, Args);
if (ReadGOT == ppc::ReadGOTPtrMode::SecurePlt)
Features.push_back("+secure-plt");
}
-ppc::ReadGOTPtrMode ppc::getPPCReadGOTPtrMode(const Driver &D, const ArgList &Args) {
+ppc::ReadGOTPtrMode ppc::getPPCReadGOTPtrMode(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args) {
if (Args.getLastArg(options::OPT_msecure_plt))
return ppc::ReadGOTPtrMode::SecurePlt;
- return ppc::ReadGOTPtrMode::Bss;
+ if (Triple.isOSOpenBSD())
+ return ppc::ReadGOTPtrMode::SecurePlt;
+ else
+ return ppc::ReadGOTPtrMode::Bss;
}
ppc::FloatABI ppc::getPPCFloatABI(const Driver &D, const ArgList &Args) {
diff --git a/lib/Driver/ToolChains/Arch/PPC.h b/lib/Driver/ToolChains/Arch/PPC.h
index 3acee91a2a..4f3cd688ca 100644
--- a/lib/Driver/ToolChains/Arch/PPC.h
+++ b/lib/Driver/ToolChains/Arch/PPC.h
@@ -38,7 +38,7 @@ FloatABI getPPCFloatABI(const Driver &D, const llvm::opt::ArgList &Args);
std::string getPPCTargetCPU(const llvm::opt::ArgList &Args);
const char *getPPCAsmModeForCPU(StringRef Name);
-ReadGOTPtrMode getPPCReadGOTPtrMode(const Driver &D,
+ReadGOTPtrMode getPPCReadGOTPtrMode(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args);
void getPPCTargetFeatures(const Driver &D, const llvm::Triple &Triple,
diff --git a/lib/Driver/ToolChains/BareMetal.cpp b/lib/Driver/ToolChains/BareMetal.cpp
index adce10854b..31d16922cc 100644
--- a/lib/Driver/ToolChains/BareMetal.cpp
+++ b/lib/Driver/ToolChains/BareMetal.cpp
@@ -13,13 +13,13 @@
#include "InputInfo.h"
#include "Gnu.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm::opt;
@@ -119,8 +119,9 @@ void BareMetal::AddClangCXXStdlibIncludeArgs(
std::error_code EC;
Generic_GCC::GCCVersion Version = {"", -1, -1, -1, "", "", ""};
// Walk the subdirs, and find the one with the newest gcc version:
- for (vfs::directory_iterator LI =
- getDriver().getVFS().dir_begin(Dir.str(), EC), LE;
+ for (llvm::vfs::directory_iterator
+ LI = getDriver().getVFS().dir_begin(Dir.str(), EC),
+ LE;
!EC && LI != LE; LI = LI.increment(EC)) {
StringRef VersionText = llvm::sys::path::filename(LI->path());
auto CandidateVersion = Generic_GCC::GCCVersion::Parse(VersionText);
diff --git a/lib/Driver/ToolChains/Clang.cpp b/lib/Driver/ToolChains/Clang.cpp
index 3ca31ab5cb..e3dfb09c73 100644
--- a/lib/Driver/ToolChains/Clang.cpp
+++ b/lib/Driver/ToolChains/Clang.cpp
@@ -530,7 +530,8 @@ static bool useFramePointerForTargetByDefault(const ArgList &Args,
return !areOptimizationsEnabled(Args);
}
- if (Triple.isOSLinux() || Triple.getOS() == llvm::Triple::CloudABI) {
+ if (Triple.isOSLinux() || Triple.getOS() == llvm::Triple::CloudABI ||
+ Triple.isOSHurd()) {
switch (Triple.getArch()) {
// Don't use a frame pointer on linux if optimizing for certain targets.
case llvm::Triple::mips64:
@@ -802,6 +803,29 @@ static void addPGOAndCoverageFlags(Compilation &C, const Driver &D,
CmdArgs.push_back("-fcoverage-mapping");
}
+ if (Args.hasArg(options::OPT_fprofile_exclude_files_EQ)) {
+ auto *Arg = Args.getLastArg(options::OPT_fprofile_exclude_files_EQ);
+ if (!Args.hasArg(options::OPT_coverage))
+ D.Diag(clang::diag::err_drv_argument_only_allowed_with)
+ << "-fprofile-exclude-files="
+ << "--coverage";
+
+ StringRef v = Arg->getValue();
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine("-fprofile-exclude-files=" + v)));
+ }
+
+ if (Args.hasArg(options::OPT_fprofile_filter_files_EQ)) {
+ auto *Arg = Args.getLastArg(options::OPT_fprofile_filter_files_EQ);
+ if (!Args.hasArg(options::OPT_coverage))
+ D.Diag(clang::diag::err_drv_argument_only_allowed_with)
+ << "-fprofile-filter-files="
+ << "--coverage";
+
+ StringRef v = Arg->getValue();
+ CmdArgs.push_back(Args.MakeArgString(Twine("-fprofile-filter-files=" + v)));
+ }
+
if (C.getArgs().hasArg(options::OPT_c) ||
C.getArgs().hasArg(options::OPT_S)) {
if (Output.isFilename()) {
@@ -1128,42 +1152,26 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
bool IsFirstImplicitInclude = !RenderedImplicitInclude;
RenderedImplicitInclude = true;
- // Use PCH if the user requested it.
- bool UsePCH = D.CCCUsePCH;
-
- bool FoundPTH = false;
bool FoundPCH = false;
SmallString<128> P(A->getValue());
// We want the files to have a name like foo.h.pch. Add a dummy extension
// so that replace_extension does the right thing.
P += ".dummy";
- if (UsePCH) {
- llvm::sys::path::replace_extension(P, "pch");
- if (llvm::sys::fs::exists(P))
- FoundPCH = true;
- }
+ llvm::sys::path::replace_extension(P, "pch");
+ if (llvm::sys::fs::exists(P))
+ FoundPCH = true;
if (!FoundPCH) {
- llvm::sys::path::replace_extension(P, "pth");
- if (llvm::sys::fs::exists(P))
- FoundPTH = true;
- }
-
- if (!FoundPCH && !FoundPTH) {
llvm::sys::path::replace_extension(P, "gch");
if (llvm::sys::fs::exists(P)) {
- FoundPCH = UsePCH;
- FoundPTH = !UsePCH;
+ FoundPCH = true;
}
}
- if (FoundPCH || FoundPTH) {
+ if (FoundPCH) {
if (IsFirstImplicitInclude) {
A->claim();
- if (UsePCH)
- CmdArgs.push_back("-include-pch");
- else
- CmdArgs.push_back("-include-pth");
+ CmdArgs.push_back("-include-pch");
CmdArgs.push_back(Args.MakeArgString(P));
continue;
} else {
@@ -1430,6 +1438,56 @@ void Clang::RenderTargetOptions(const llvm::Triple &EffectiveTriple,
}
}
+// Parse -mbranch-protection=<protection>[+<protection>]* where
+// <protection> ::= standard | none | [bti,pac-ret[+b-key,+leaf]*]
+// Returns a triple of (return address signing Scope, signing key, require
+// landing pads)
+static std::tuple<StringRef, StringRef, bool>
+ParseAArch64BranchProtection(const Driver &D, const ArgList &Args,
+ const Arg *A) {
+ StringRef Scope = "none";
+ StringRef Key = "a_key";
+ bool IndirectBranches = false;
+
+ StringRef Value = A->getValue();
+ // This maps onto -mbranch-protection=<scope>+<key>
+
+ if (Value.equals("standard")) {
+ Scope = "non-leaf";
+ Key = "a_key";
+ IndirectBranches = true;
+
+ } else if (!Value.equals("none")) {
+ SmallVector<StringRef, 4> BranchProtection;
+ StringRef(A->getValue()).split(BranchProtection, '+');
+
+ auto Protection = BranchProtection.begin();
+ while (Protection != BranchProtection.end()) {
+ if (Protection->equals("bti"))
+ IndirectBranches = true;
+ else if (Protection->equals("pac-ret")) {
+ Scope = "non-leaf";
+ while (++Protection != BranchProtection.end()) {
+ // Inner loop, because the "leaf" and "b-key" options may only appear
+ // attached to pac-ret.
+ if (Protection->equals("leaf"))
+ Scope = "all";
+ else if (Protection->equals("b-key"))
+ Key = "b_key";
+ else
+ break;
+ }
+ Protection--;
+ } else
+ D.Diag(diag::err_invalid_branch_protection)
+ << *Protection << A->getAsString(Args);
+ Protection++;
+ }
+ }
+
+ return std::make_tuple(Scope, Key, IndirectBranches);
+}
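Some example mappings produced by this parser (a sketch; the resulting cc1
flags are emitted further down in AddAArch64TargetArgs):

//   -mbranch-protection=standard            -> Scope "non-leaf", Key "a_key", BTI on
//   -mbranch-protection=bti                 -> BTI on, no return-address signing
//   -mbranch-protection=pac-ret             -> Scope "non-leaf", Key "a_key"
//   -mbranch-protection=pac-ret+leaf+b-key  -> Scope "all",      Key "b_key"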
+
namespace {
void RenderAArch64ABI(const llvm::Triple &Triple, const ArgList &Args,
ArgStringList &CmdArgs) {
@@ -1484,9 +1542,33 @@ void Clang::AddAArch64TargetArgs(const ArgList &Args,
CmdArgs.push_back("-aarch64-enable-global-merge=true");
}
- if (Arg *A = Args.getLastArg(options::OPT_msign_return_address)) {
+ // Enable/disable return address signing and indirect branch targets.
+ if (Arg *A = Args.getLastArg(options::OPT_msign_return_address_EQ,
+ options::OPT_mbranch_protection_EQ)) {
+
+ const Driver &D = getToolChain().getDriver();
+
+ StringRef Scope, Key;
+ bool IndirectBranches;
+
+ if (A->getOption().matches(options::OPT_msign_return_address_EQ)) {
+ Scope = A->getValue();
+ if (!Scope.equals("none") && !Scope.equals("non-leaf") &&
+ !Scope.equals("all"))
+ D.Diag(diag::err_invalid_branch_protection)
+ << Scope << A->getAsString(Args);
+ Key = "a_key";
+ IndirectBranches = false;
+ } else
+ std::tie(Scope, Key, IndirectBranches) =
+ ParseAArch64BranchProtection(D, Args, A);
+
CmdArgs.push_back(
- Args.MakeArgString(Twine("-msign-return-address=") + A->getValue()));
+ Args.MakeArgString(Twine("-msign-return-address=") + Scope));
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine("-msign-return-address-key=") + Key));
+ if (IndirectBranches)
+ CmdArgs.push_back("-mbranch-target-enforce");
}
}
@@ -1739,6 +1821,10 @@ void Clang::AddX86TargetArgs(const ArgList &Args,
Args.hasArg(options::OPT_fapple_kext))
CmdArgs.push_back("-disable-red-zone");
+ if (!Args.hasFlag(options::OPT_mtls_direct_seg_refs,
+ options::OPT_mno_tls_direct_seg_refs, true))
+ CmdArgs.push_back("-mno-tls-direct-seg-refs");
+
// Default to avoid implicit floating-point for kernel/kext code, but allow
// that to be overridden with -mno-soft-float.
bool NoImplicitFloat = (Args.hasArg(options::OPT_mkernel) ||
@@ -2066,6 +2152,9 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
}
CmdArgs.push_back(Value.data());
TakeNextArg = true;
+ } else if (Value == "-fdebug-compilation-dir") {
+ CmdArgs.push_back("-fdebug-compilation-dir");
+ TakeNextArg = true;
} else {
D.Diag(diag::err_drv_unsupported_option_argument)
<< A->getOption().getName() << Value;
@@ -2078,6 +2167,11 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
CmdArgs.push_back("-target-feature");
CmdArgs.push_back(MipsTargetFeature);
}
+
+ // Forward -fembed-bitcode to the assembler.
+ if (C.getDriver().embedBitcodeEnabled() ||
+ C.getDriver().embedBitcodeMarkerOnly())
+ Args.AddLastArg(CmdArgs, options::OPT_fembed_bitcode_EQ);
}
static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
@@ -2266,6 +2360,9 @@ static void RenderAnalyzerOptions(const ArgList &Args, ArgStringList &CmdArgs,
// Treat blocks as analysis entry points.
CmdArgs.push_back("-analyzer-opt-analyze-nested-blocks");
+ // Enable compatibility mode to avoid analyzer-config related errors.
+ CmdArgs.push_back("-analyzer-config-compatibility-mode=true");
+
// Add default argument set.
if (!Args.hasArg(options::OPT__analyzer_no_default_checks)) {
CmdArgs.push_back("-analyzer-checker=core");
@@ -2694,8 +2791,10 @@ static void RenderCharacterOptions(const ArgList &Args, const llvm::Triple &T,
CmdArgs.push_back("-fno-signed-char");
}
- if (Args.hasFlag(options::OPT_fchar8__t, options::OPT_fno_char8__t, false))
- CmdArgs.push_back("-fchar8_t");
+ // The default depends on the language standard.
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_fchar8__t, options::OPT_fno_char8__t))
+ A->render(Args, CmdArgs);
if (const Arg *A = Args.getLastArg(options::OPT_fshort_wchar,
options::OPT_fno_short_wchar)) {
@@ -2737,7 +2836,6 @@ static void RenderObjCOptions(const ToolChain &TC, const Driver &D,
// When ObjectiveC legacy runtime is in effect on MacOSX, turn on the option
// to do Array/Dictionary subscripting by default.
if (Arch == llvm::Triple::x86 && T.isMacOSX() &&
- !T.isMacOSXVersionLT(10, 7) &&
Runtime.getKind() == ObjCRuntime::FragileMacOSX && Runtime.isNeXTFamily())
CmdArgs.push_back("-fobjc-subscripting-legacy-runtime");
@@ -2773,6 +2871,18 @@ static void RenderObjCOptions(const ToolChain &TC, const Driver &D,
Args.ClaimAllArgs(options::OPT_fno_objc_arc_exceptions);
}
+ // Allow the user to control whether messages can be converted to runtime
+ // functions.
+ if (types::isObjC(Input.getType())) {
+ auto *Arg = Args.getLastArg(
+ options::OPT_fobjc_convert_messages_to_runtime_calls,
+ options::OPT_fno_objc_convert_messages_to_runtime_calls);
+ if (Arg &&
+ Arg->getOption().matches(
+ options::OPT_fno_objc_convert_messages_to_runtime_calls))
+ CmdArgs.push_back("-fno-objc-convert-messages-to-runtime-calls");
+ }
+
// -fobjc-infer-related-result-type is the default, except in the Objective-C
// rewriter.
if (InferCovariantReturns)
@@ -2908,12 +3018,35 @@ static void RenderDiagnosticsOptions(const Driver &D, const ArgList &Args,
CmdArgs.push_back("-fno-spell-checking");
}
+enum class DwarfFissionKind { None, Split, Single };
+
+static DwarfFissionKind getDebugFissionKind(const Driver &D,
+ const ArgList &Args, Arg *&Arg) {
+ Arg =
+ Args.getLastArg(options::OPT_gsplit_dwarf, options::OPT_gsplit_dwarf_EQ);
+ if (!Arg)
+ return DwarfFissionKind::None;
+
+ if (Arg->getOption().matches(options::OPT_gsplit_dwarf))
+ return DwarfFissionKind::Split;
+
+ StringRef Value = Arg->getValue();
+ if (Value == "split")
+ return DwarfFissionKind::Split;
+ if (Value == "single")
+ return DwarfFissionKind::Single;
+
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << Arg->getOption().getName() << Arg->getValue();
+ return DwarfFissionKind::None;
+}
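
As a quick reference, a sketch of the spelling-to-kind mapping that getDebugFissionKind implements; only the spellings visible in the hunk above are assumed, and the helper name below is made up.

#include <cassert>
#include <string>

enum class DwarfFissionKind { None, Split, Single };

// A bare -gsplit-dwarf and -gsplit-dwarf=split both mean Split,
// -gsplit-dwarf=single means Single, and any other value is diagnosed and
// treated as None by the real driver.
static DwarfFissionKind classifySplitDwarf(bool HasJoinedValue,
                                           const std::string &Value) {
  if (!HasJoinedValue || Value == "split")
    return DwarfFissionKind::Split;
  if (Value == "single")
    return DwarfFissionKind::Single;
  return DwarfFissionKind::None;
}

int main() {
  assert(classifySplitDwarf(false, "") == DwarfFissionKind::Split);
  assert(classifySplitDwarf(true, "single") == DwarfFissionKind::Single);
}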
+
static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
const llvm::Triple &T, const ArgList &Args,
bool EmitCodeView, bool IsWindowsMSVC,
ArgStringList &CmdArgs,
codegenoptions::DebugInfoKind &DebugInfoKind,
- const Arg *&SplitDWARFArg) {
+ DwarfFissionKind &DwarfFission) {
if (Args.hasFlag(options::OPT_fdebug_info_for_profiling,
options::OPT_fno_debug_info_for_profiling, false) &&
checkDebugInfoOption(
@@ -2938,10 +3071,12 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
Args.ClaimAllArgs(options::OPT_g_Group);
- SplitDWARFArg = Args.getLastArg(options::OPT_gsplit_dwarf);
+ Arg *SplitDWARFArg;
+ DwarfFission = getDebugFissionKind(D, Args, SplitDWARFArg);
- if (SplitDWARFArg && !checkDebugInfoOption(SplitDWARFArg, Args, D, TC)) {
- SplitDWARFArg = nullptr;
+ if (DwarfFission != DwarfFissionKind::None &&
+ !checkDebugInfoOption(SplitDWARFArg, Args, D, TC)) {
+ DwarfFission = DwarfFissionKind::None;
SplitDWARFInlining = false;
}
@@ -2958,13 +3093,13 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
// composing split-dwarf and line-tables-only, so let those compose
// naturally in that case. And if you just turned off debug info,
// (-gsplit-dwarf -g0) - do that.
- if (SplitDWARFArg) {
+ if (DwarfFission != DwarfFissionKind::None) {
if (A->getIndex() > SplitDWARFArg->getIndex()) {
if (DebugInfoKind == codegenoptions::NoDebugInfo ||
DebugInfoKind == codegenoptions::DebugDirectivesOnly ||
(DebugInfoKind == codegenoptions::DebugLineTablesOnly &&
SplitDWARFInlining))
- SplitDWARFArg = nullptr;
+ DwarfFission = DwarfFissionKind::None;
} else if (SplitDWARFInlining)
DebugInfoKind = codegenoptions::NoDebugInfo;
}
@@ -2997,21 +3132,24 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
if (checkDebugInfoOption(A, Args, D, TC))
DWARFVersion = DwarfVersionNum(A->getSpelling());
- // Forward -gcodeview. EmitCodeView might have been set by CL-compatibility
- // argument parsing.
- if (EmitCodeView) {
- if (const Arg *A = Args.getLastArg(options::OPT_gcodeview)) {
- EmitCodeView = checkDebugInfoOption(A, Args, D, TC);
- if (EmitCodeView) {
- // DWARFVersion remains at 0 if no explicit choice was made.
- CmdArgs.push_back("-gcodeview");
- }
- }
+ if (const Arg *A = Args.getLastArg(options::OPT_gcodeview)) {
+ if (checkDebugInfoOption(A, Args, D, TC))
+ EmitCodeView = true;
}
+ // If the user asked for debug info but did not explicitly specify -gcodeview
+ // or -gdwarf, ask the toolchain for the default format.
if (!EmitCodeView && DWARFVersion == 0 &&
- DebugInfoKind != codegenoptions::NoDebugInfo)
- DWARFVersion = TC.GetDefaultDwarfVersion();
+ DebugInfoKind != codegenoptions::NoDebugInfo) {
+ switch (TC.getDefaultDebugFormat()) {
+ case codegenoptions::DIF_CodeView:
+ EmitCodeView = true;
+ break;
+ case codegenoptions::DIF_DWARF:
+ DWARFVersion = TC.GetDefaultDwarfVersion();
+ break;
+ }
+ }
// -gline-directives-only supported only for the DWARF debug info.
if (DWARFVersion == 0 && DebugInfoKind == codegenoptions::DebugDirectivesOnly)
@@ -3047,15 +3185,19 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
// -gsplit-dwarf should turn on -g and enable the backend dwarf
// splitting and extraction.
- // FIXME: Currently only works on Linux.
- if (T.isOSLinux()) {
+ // FIXME: Currently only works on Linux and Fuchsia.
+ if (T.isOSLinux() || T.isOSFuchsia()) {
if (!SplitDWARFInlining)
CmdArgs.push_back("-fno-split-dwarf-inlining");
- if (SplitDWARFArg) {
+ if (DwarfFission != DwarfFissionKind::None) {
if (DebugInfoKind == codegenoptions::NoDebugInfo)
DebugInfoKind = codegenoptions::LimitedDebugInfo;
- CmdArgs.push_back("-enable-split-dwarf");
+
+ if (DwarfFission == DwarfFissionKind::Single)
+ CmdArgs.push_back("-enable-split-dwarf=single");
+ else
+ CmdArgs.push_back("-enable-split-dwarf");
}
}
@@ -3086,6 +3228,19 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
CmdArgs.push_back("-gembed-source");
}
+ if (EmitCodeView) {
+ CmdArgs.push_back("-gcodeview");
+
+ // Emit codeview type hashes if requested.
+ if (Args.hasFlag(options::OPT_gcodeview_ghash,
+ options::OPT_gno_codeview_ghash, false)) {
+ CmdArgs.push_back("-gcodeview-ghash");
+ }
+ }
+
+ // Adjust the debug info kind for the given toolchain.
+ TC.adjustDebugInfoKind(DebugInfoKind, Args);
+
RenderDebugEnablingArgs(Args, CmdArgs, DebugInfoKind, DWARFVersion,
DebuggerTuning);
@@ -3100,7 +3255,8 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
const auto *PubnamesArg =
Args.getLastArg(options::OPT_ggnu_pubnames, options::OPT_gno_gnu_pubnames,
options::OPT_gpubnames, options::OPT_gno_pubnames);
- if (SplitDWARFArg || DebuggerTuning == llvm::DebuggerKind::LLDB ||
+ if (DwarfFission != DwarfFissionKind::None ||
+ DebuggerTuning == llvm::DebuggerKind::LLDB ||
(PubnamesArg && checkDebugInfoOption(PubnamesArg, Args, D, TC)))
if (!PubnamesArg ||
(!PubnamesArg->getOption().matches(options::OPT_gno_gnu_pubnames) &&
@@ -3110,6 +3266,11 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
? "-gpubnames"
: "-ggnu-pubnames");
+ if (Args.hasFlag(options::OPT_fdebug_ranges_base_address,
+ options::OPT_fno_debug_ranges_base_address, false)) {
+ CmdArgs.push_back("-fdebug-ranges-base-address");
+ }
+
// -gdwarf-aranges turns on the emission of the aranges section in the
// backend.
// Always enabled for SCE tuning.
@@ -3172,18 +3333,15 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
bool IsCuda = JA.isOffloading(Action::OFK_Cuda);
bool IsHIP = JA.isOffloading(Action::OFK_HIP);
bool IsOpenMPDevice = JA.isDeviceOffloading(Action::OFK_OpenMP);
- bool IsModulePrecompile =
- isa<PrecompileJobAction>(JA) && JA.getType() == types::TY_ModuleFile;
bool IsHeaderModulePrecompile = isa<HeaderModulePrecompileJobAction>(JA);
// A header module compilation doesn't have a main input file, so invent a
// fake one as a placeholder.
- // FIXME: Pick the language based on the header file language.
const char *ModuleName = [&]{
auto *ModuleNameArg = Args.getLastArg(options::OPT_fmodule_name_EQ);
return ModuleNameArg ? ModuleNameArg->getValue() : "";
}();
- InputInfo HeaderModuleInput(types::TY_CXXModule, ModuleName, ModuleName);
+ InputInfo HeaderModuleInput(Inputs[0].getType(), ModuleName, ModuleName);
const InputInfo &Input =
IsHeaderModulePrecompile ? HeaderModuleInput : Inputs[0];
@@ -3194,10 +3352,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
for (const InputInfo &I : Inputs) {
if (&I == &Input) {
// This is the primary input.
- } else if (IsModulePrecompile &&
+ } else if (IsHeaderModulePrecompile &&
types::getPrecompiledType(I.getType()) == types::TY_PCH) {
- types::ID Expected =
- types::lookupHeaderTypeForSourceType(Inputs[0].getType());
+ types::ID Expected = HeaderModuleInput.getType();
if (I.getType() != Expected) {
D.Diag(diag::err_drv_module_header_wrong_kind)
<< I.getFilename() << types::getTypeName(I.getType())
@@ -3316,19 +3473,14 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Also ignore explicit -force_cpusubtype_ALL option.
(void)Args.hasArg(options::OPT_force__cpusubtype__ALL);
} else if (isa<PrecompileJobAction>(JA)) {
- // Use PCH if the user requested it.
- bool UsePCH = D.CCCUsePCH;
-
if (JA.getType() == types::TY_Nothing)
CmdArgs.push_back("-fsyntax-only");
else if (JA.getType() == types::TY_ModuleFile)
CmdArgs.push_back(IsHeaderModulePrecompile
? "-emit-header-module"
: "-emit-module-interface");
- else if (UsePCH)
- CmdArgs.push_back("-emit-pch");
else
- CmdArgs.push_back("-emit-pth");
+ CmdArgs.push_back("-emit-pch");
} else if (isa<VerifyPCHJobAction>(JA)) {
CmdArgs.push_back("-verify-pch");
} else {
@@ -3805,22 +3957,21 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
types::ID InputType = Input.getType();
if (D.IsCLMode())
AddClangCLArgs(Args, InputType, CmdArgs, &DebugInfoKind, &EmitCodeView);
- else
- EmitCodeView = Args.hasArg(options::OPT_gcodeview);
- const Arg *SplitDWARFArg = nullptr;
+ DwarfFissionKind DwarfFission;
RenderDebugOptions(TC, D, RawTriple, Args, EmitCodeView, IsWindowsMSVC,
- CmdArgs, DebugInfoKind, SplitDWARFArg);
+ CmdArgs, DebugInfoKind, DwarfFission);
// Add the split debug info name to the command lines here so we
// can propagate it to the backend.
- bool SplitDWARF = SplitDWARFArg && RawTriple.isOSLinux() &&
+ bool SplitDWARF = (DwarfFission != DwarfFissionKind::None) &&
+ (RawTriple.isOSLinux() || RawTriple.isOSFuchsia()) &&
(isa<AssembleJobAction>(JA) || isa<CompileJobAction>(JA) ||
isa<BackendJobAction>(JA));
const char *SplitDWARFOut;
if (SplitDWARF) {
CmdArgs.push_back("-split-dwarf-file");
- SplitDWARFOut = SplitDebugName(Args, Input);
+ SplitDWARFOut = SplitDebugName(Args, Output);
CmdArgs.push_back(SplitDWARFOut);
}
@@ -4101,6 +4252,17 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_relocatable_pch))
CmdArgs.push_back("-relocatable-pch");
+ if (const Arg *A = Args.getLastArg(options::OPT_fcf_runtime_abi_EQ)) {
+ static const char *kCFABIs[] = {
+ "standalone", "objc", "swift", "swift-5.0", "swift-4.2", "swift-4.1",
+ };
+
+ if (find(kCFABIs, StringRef(A->getValue())) == std::end(kCFABIs))
+ D.Diag(diag::err_drv_invalid_cf_runtime_abi) << A->getValue();
+ else
+ A->render(Args, CmdArgs);
+ }
+
if (Arg *A = Args.getLastArg(options::OPT_fconstant_string_class_EQ)) {
CmdArgs.push_back("-fconstant-string-class");
CmdArgs.push_back(A->getValue());
@@ -4168,6 +4330,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
Args.AddLastArg(CmdArgs, options::OPT_fvisibility_inlines_hidden);
+ Args.AddLastArg(CmdArgs, options::OPT_fvisibility_global_new_delete_hidden);
Args.AddLastArg(CmdArgs, options::OPT_ftlsmodel_EQ);
@@ -4208,6 +4371,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_fopenmp_simd,
options::OPT_fno_openmp_simd);
Args.AddAllArgs(CmdArgs, options::OPT_fopenmp_version_EQ);
+ Args.AddAllArgs(CmdArgs, options::OPT_fopenmp_cuda_number_of_sm_EQ);
+ Args.AddAllArgs(CmdArgs, options::OPT_fopenmp_cuda_blocks_per_sm_EQ);
// When in OpenMP offloading mode with NVPTX target, forward
// cuda-mode flag
@@ -4293,8 +4458,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_pthread);
- Args.AddLastArg(CmdArgs, options::OPT_mspeculative_load_hardening,
- options::OPT_mno_speculative_load_hardening);
+ if (Args.hasFlag(options::OPT_mspeculative_load_hardening,
+ options::OPT_mno_speculative_load_hardening, false))
+ CmdArgs.push_back(Args.MakeArgString("-mspeculative-load-hardening"));
RenderSSPOptions(TC, Args, CmdArgs, KernelOrKext);
@@ -4355,6 +4521,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
else
A->render(Args, CmdArgs);
}
+ Args.AddLastArg(CmdArgs, options::OPT_fprofile_remapping_file_EQ);
RenderBuiltinOptions(TC, RawTriple, Args, CmdArgs);
@@ -4786,7 +4953,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// nice to enable this when doing a crashdump for modules as well.
if (Args.hasFlag(options::OPT_frewrite_includes,
options::OPT_fno_rewrite_includes, false) ||
- (C.isForDiagnostics() && (RewriteImports || !HaveModules)))
+ (C.isForDiagnostics() && !HaveModules))
CmdArgs.push_back("-frewrite-includes");
// Only allow -traditional or -traditional-cpp outside in preprocessing modes.
@@ -4896,14 +5063,22 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec = D.getClangProgramPath();
- // Optionally embed the -cc1 level arguments into the debug info, for build
- // analysis.
+ // Optionally embed the -cc1 level arguments into the debug info or a
+ // section, for build analysis.
// Also record command line arguments into the debug info if
// -grecord-gcc-switches options is set on.
// By default, -gno-record-gcc-switches is set on and no recording.
- if (TC.UseDwarfDebugFlags() ||
- Args.hasFlag(options::OPT_grecord_gcc_switches,
- options::OPT_gno_record_gcc_switches, false)) {
+ auto GRecordSwitches =
+ Args.hasFlag(options::OPT_grecord_command_line,
+ options::OPT_gno_record_command_line, false);
+ auto FRecordSwitches =
+ Args.hasFlag(options::OPT_frecord_command_line,
+ options::OPT_fno_record_command_line, false);
+ if (FRecordSwitches && !Triple.isOSBinFormatELF())
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << Args.getLastArg(options::OPT_frecord_command_line)->getAsString(Args)
+ << TripleStr;
+ if (TC.UseDwarfDebugFlags() || GRecordSwitches || FRecordSwitches) {
ArgStringList OriginalArgs;
for (const auto &Arg : Args)
Arg->render(Args, OriginalArgs);
@@ -4916,20 +5091,27 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Flags += " ";
Flags += EscapedArg;
}
- CmdArgs.push_back("-dwarf-debug-flags");
- CmdArgs.push_back(Args.MakeArgString(Flags));
+ auto FlagsArgString = Args.MakeArgString(Flags);
+ if (TC.UseDwarfDebugFlags() || GRecordSwitches) {
+ CmdArgs.push_back("-dwarf-debug-flags");
+ CmdArgs.push_back(FlagsArgString);
+ }
+ if (FRecordSwitches) {
+ CmdArgs.push_back("-record-command-line");
+ CmdArgs.push_back(FlagsArgString);
+ }
}
- if (IsCuda) {
- // Host-side cuda compilation receives all device-side outputs in a single
- // fatbin as Inputs[1]. Include the binary with -fcuda-include-gpubinary.
- if (CudaDeviceInput) {
+ // Host-side cuda compilation receives all device-side outputs in a single
+ // fatbin as Inputs[1]. Include the binary with -fcuda-include-gpubinary.
+ if ((IsCuda || IsHIP) && CudaDeviceInput) {
CmdArgs.push_back("-fcuda-include-gpubinary");
CmdArgs.push_back(CudaDeviceInput->getFilename());
- }
+ if (Args.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc, false))
+ CmdArgs.push_back("-fgpu-rdc");
+ }
- if (Args.hasFlag(options::OPT_fcuda_rdc, options::OPT_fno_cuda_rdc, false))
- CmdArgs.push_back("-fcuda-rdc");
+ if (IsCuda) {
if (Args.hasFlag(options::OPT_fcuda_short_ptr,
options::OPT_fno_cuda_short_ptr, false))
CmdArgs.push_back("-fcuda-short-ptr");
@@ -5066,6 +5248,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
+ // Make the compile command echo its inputs for /showFilenames.
+ if (Output.getType() == types::TY_Object &&
+ Args.hasFlag(options::OPT__SLASH_showFilenames,
+ options::OPT__SLASH_showFilenames_, false)) {
+ C.getJobs().getJobs().back()->setPrintInputFilenames(true);
+ }
+
if (Arg *A = Args.getLastArg(options::OPT_pg))
if (!shouldUseFramePointer(Args, Triple))
D.Diag(diag::err_drv_argument_not_allowed_with) << "-fomit-frame-pointer"
@@ -5074,8 +5263,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Claim some arguments which clang supports automatically.
// -fpch-preprocess is used with gcc to add a special marker in the output to
- // include the PCH file. Clang's PTH solution is completely transparent, so we
- // do not need to deal with it at all.
+ // include the PCH file.
Args.ClaimAllArgs(options::OPT_fpch_preprocess);
// Claim some arguments which clang doesn't support, but we don't
@@ -5370,7 +5558,6 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
*DebugInfoKind = codegenoptions::LimitedDebugInfo;
else
*DebugInfoKind = codegenoptions::DebugLineTablesOnly;
- CmdArgs.push_back("-gcodeview");
} else {
*EmitCodeView = false;
}
@@ -5404,6 +5591,16 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
if (VolatileOptionID == options::OPT__SLASH_volatile_ms)
CmdArgs.push_back("-fms-volatile");
+ if (Args.hasFlag(options::OPT__SLASH_Zc_dllexportInlines_,
+ options::OPT__SLASH_Zc_dllexportInlines,
+ false)) {
+ if (Args.hasArg(options::OPT__SLASH_fallback)) {
+ D.Diag(clang::diag::err_drv_dllexport_inlines_and_fallback);
+ } else {
+ CmdArgs.push_back("-fno-dllexport-inlines");
+ }
+ }
+
Arg *MostGeneralArg = Args.getLastArg(options::OPT__SLASH_vmg);
Arg *BestCaseArg = Args.getLastArg(options::OPT__SLASH_vmb);
if (MostGeneralArg && BestCaseArg)
@@ -5751,10 +5948,12 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- if (Args.hasArg(options::OPT_gsplit_dwarf) &&
- getToolChain().getTriple().isOSLinux()) {
+ const llvm::Triple &T = getToolChain().getTriple();
+ Arg *A;
+ if ((getDebugFissionKind(D, Args, A) == DwarfFissionKind::Split) &&
+ (T.isOSLinux() || T.isOSFuchsia())) {
CmdArgs.push_back("-split-dwarf-file");
- CmdArgs.push_back(SplitDebugName(Args, Input));
+ CmdArgs.push_back(SplitDebugName(Args, Output));
}
assert(Input.isFilename() && "Invalid input.");
diff --git a/lib/Driver/ToolChains/CommonArgs.cpp b/lib/Driver/ToolChains/CommonArgs.cpp
index 9e6aeea958..2bdff39f0d 100644
--- a/lib/Driver/ToolChains/CommonArgs.cpp
+++ b/lib/Driver/ToolChains/CommonArgs.cpp
@@ -14,13 +14,13 @@
#include "Arch/PPC.h"
#include "Arch/SystemZ.h"
#include "Arch/X86.h"
+#include "HIP.h"
#include "Hexagon.h"
#include "InputInfo.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/Version.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Config/config.h"
#include "clang/Driver/Action.h"
#include "clang/Driver/Compilation.h"
@@ -51,6 +51,7 @@
#include "llvm/Support/Program.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/TargetParser.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/YAMLParser.h"
using namespace clang::driver;
@@ -559,6 +560,40 @@ static bool addSanitizerDynamicList(const ToolChain &TC, const ArgList &Args,
return false;
}
+static void addSanitizerLibPath(const ToolChain &TC, const ArgList &Args,
+ ArgStringList &CmdArgs, StringRef Name) {
+ for (const auto &LibPath : TC.getLibraryPaths()) {
+ if (!LibPath.empty()) {
+ SmallString<128> P(LibPath);
+ llvm::sys::path::append(P, Name);
+ if (TC.getVFS().exists(P))
+ CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + P));
+ }
+ }
+}
+
+void tools::addSanitizerPathLibArgs(const ToolChain &TC, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ const SanitizerArgs &SanArgs = TC.getSanitizerArgs();
+ if (SanArgs.needsAsanRt()) {
+ addSanitizerLibPath(TC, Args, CmdArgs, "asan");
+ }
+ if (SanArgs.needsHwasanRt()) {
+ addSanitizerLibPath(TC, Args, CmdArgs, "hwasan");
+ }
+ if (SanArgs.needsLsanRt()) {
+ addSanitizerLibPath(TC, Args, CmdArgs, "lsan");
+ }
+ if (SanArgs.needsMsanRt()) {
+ addSanitizerLibPath(TC, Args, CmdArgs, "msan");
+ }
+ if (SanArgs.needsTsanRt()) {
+ addSanitizerLibPath(TC, Args, CmdArgs, "tsan");
+ }
+}
+
void tools::linkSanitizerRuntimeDeps(const ToolChain &TC,
ArgStringList &CmdArgs) {
// Force linking against the system libraries sanitizers depends on
@@ -773,21 +808,18 @@ bool tools::areOptimizationsEnabled(const ArgList &Args) {
return false;
}
-const char *tools::SplitDebugName(const ArgList &Args, const InputInfo &Input) {
- Arg *FinalOutput = Args.getLastArg(options::OPT_o);
- if (FinalOutput && Args.hasArg(options::OPT_c)) {
- SmallString<128> T(FinalOutput->getValue());
- llvm::sys::path::replace_extension(T, "dwo");
- return Args.MakeArgString(T);
- } else {
- // Use the compilation dir.
- SmallString<128> T(
- Args.getLastArgValue(options::OPT_fdebug_compilation_dir));
- SmallString<128> F(llvm::sys::path::stem(Input.getBaseInput()));
- llvm::sys::path::replace_extension(F, "dwo");
- T += F;
- return Args.MakeArgString(F);
- }
+const char *tools::SplitDebugName(const ArgList &Args,
+ const InputInfo &Output) {
+ SmallString<128> F(Output.isFilename()
+ ? Output.getFilename()
+ : llvm::sys::path::stem(Output.getBaseInput()));
+
+ if (Arg *A = Args.getLastArg(options::OPT_gsplit_dwarf_EQ))
+ if (StringRef(A->getValue()) == "single")
+ return Args.MakeArgString(F);
+
+ llvm::sys::path::replace_extension(F, "dwo");
+ return Args.MakeArgString(F);
}
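
A small sketch of the naming behaviour the rewritten SplitDebugName gives: the .dwo name is now derived from the output file rather than the input, and -gsplit-dwarf=single keeps the object file name itself. The file names and the std::filesystem-based helper are illustrative only; the real code uses llvm::sys::path.

#include <filesystem>
#include <iostream>
#include <string>

// Derive the split-debug file name from the output name, mirroring the logic
// above: "single" mode reuses the object file, otherwise swap in a .dwo suffix.
static std::string splitDebugName(const std::string &Output, bool SingleMode) {
  if (SingleMode)
    return Output; // e.g. foo.o stays foo.o
  return std::filesystem::path(Output).replace_extension(".dwo").string();
}

int main() {
  std::cout << splitDebugName("foo.o", /*SingleMode=*/false) << "\n"; // foo.dwo
  std::cout << splitDebugName("foo.o", /*SingleMode=*/true) << "\n";  // foo.o
}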
void tools::SplitDebugInfo(const ToolChain &TC, Compilation &C, const Tool &T,
@@ -1337,6 +1369,18 @@ void tools::AddHIPLinkerScript(const ToolChain &TC, Compilation &C,
if (!JA.isHostOffloading(Action::OFK_HIP))
return;
+ InputInfoList DeviceInputs;
+ for (const auto &II : Inputs) {
+ const Action *A = II.getAction();
+ // Is this a device linking action?
+ if (A && isa<LinkJobAction>(A) && A->isDeviceOffloading(Action::OFK_HIP)) {
+ DeviceInputs.push_back(II);
+ }
+ }
+
+ if (DeviceInputs.empty())
+ return;
+
// Create temporary linker script. Keep it if save-temps is enabled.
const char *LKS;
SmallString<256> Name = llvm::sys::path::filename(Output.getFilename());
@@ -1364,39 +1408,12 @@ void tools::AddHIPLinkerScript(const ToolChain &TC, Compilation &C,
"Wrong platform");
(void)HIPTC;
- // Construct clang-offload-bundler command to bundle object files for
- // for different GPU archs.
- ArgStringList BundlerArgs;
- BundlerArgs.push_back(Args.MakeArgString("-type=o"));
-
- // ToDo: Remove the dummy host binary entry which is required by
- // clang-offload-bundler.
- std::string BundlerTargetArg = "-targets=host-x86_64-unknown-linux";
- std::string BundlerInputArg = "-inputs=/dev/null";
-
- for (const auto &II : Inputs) {
- const Action *A = II.getAction();
- // Is this a device linking action?
- if (A && isa<LinkJobAction>(A) && A->isDeviceOffloading(Action::OFK_HIP)) {
- BundlerTargetArg = BundlerTargetArg + ",hip-amdgcn-amd-amdhsa-" +
- StringRef(A->getOffloadingArch()).str();
- BundlerInputArg = BundlerInputArg + "," + II.getFilename();
- }
- }
- BundlerArgs.push_back(Args.MakeArgString(BundlerTargetArg));
- BundlerArgs.push_back(Args.MakeArgString(BundlerInputArg));
-
- std::string BundleFileName = C.getDriver().GetTemporaryPath("BUNDLE", "o");
+ // The output file name needs to persist through the compilation, therefore
+ // it needs to be created through MakeArgString.
+ std::string BundleFileName = C.getDriver().GetTemporaryPath("BUNDLE", "hipfb");
const char *BundleFile =
C.addTempFile(C.getArgs().MakeArgString(BundleFileName.c_str()));
- auto BundlerOutputArg =
- Args.MakeArgString(std::string("-outputs=").append(BundleFile));
- BundlerArgs.push_back(BundlerOutputArg);
-
- SmallString<128> BundlerPath(C.getDriver().Dir);
- llvm::sys::path::append(BundlerPath, "clang-offload-bundler");
- const char *Bundler = Args.MakeArgString(BundlerPath);
- C.addCommand(llvm::make_unique<Command>(JA, T, Bundler, BundlerArgs, Inputs));
+ AMDGCN::constructHIPFatbinCommand(C, JA, BundleFile, DeviceInputs, Args, T);
// Add commands to embed target binaries. We ensure that each section and
// image is 16-byte aligned. This is not mandatory, but increases the
@@ -1416,6 +1433,10 @@ void tools::AddHIPLinkerScript(const ToolChain &TC, Compilation &C,
LksStream << " PROVIDE_HIDDEN(__hip_fatbin = .);\n";
LksStream << " " << BundleFileName << "\n";
LksStream << " }\n";
+ LksStream << " /DISCARD/ :\n";
+ LksStream << " {\n";
+ LksStream << " * ( __CLANG_OFFLOAD_BUNDLE__* )\n";
+ LksStream << " }\n";
LksStream << "}\n";
LksStream << "INSERT BEFORE .data\n";
LksStream.flush();
diff --git a/lib/Driver/ToolChains/CommonArgs.h b/lib/Driver/ToolChains/CommonArgs.h
index e8ebe2225e..3704b2e01b 100644
--- a/lib/Driver/ToolChains/CommonArgs.h
+++ b/lib/Driver/ToolChains/CommonArgs.h
@@ -32,6 +32,10 @@ void claimNoWarnArgs(const llvm::opt::ArgList &Args);
bool addSanitizerRuntimes(const ToolChain &TC, const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs);
+void addSanitizerPathLibArgs(const ToolChain &TC,
+ const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs);
+
void linkSanitizerRuntimeDeps(const ToolChain &TC,
llvm::opt::ArgStringList &CmdArgs);
@@ -59,7 +63,7 @@ void AddHIPLinkerScript(const ToolChain &TC, Compilation &C,
const Tool &T);
const char *SplitDebugName(const llvm::opt::ArgList &Args,
- const InputInfo &Input);
+ const InputInfo &Output);
void SplitDebugInfo(const ToolChain &TC, Compilation &C, const Tool &T,
const JobAction &JA, const llvm::opt::ArgList &Args,
diff --git a/lib/Driver/ToolChains/CrossWindows.cpp b/lib/Driver/ToolChains/CrossWindows.cpp
index 6ca04a8a3a..795356026f 100644
--- a/lib/Driver/ToolChains/CrossWindows.cpp
+++ b/lib/Driver/ToolChains/CrossWindows.cpp
@@ -20,6 +20,7 @@ using namespace clang::driver;
using namespace clang::driver::toolchains;
using llvm::opt::ArgList;
+using llvm::opt::ArgStringList;
void tools::CrossWindows::Assembler::ConstructJob(
Compilation &C, const JobAction &JA, const InputInfo &Output,
diff --git a/lib/Driver/ToolChains/Cuda.cpp b/lib/Driver/ToolChains/Cuda.cpp
index 08e49fa6a5..57b8d4340e 100644
--- a/lib/Driver/ToolChains/Cuda.cpp
+++ b/lib/Driver/ToolChains/Cuda.cpp
@@ -11,7 +11,6 @@
#include "CommonArgs.h"
#include "InputInfo.h"
#include "clang/Basic/Cuda.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Config/config.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Distro.h"
@@ -23,6 +22,7 @@
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/Program.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include <system_error>
using namespace clang::driver;
@@ -114,7 +114,7 @@ CudaInstallationDetector::CudaInstallationDetector(
for (const char *Ver : Versions)
Candidates.emplace_back(D.SysRoot + "/usr/local/cuda-" + Ver);
- if (Distro(D.getVFS()).IsDebian())
+ if (Distro(D.getVFS()).IsDebian() || Distro(D.getVFS()).IsUbuntu())
// Special case for Debian to have nvidia-cuda-toolkit work
// out of the box. More info on http://bugs.debian.org/882505
Candidates.emplace_back(D.SysRoot + "/usr/lib/cuda");
@@ -278,32 +278,44 @@ void CudaInstallationDetector::print(raw_ostream &OS) const {
}
namespace {
- /// Debug info kind.
-enum DebugInfoKind {
- NoDebug, /// No debug info.
- LineTableOnly, /// Line tables only.
- FullDebug /// Full debug info.
+/// Debug info level for the NVPTX devices. We may need to emit a different
+/// debug info level for the host and for the device itself. This type controls
+/// emission of the debug info for the devices. It either disables debug info
+/// emission completely, or emits debug directives only, or emits the same
+/// debug info as for the host.
+enum DeviceDebugInfoLevel {
+ DisableDebugInfo, /// Do not emit debug info for the devices.
+ DebugDirectivesOnly, /// Emit only debug directives.
+ EmitSameDebugInfoAsHost, /// Use the same debug info level as for the
+ /// host.
};
} // anonymous namespace
-static DebugInfoKind mustEmitDebugInfo(const ArgList &Args) {
- Arg *A = Args.getLastArg(options::OPT_O_Group);
- if (Args.hasFlag(options::OPT_cuda_noopt_device_debug,
- options::OPT_no_cuda_noopt_device_debug,
- !A || A->getOption().matches(options::OPT_O0))) {
- if (const Arg *A = Args.getLastArg(options::OPT_g_Group)) {
- const Option &Opt = A->getOption();
- if (Opt.matches(options::OPT_gN_Group)) {
- if (Opt.matches(options::OPT_g0) || Opt.matches(options::OPT_ggdb0))
- return NoDebug;
- if (Opt.matches(options::OPT_gline_tables_only) ||
- Opt.matches(options::OPT_ggdb1))
- return LineTableOnly;
- }
- return FullDebug;
+/// Determine the debug info level for the NVPTX devices. If debug info for
+/// both the host and the device is disabled (-g0/-ggdb0 or no debug options at
+/// all), emit no debug info. If only debug directives are requested for both
+/// host and device (-gline-directives-only), or debug info for the device
+/// alone is disabled (optimization is on and --cuda-noopt-device-debug was not
+/// specified), emit only debug directives for the device. Otherwise, use the
+/// same debug info level as for the host (limited to the DWARF2 standard).
+static DeviceDebugInfoLevel mustEmitDebugInfo(const ArgList &Args) {
+ const Arg *A = Args.getLastArg(options::OPT_O_Group);
+ bool IsDebugEnabled = !A || A->getOption().matches(options::OPT_O0) ||
+ Args.hasFlag(options::OPT_cuda_noopt_device_debug,
+ options::OPT_no_cuda_noopt_device_debug,
+ /*Default=*/false);
+ if (const Arg *A = Args.getLastArg(options::OPT_g_Group)) {
+ const Option &Opt = A->getOption();
+ if (Opt.matches(options::OPT_gN_Group)) {
+ if (Opt.matches(options::OPT_g0) || Opt.matches(options::OPT_ggdb0))
+ return DisableDebugInfo;
+ if (Opt.matches(options::OPT_gline_directives_only))
+ return DebugDirectivesOnly;
}
+ return IsDebugEnabled ? EmitSameDebugInfoAsHost : DebugDirectivesOnly;
}
- return NoDebug;
+ return DisableDebugInfo;
}
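
For clarity, a compact sketch of the decision the reworked mustEmitDebugInfo makes, assuming the host debug options have already been classified into booleans; the flag spellings follow the code above, everything else is illustrative.

#include <cassert>

enum class DeviceDebugLevel { Disable, DirectivesOnly, SameAsHost };

static DeviceDebugLevel deviceDebugLevel(bool AnyDebugRequested,
                                         bool DebugDisabled,
                                         bool LineDirectivesOnly,
                                         bool OptimizationOn,
                                         bool NooptDeviceDebug) {
  if (!AnyDebugRequested || DebugDisabled) // no -g at all, or -g0/-ggdb0
    return DeviceDebugLevel::Disable;
  if (LineDirectivesOnly)                  // -gline-directives-only
    return DeviceDebugLevel::DirectivesOnly;
  // Full device debug info only when optimization is off (-O0 or no -O at all)
  // or --cuda-noopt-device-debug forces it.
  return (!OptimizationOn || NooptDeviceDebug) ? DeviceDebugLevel::SameAsHost
                                               : DeviceDebugLevel::DirectivesOnly;
}

int main() {
  // -g -O2 without --cuda-noopt-device-debug: directives only on the device.
  assert(deviceDebugLevel(true, false, false, true, false) ==
         DeviceDebugLevel::DirectivesOnly);
  // -g -O0: the device gets the same debug info level as the host.
  assert(deviceDebugLevel(true, false, false, false, false) ==
         DeviceDebugLevel::SameAsHost);
}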
void NVPTX::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
@@ -337,8 +349,8 @@ void NVPTX::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
ArgStringList CmdArgs;
CmdArgs.push_back(TC.getTriple().isArch64Bit() ? "-m64" : "-m32");
- DebugInfoKind DIKind = mustEmitDebugInfo(Args);
- if (DIKind == FullDebug) {
+ DeviceDebugInfoLevel DIKind = mustEmitDebugInfo(Args);
+ if (DIKind == EmitSameDebugInfoAsHost) {
// ptxas does not accept -g option if optimization is enabled, so
// we ignore the compiler's -O* options if we want debug info.
CmdArgs.push_back("-g");
@@ -374,7 +386,7 @@ void NVPTX::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
// to no optimizations, but ptxas's default is -O3.
CmdArgs.push_back("-O0");
}
- if (DIKind == LineTableOnly)
+ if (DIKind == DebugDirectivesOnly)
CmdArgs.push_back("-lineinfo");
// Pass -v to ptxas if it was passed to the driver.
@@ -398,8 +410,8 @@ void NVPTX::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fnoopenmp_relocatable_target,
/*Default=*/true);
else if (JA.isOffloading(Action::OFK_Cuda))
- Relocatable = Args.hasFlag(options::OPT_fcuda_rdc,
- options::OPT_fno_cuda_rdc, /*Default=*/false);
+ Relocatable = Args.hasFlag(options::OPT_fgpu_rdc,
+ options::OPT_fno_gpu_rdc, /*Default=*/false);
if (Relocatable)
CmdArgs.push_back("-c");
@@ -445,7 +457,7 @@ void NVPTX::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(TC.getTriple().isArch64Bit() ? "-64" : "-32");
CmdArgs.push_back(Args.MakeArgString("--create"));
CmdArgs.push_back(Args.MakeArgString(Output.getFilename()));
- if (mustEmitDebugInfo(Args) == FullDebug)
+ if (mustEmitDebugInfo(Args) == EmitSameDebugInfoAsHost)
CmdArgs.push_back("-g");
for (const auto& II : Inputs) {
@@ -498,7 +510,7 @@ void NVPTX::OpenMPLinker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Output.getFilename());
} else
assert(Output.isNothing() && "Invalid output.");
- if (mustEmitDebugInfo(Args) == FullDebug)
+ if (mustEmitDebugInfo(Args) == EmitSameDebugInfoAsHost)
CmdArgs.push_back("-g");
if (Args.hasArg(options::OPT_v))
@@ -609,9 +621,9 @@ void CudaToolChain::addClangTargetOptions(
options::OPT_fno_cuda_approx_transcendentals, false))
CC1Args.push_back("-fcuda-approx-transcendentals");
- if (DriverArgs.hasFlag(options::OPT_fcuda_rdc, options::OPT_fno_cuda_rdc,
+ if (DriverArgs.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc,
false))
- CC1Args.push_back("-fcuda-rdc");
+ CC1Args.push_back("-fgpu-rdc");
}
if (DriverArgs.hasArg(options::OPT_nocudalib))
@@ -704,6 +716,21 @@ bool CudaToolChain::supportsDebugInfoOption(const llvm::opt::Arg *A) const {
O.matches(options::OPT_gcolumn_info);
}
+void CudaToolChain::adjustDebugInfoKind(
+ codegenoptions::DebugInfoKind &DebugInfoKind, const ArgList &Args) const {
+ switch (mustEmitDebugInfo(Args)) {
+ case DisableDebugInfo:
+ DebugInfoKind = codegenoptions::NoDebugInfo;
+ break;
+ case DebugDirectivesOnly:
+ DebugInfoKind = codegenoptions::DebugDirectivesOnly;
+ break;
+ case EmitSameDebugInfoAsHost:
+ // Use same debug info level as the host.
+ break;
+ }
+}
+
void CudaToolChain::AddCudaIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
// Check our CUDA version if we're going to include the CUDA headers.
diff --git a/lib/Driver/ToolChains/Cuda.h b/lib/Driver/ToolChains/Cuda.h
index 01580cb669..1d63ede411 100644
--- a/lib/Driver/ToolChains/Cuda.h
+++ b/lib/Driver/ToolChains/Cuda.h
@@ -159,6 +159,8 @@ public:
bool isPICDefaultForced() const override { return false; }
bool SupportsProfiling() const override { return false; }
bool supportsDebugInfoOption(const llvm::opt::Arg *A) const override;
+ void adjustDebugInfoKind(codegenoptions::DebugInfoKind &DebugInfoKind,
+ const llvm::opt::ArgList &Args) const override;
bool IsMathErrnoDefault() const override { return false; }
void AddCudaIncludeArgs(const llvm::opt::ArgList &DriverArgs,
diff --git a/lib/Driver/ToolChains/Darwin.cpp b/lib/Driver/ToolChains/Darwin.cpp
index 9d7f7848a5..e5dafa2b09 100644
--- a/lib/Driver/ToolChains/Darwin.cpp
+++ b/lib/Driver/ToolChains/Darwin.cpp
@@ -12,7 +12,6 @@
#include "CommonArgs.h"
#include "clang/Basic/AlignedAllocation.h"
#include "clang/Basic/ObjCRuntime.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
@@ -23,6 +22,7 @@
#include "llvm/Support/Path.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/TargetParser.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include <cstdlib> // ::getenv
using namespace clang::driver;
@@ -98,7 +98,7 @@ void darwin::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
SourceAction = SourceAction->getInputs()[0];
}
- // If -fno-integrated-as is used add -Q to the darwin assember driver to make
+ // If -fno-integrated-as is used add -Q to the darwin assembler driver to make
// sure it runs its system assembler not clang's integrated assembler.
// Applicable to darwin11+ and Xcode 4+. darwin<10 lacked integrated-as.
// FIXME: at run-time detect assembler capabilities or rely on version
@@ -1034,9 +1034,17 @@ void Darwin::addProfileRTLibs(const ArgList &Args,
// runtime, automatically export symbols necessary to implement some of the
// runtime's functionality.
if (hasExportSymbolDirective(Args)) {
- addExportedSymbol(CmdArgs, "___llvm_profile_filename");
- addExportedSymbol(CmdArgs, "___llvm_profile_raw_version");
- addExportedSymbol(CmdArgs, "_lprofCurFilename");
+ if (needsGCovInstrumentation(Args)) {
+ addExportedSymbol(CmdArgs, "___gcov_flush");
+ addExportedSymbol(CmdArgs, "_flush_fn_list");
+ addExportedSymbol(CmdArgs, "_writeout_fn_list");
+ } else {
+ addExportedSymbol(CmdArgs, "___llvm_profile_filename");
+ addExportedSymbol(CmdArgs, "___llvm_profile_raw_version");
+ addExportedSymbol(CmdArgs, "_lprofCurFilename");
+ addExportedSymbol(CmdArgs, "_lprofMergeValueProfData");
+ }
+ addExportedSymbol(CmdArgs, "_lprofDirMode");
}
}
@@ -1674,6 +1682,38 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
}
}
+void DarwinClang::AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ // The implementation from a base class will pass through the -stdlib to
+ // CC1Args.
+ // FIXME: this should not be necessary, remove usages in the frontend
+ // (e.g. HeaderSearchOptions::UseLibcxx) and don't pipe -stdlib.
+ ToolChain::AddClangCXXStdlibIncludeArgs(DriverArgs, CC1Args);
+
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
+ DriverArgs.hasArg(options::OPT_nostdincxx))
+ return;
+
+ switch (GetCXXStdlibType(DriverArgs)) {
+ case ToolChain::CST_Libcxx: {
+ llvm::StringRef InstallDir = getDriver().getInstalledDir();
+ if (InstallDir.empty())
+ break;
+ // On Darwin, libc++ may be installed alongside the compiler in
+ // include/c++/v1.
+ // Get from 'foo/bin' to 'foo'.
+ SmallString<128> P = llvm::sys::path::parent_path(InstallDir);
+ // Get to 'foo/include/c++/v1'.
+ llvm::sys::path::append(P, "include", "c++", "v1");
+ addSystemInclude(DriverArgs, CC1Args, P);
+ break;
+ }
+ case ToolChain::CST_Libstdcxx:
+ // FIXME: should we do something about it?
+ break;
+ }
+}
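
A tiny sketch of the path derivation performed above, with a hypothetical install directory; the real code uses llvm::sys::path rather than std::filesystem.

#include <filesystem>
#include <iostream>

int main() {
  // Hypothetical installed bin directory -> sibling libc++ header directory,
  // mirroring the 'foo/bin' -> 'foo/include/c++/v1' lookup above.
  std::filesystem::path InstallDir = "/opt/llvm/bin";
  std::filesystem::path P =
      InstallDir.parent_path() / "include" / "c++" / "v1";
  std::cout << P << "\n"; // "/opt/llvm/include/c++/v1"
}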
void DarwinClang::AddCXXStdlibLibArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
CXXStdlibType Type = GetCXXStdlibType(Args);
diff --git a/lib/Driver/ToolChains/Darwin.h b/lib/Driver/ToolChains/Darwin.h
index 7a0f74e67a..e6a88dce3c 100644
--- a/lib/Driver/ToolChains/Darwin.h
+++ b/lib/Driver/ToolChains/Darwin.h
@@ -494,6 +494,10 @@ public:
void AddLinkRuntimeLibArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const override;
+ void AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const override;
diff --git a/lib/Driver/ToolChains/FreeBSD.cpp b/lib/Driver/ToolChains/FreeBSD.cpp
index c16eabf069..7a176d260a 100644
--- a/lib/Driver/ToolChains/FreeBSD.cpp
+++ b/lib/Driver/ToolChains/FreeBSD.cpp
@@ -12,11 +12,11 @@
#include "Arch/Mips.h"
#include "Arch/Sparc.h"
#include "CommonArgs.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/SanitizerArgs.h"
#include "llvm/Option/ArgList.h"
+#include "llvm/Support/VirtualFileSystem.h"
using namespace clang::driver;
using namespace clang::driver::tools;
diff --git a/lib/Driver/ToolChains/Fuchsia.cpp b/lib/Driver/ToolChains/Fuchsia.cpp
index 54c34ff159..de2c7411c5 100644
--- a/lib/Driver/ToolChains/Fuchsia.cpp
+++ b/lib/Driver/ToolChains/Fuchsia.cpp
@@ -76,10 +76,11 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
else if (Args.hasArg(options::OPT_shared))
CmdArgs.push_back("-shared");
+ const SanitizerArgs &SanArgs = ToolChain.getSanitizerArgs();
+
if (!Args.hasArg(options::OPT_shared)) {
std::string Dyld = D.DyldPrefix;
- if (ToolChain.getSanitizerArgs().needsAsanRt() &&
- ToolChain.getSanitizerArgs().needsSharedRt())
+ if (SanArgs.needsAsanRt() && SanArgs.needsSharedRt())
Dyld += "asan/";
Dyld += "ld.so.1";
CmdArgs.push_back("-dynamic-linker");
@@ -98,6 +99,8 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_L);
Args.AddAllArgs(CmdArgs, options::OPT_u);
+ addSanitizerPathLibArgs(ToolChain, Args, CmdArgs);
+
ToolChain.AddFilePathLibArgs(Args, CmdArgs);
if (D.isUsingLTO()) {
@@ -106,8 +109,8 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
D.getLTOMode() == LTOK_Thin);
}
- addSanitizerRuntimes(ToolChain, Args, CmdArgs);
-
+ bool NeedsSanitizerDeps = addSanitizerRuntimes(ToolChain, Args, CmdArgs);
+ bool NeedsXRayDeps = addXRayRuntime(ToolChain, Args, CmdArgs);
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
ToolChain.addProfileRTLibs(Args, CmdArgs);
@@ -119,15 +122,24 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (ToolChain.ShouldLinkCXXStdlib(Args)) {
bool OnlyLibstdcxxStatic = Args.hasArg(options::OPT_static_libstdcxx) &&
!Args.hasArg(options::OPT_static);
+ CmdArgs.push_back("--push-state");
+ CmdArgs.push_back("--as-needed");
if (OnlyLibstdcxxStatic)
CmdArgs.push_back("-Bstatic");
ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
if (OnlyLibstdcxxStatic)
CmdArgs.push_back("-Bdynamic");
+ CmdArgs.push_back("-lm");
+ CmdArgs.push_back("--pop-state");
}
- CmdArgs.push_back("-lm");
}
+ if (NeedsSanitizerDeps)
+ linkSanitizerRuntimeDeps(ToolChain, CmdArgs);
+
+ if (NeedsXRayDeps)
+ linkXRayRuntimeDeps(ToolChain, CmdArgs);
+
AddRunTimeLibs(ToolChain, D, CmdArgs, Args);
if (Args.hasArg(options::OPT_pthread) ||
diff --git a/lib/Driver/ToolChains/Gnu.cpp b/lib/Driver/ToolChains/Gnu.cpp
index fda02b50a2..3850e1c02e 100644
--- a/lib/Driver/ToolChains/Gnu.cpp
+++ b/lib/Driver/ToolChains/Gnu.cpp
@@ -16,7 +16,6 @@
#include "Arch/SystemZ.h"
#include "CommonArgs.h"
#include "Linux.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Config/config.h" // for GCC_INSTALL_PREFIX
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
@@ -27,6 +26,7 @@
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/TargetParser.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include <system_error>
using namespace clang::driver;
@@ -228,6 +228,30 @@ void tools::gcc::Linker::RenderExtraToolArgs(const JobAction &JA,
// The types are (hopefully) good enough.
}
+// On Arm the endianness of the output file is determined by the target and
+// can be overridden by the pseudo-target flags '-mlittle-endian'/'-EL' and
+// '-mbig-endian'/'-EB'. Unlike other targets, the flag does not result in a
+// normalized triple, so we must handle it here.
+static bool isArmBigEndian(const llvm::Triple &Triple,
+ const ArgList &Args) {
+ bool IsBigEndian = false;
+ switch (Triple.getArch()) {
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumbeb:
+ IsBigEndian = true;
+ LLVM_FALLTHROUGH;
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ if (Arg *A = Args.getLastArg(options::OPT_mlittle_endian,
+ options::OPT_mbig_endian))
+ IsBigEndian = !A->getOption().matches(options::OPT_mlittle_endian);
+ break;
+ default:
+ break;
+ }
+ return IsBigEndian;
+}
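
A few worked combinations, based only on the code above and the linker changes later in this hunk, showing which endianness flag the assembler and linker end up with; the triples are examples, not an exhaustive list.

#include <iostream>
#include <string>

int main() {
  // Triple, endianness flag given to the driver, and the resulting -EB/-EL.
  struct Case { std::string Triple, DriverFlag, Result; } Cases[] = {
      {"armv7a-linux-gnueabi", "-mbig-endian", "-EB"},
      {"armeb-linux-gnueabi", "", "-EB"},
      {"armeb-linux-gnueabi", "-mlittle-endian", "-EL"},
      {"aarch64_be-linux-gnu", "", "-EB"},
  };
  for (const auto &C : Cases)
    std::cout << C.Triple << " " << C.DriverFlag << " -> " << C.Result << "\n";
}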
+
static const char *getLDMOption(const llvm::Triple &T, const ArgList &Args) {
switch (T.getArch()) {
case llvm::Triple::x86:
@@ -240,10 +264,9 @@ static const char *getLDMOption(const llvm::Triple &T, const ArgList &Args) {
return "aarch64linuxb";
case llvm::Triple::arm:
case llvm::Triple::thumb:
- return "armelf_linux_eabi";
case llvm::Triple::armeb:
case llvm::Triple::thumbeb:
- return "armelfb_linux_eabi";
+ return isArmBigEndian(T, Args) ? "armelfb_linux_eabi" : "armelf_linux_eabi";
case llvm::Triple::ppc:
return "elf32ppclinux";
case llvm::Triple::ppc64:
@@ -264,11 +287,13 @@ static const char *getLDMOption(const llvm::Triple &T, const ArgList &Args) {
case llvm::Triple::mipsel:
return "elf32ltsmip";
case llvm::Triple::mips64:
- if (tools::mips::hasMipsAbiArg(Args, "n32"))
+ if (tools::mips::hasMipsAbiArg(Args, "n32") ||
+ T.getEnvironment() == llvm::Triple::GNUABIN32)
return "elf32btsmipn32";
return "elf64btsmip";
case llvm::Triple::mips64el:
- if (tools::mips::hasMipsAbiArg(Args, "n32"))
+ if (tools::mips::hasMipsAbiArg(Args, "n32") ||
+ T.getEnvironment() == llvm::Triple::GNUABIN32)
return "elf32ltsmipn32";
return "elf64ltsmip";
case llvm::Triple::systemz:
@@ -335,8 +360,13 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_s))
CmdArgs.push_back("-s");
- if (Arch == llvm::Triple::armeb || Arch == llvm::Triple::thumbeb)
- arm::appendEBLinkFlags(Args, CmdArgs, Triple);
+ if (Triple.isARM() || Triple.isThumb() || Triple.isAArch64()) {
+ bool IsBigEndian = isArmBigEndian(Triple, Args);
+ if (IsBigEndian)
+ arm::appendBE8LinkFlag(Args, CmdArgs, Triple);
+ IsBigEndian = IsBigEndian || Arch == llvm::Triple::aarch64_be;
+ CmdArgs.push_back(IsBigEndian ? "-EB" : "-EL");
+ }
// Most Android ARM64 targets should enable the linker fix for erratum
// 843419. Only non-Cortex-A53 devices are allowed to skip this flag.
@@ -638,6 +668,7 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
case llvm::Triple::thumb:
case llvm::Triple::thumbeb: {
const llvm::Triple &Triple2 = getToolChain().getTriple();
+ CmdArgs.push_back(isArmBigEndian(Triple2, Args) ? "-EB" : "-EL");
switch (Triple2.getSubArch()) {
case llvm::Triple::ARMSubArch_v7:
CmdArgs.push_back("-mfpu=neon");
@@ -670,6 +701,8 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
}
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_be: {
+ CmdArgs.push_back(
+ getToolChain().getArch() == llvm::Triple::aarch64_be ? "-EB" : "-EL");
Args.AddLastArg(CmdArgs, options::OPT_march_EQ);
normalizeCPUNamesForAssembler(Args, CmdArgs);
@@ -784,17 +817,17 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
if (Args.hasArg(options::OPT_gsplit_dwarf) &&
getToolChain().getTriple().isOSLinux())
SplitDebugInfo(getToolChain(), C, *this, JA, Args, Output,
- SplitDebugName(Args, Inputs[0]));
+ SplitDebugName(Args, Output));
}
namespace {
// Filter to remove Multilibs that don't exist as a suffix to Path
class FilterNonExistent {
StringRef Base, File;
- vfs::FileSystem &VFS;
+ llvm::vfs::FileSystem &VFS;
public:
- FilterNonExistent(StringRef Base, StringRef File, vfs::FileSystem &VFS)
+ FilterNonExistent(StringRef Base, StringRef File, llvm::vfs::FileSystem &VFS)
: Base(Base), File(File), VFS(VFS) {}
bool operator()(const Multilib &M) {
return !VFS.exists(Base + M.gccSuffix() + File);
@@ -940,7 +973,7 @@ static bool findMipsCsMultilibs(const Multilib::flags_list &Flags,
return false;
}
-static bool findMipsAndroidMultilibs(vfs::FileSystem &VFS, StringRef Path,
+static bool findMipsAndroidMultilibs(llvm::vfs::FileSystem &VFS, StringRef Path,
const Multilib::flags_list &Flags,
FilterNonExistent &NonExistent,
DetectedMultilibs &Result) {
@@ -1618,10 +1651,18 @@ Generic_GCC::GCCVersion Generic_GCC::GCCVersion::Parse(StringRef VersionText) {
return GoodVersion;
}
-static llvm::StringRef getGCCToolchainDir(const ArgList &Args) {
+static llvm::StringRef getGCCToolchainDir(const ArgList &Args,
+ llvm::StringRef SysRoot) {
const Arg *A = Args.getLastArg(clang::driver::options::OPT_gcc_toolchain);
if (A)
return A->getValue();
+
+ // If we have a SysRoot, ignore GCC_INSTALL_PREFIX.
+ // GCC_INSTALL_PREFIX specifies the gcc installation for the default
+ // sysroot and is likely not valid with a different sysroot.
+ if (!SysRoot.empty())
+ return "";
+
return GCC_INSTALL_PREFIX;
}
@@ -1653,7 +1694,7 @@ void Generic_GCC::GCCInstallationDetector::init(
SmallVector<std::string, 8> Prefixes(D.PrefixDirs.begin(),
D.PrefixDirs.end());
- StringRef GCCToolchainDir = getGCCToolchainDir(Args);
+ StringRef GCCToolchainDir = getGCCToolchainDir(Args, D.SysRoot);
if (GCCToolchainDir != "") {
if (GCCToolchainDir.back() == '/')
GCCToolchainDir = GCCToolchainDir.drop_back(); // remove the /
@@ -1762,7 +1803,8 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
std::string PrefixDir = SysRoot.str() + "/usr/gcc";
std::error_code EC;
- for (vfs::directory_iterator LI = D.getVFS().dir_begin(PrefixDir, EC), LE;
+ for (llvm::vfs::directory_iterator LI = D.getVFS().dir_begin(PrefixDir, EC),
+ LE;
!EC && LI != LE; LI = LI.increment(EC)) {
StringRef VersionText = llvm::sys::path::filename(LI->path());
GCCVersion CandidateVersion = GCCVersion::Parse(VersionText);
@@ -1805,19 +1847,21 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
static const char *const AArch64LibDirs[] = {"/lib64", "/lib"};
static const char *const AArch64Triples[] = {
"aarch64-none-linux-gnu", "aarch64-linux-gnu", "aarch64-redhat-linux",
- "aarch64-suse-linux"};
+ "aarch64-suse-linux", "aarch64-linux-android"};
static const char *const AArch64beLibDirs[] = {"/lib"};
static const char *const AArch64beTriples[] = {"aarch64_be-none-linux-gnu",
"aarch64_be-linux-gnu"};
static const char *const ARMLibDirs[] = {"/lib"};
- static const char *const ARMTriples[] = {"arm-linux-gnueabi"};
+ static const char *const ARMTriples[] = {"arm-linux-gnueabi",
+ "arm-linux-androideabi"};
static const char *const ARMHFTriples[] = {"arm-linux-gnueabihf",
"armv7hl-redhat-linux-gnueabi",
"armv6hl-suse-linux-gnueabi",
"armv7hl-suse-linux-gnueabi"};
static const char *const ARMebLibDirs[] = {"/lib"};
- static const char *const ARMebTriples[] = {"armeb-linux-gnueabi"};
+ static const char *const ARMebTriples[] = {"armeb-linux-gnueabi",
+ "armeb-linux-androideabi"};
static const char *const ARMebHFTriples[] = {
"armeb-linux-gnueabihf", "armebv7hl-redhat-linux-gnueabi"};
@@ -1828,32 +1872,44 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
"x86_64-redhat-linux", "x86_64-suse-linux",
"x86_64-manbo-linux-gnu", "x86_64-linux-gnu",
"x86_64-slackware-linux", "x86_64-unknown-linux",
- "x86_64-amazon-linux"};
+ "x86_64-amazon-linux", "x86_64-linux-android"};
static const char *const X32LibDirs[] = {"/libx32"};
static const char *const X86LibDirs[] = {"/lib32", "/lib"};
static const char *const X86Triples[] = {
"i686-linux-gnu", "i686-pc-linux-gnu", "i486-linux-gnu",
"i386-linux-gnu", "i386-redhat-linux6E", "i686-redhat-linux",
"i586-redhat-linux", "i386-redhat-linux", "i586-suse-linux",
- "i486-slackware-linux", "i686-montavista-linux", "i586-linux-gnu"};
+ "i486-slackware-linux", "i686-montavista-linux", "i586-linux-gnu",
+ "i686-linux-android", "i386-gnu", "i486-gnu",
+ "i586-gnu", "i686-gnu"};
static const char *const MIPSLibDirs[] = {"/lib"};
- static const char *const MIPSTriples[] = {"mips-linux-gnu", "mips-mti-linux",
- "mips-mti-linux-gnu",
- "mips-img-linux-gnu"};
+ static const char *const MIPSTriples[] = {
+ "mips-linux-gnu", "mips-mti-linux", "mips-mti-linux-gnu",
+ "mips-img-linux-gnu", "mipsisa32r6-linux-gnu"};
static const char *const MIPSELLibDirs[] = {"/lib"};
- static const char *const MIPSELTriples[] = {"mipsel-linux-gnu",
- "mips-img-linux-gnu"};
+ static const char *const MIPSELTriples[] = {
+ "mipsel-linux-gnu", "mips-img-linux-gnu", "mipsisa32r6el-linux-gnu",
+ "mipsel-linux-android"};
static const char *const MIPS64LibDirs[] = {"/lib64", "/lib"};
static const char *const MIPS64Triples[] = {
- "mips64-linux-gnu", "mips-mti-linux-gnu", "mips-img-linux-gnu",
- "mips64-linux-gnuabi64"};
+ "mips64-linux-gnu", "mips-mti-linux-gnu",
+ "mips-img-linux-gnu", "mips64-linux-gnuabi64",
+ "mipsisa64r6-linux-gnu", "mipsisa64r6-linux-gnuabi64"};
static const char *const MIPS64ELLibDirs[] = {"/lib64", "/lib"};
static const char *const MIPS64ELTriples[] = {
- "mips64el-linux-gnu", "mips-mti-linux-gnu", "mips-img-linux-gnu",
- "mips64el-linux-gnuabi64"};
-
+ "mips64el-linux-gnu", "mips-mti-linux-gnu",
+ "mips-img-linux-gnu", "mips64el-linux-gnuabi64",
+ "mipsisa64r6el-linux-gnu", "mipsisa64r6el-linux-gnuabi64",
+ "mips64el-linux-android"};
+
+ static const char *const MIPSN32LibDirs[] = {"/lib32"};
+ static const char *const MIPSN32Triples[] = {"mips64-linux-gnuabin32",
+ "mipsisa64r6-linux-gnuabin32"};
+ static const char *const MIPSN32ELLibDirs[] = {"/lib32"};
+ static const char *const MIPSN32ELTriples[] = {
+ "mips64el-linux-gnuabin32", "mipsisa64r6el-linux-gnuabin32"};
static const char *const PPCLibDirs[] = {"/lib32", "/lib"};
static const char *const PPCTriples[] = {
@@ -2050,6 +2106,8 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
TripleAliases.append(begin(MIPSTriples), end(MIPSTriples));
BiarchLibDirs.append(begin(MIPS64LibDirs), end(MIPS64LibDirs));
BiarchTripleAliases.append(begin(MIPS64Triples), end(MIPS64Triples));
+ BiarchLibDirs.append(begin(MIPSN32LibDirs), end(MIPSN32LibDirs));
+ BiarchTripleAliases.append(begin(MIPSN32Triples), end(MIPSN32Triples));
break;
case llvm::Triple::mipsel:
LibDirs.append(begin(MIPSELLibDirs), end(MIPSELLibDirs));
@@ -2057,18 +2115,24 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
TripleAliases.append(begin(MIPSTriples), end(MIPSTriples));
BiarchLibDirs.append(begin(MIPS64ELLibDirs), end(MIPS64ELLibDirs));
BiarchTripleAliases.append(begin(MIPS64ELTriples), end(MIPS64ELTriples));
+ BiarchLibDirs.append(begin(MIPSN32ELLibDirs), end(MIPSN32ELLibDirs));
+ BiarchTripleAliases.append(begin(MIPSN32ELTriples), end(MIPSN32ELTriples));
break;
case llvm::Triple::mips64:
LibDirs.append(begin(MIPS64LibDirs), end(MIPS64LibDirs));
TripleAliases.append(begin(MIPS64Triples), end(MIPS64Triples));
BiarchLibDirs.append(begin(MIPSLibDirs), end(MIPSLibDirs));
BiarchTripleAliases.append(begin(MIPSTriples), end(MIPSTriples));
+ BiarchLibDirs.append(begin(MIPSN32LibDirs), end(MIPSN32LibDirs));
+ BiarchTripleAliases.append(begin(MIPSN32Triples), end(MIPSN32Triples));
break;
case llvm::Triple::mips64el:
LibDirs.append(begin(MIPS64ELLibDirs), end(MIPS64ELLibDirs));
TripleAliases.append(begin(MIPS64ELTriples), end(MIPS64ELTriples));
BiarchLibDirs.append(begin(MIPSELLibDirs), end(MIPSELLibDirs));
BiarchTripleAliases.append(begin(MIPSELTriples), end(MIPSELTriples));
+ BiarchLibDirs.append(begin(MIPSN32ELLibDirs), end(MIPSN32ELLibDirs));
+ BiarchTripleAliases.append(begin(MIPSN32ELTriples), end(MIPSN32ELTriples));
BiarchTripleAliases.append(begin(MIPSTriples), end(MIPSTriples));
break;
case llvm::Triple::ppc:
@@ -2197,6 +2261,9 @@ void Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple(
// triple.
{"i386-linux-gnu/gcc/" + CandidateTriple.str(), "../../..",
(TargetArch == llvm::Triple::x86 &&
+ TargetTriple.getOS() != llvm::Triple::Solaris)},
+ {"i386-gnu/gcc/" + CandidateTriple.str(), "../../..",
+ (TargetArch == llvm::Triple::x86 &&
TargetTriple.getOS() != llvm::Triple::Solaris)}};
for (auto &Suffix : Suffixes) {
@@ -2205,7 +2272,7 @@ void Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple(
StringRef LibSuffix = Suffix.LibSuffix;
std::error_code EC;
- for (vfs::directory_iterator
+ for (llvm::vfs::directory_iterator
LI = D.getVFS().dir_begin(LibDir + "/" + LibSuffix, EC),
LE;
!EC && LI != LE; LI = LI.increment(EC)) {
@@ -2405,18 +2472,9 @@ bool Generic_GCC::IsIntegratedAssemblerDefault() const {
case llvm::Triple::systemz:
case llvm::Triple::mips:
case llvm::Triple::mipsel:
- return true;
case llvm::Triple::mips64:
case llvm::Triple::mips64el:
- // Enabled for Debian, Android, FreeBSD and OpenBSD mips64/mipsel, as they
- // can precisely identify the ABI in use (Debian) or only use N64 for MIPS64
- // (Android). Other targets are unable to distinguish N32 from N64.
- if (getTriple().getEnvironment() == llvm::Triple::GNUABI64 ||
- getTriple().isAndroid() ||
- getTriple().isOSFreeBSD() ||
- getTriple().isOSOpenBSD())
- return true;
- return false;
+ return true;
default:
return false;
}
diff --git a/lib/Driver/ToolChains/HIP.cpp b/lib/Driver/ToolChains/HIP.cpp
index 6efcfaee8f..b44aadda79 100644
--- a/lib/Driver/ToolChains/HIP.cpp
+++ b/lib/Driver/ToolChains/HIP.cpp
@@ -81,8 +81,8 @@ const char *AMDGCN::Linker::constructLLVMLinkCommand(
else
FlushDenormalControlBC = "oclc_daz_opt_off.amdgcn.bc";
- BCLibs.append({"opencl.amdgcn.bc",
- "ocml.amdgcn.bc", "ockl.amdgcn.bc", "irif.amdgcn.bc",
+ BCLibs.append({"hip.amdgcn.bc", "opencl.amdgcn.bc",
+ "ocml.amdgcn.bc", "ockl.amdgcn.bc",
"oclc_finite_only_off.amdgcn.bc",
FlushDenormalControlBC,
"oclc_correctly_rounded_sqrt_on.amdgcn.bc",
@@ -154,7 +154,7 @@ const char *AMDGCN::Linker::constructLlcCommand(
llvm::StringRef OutputFilePrefix, const char *InputFileName) const {
// Construct llc command.
ArgStringList LlcArgs{InputFileName, "-mtriple=amdgcn-amd-amdhsa",
- "-filetype=obj",
+ "-filetype=obj", "-mattr=-code-object-v3",
Args.MakeArgString("-mcpu=" + SubArchName), "-o"};
std::string LlcOutputFileName =
C.getDriver().GetTemporaryPath(OutputFilePrefix, "o");
@@ -184,6 +184,40 @@ void AMDGCN::Linker::constructLldCommand(Compilation &C, const JobAction &JA,
C.addCommand(llvm::make_unique<Command>(JA, *this, Lld, LldArgs, Inputs));
}
+// Construct a clang-offload-bundler command to bundle code objects for
+// different GPUs into a HIP fat binary.
+void AMDGCN::constructHIPFatbinCommand(Compilation &C, const JobAction &JA,
+ StringRef OutputFileName, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &Args, const Tool& T) {
+ // Construct clang-offload-bundler command to bundle object files for
+ // different GPU archs.
+ ArgStringList BundlerArgs;
+ BundlerArgs.push_back(Args.MakeArgString("-type=o"));
+
+ // ToDo: Remove the dummy host binary entry which is required by
+ // clang-offload-bundler.
+ std::string BundlerTargetArg = "-targets=host-x86_64-unknown-linux";
+ std::string BundlerInputArg = "-inputs=/dev/null";
+
+ for (const auto &II : Inputs) {
+ const auto* A = II.getAction();
+ BundlerTargetArg = BundlerTargetArg + ",hip-amdgcn-amd-amdhsa-" +
+ StringRef(A->getOffloadingArch()).str();
+ BundlerInputArg = BundlerInputArg + "," + II.getFilename();
+ }
+ BundlerArgs.push_back(Args.MakeArgString(BundlerTargetArg));
+ BundlerArgs.push_back(Args.MakeArgString(BundlerInputArg));
+
+ auto BundlerOutputArg =
+ Args.MakeArgString(std::string("-outputs=").append(OutputFileName));
+ BundlerArgs.push_back(BundlerOutputArg);
+
+ SmallString<128> BundlerPath(C.getDriver().Dir);
+ llvm::sys::path::append(BundlerPath, "clang-offload-bundler");
+ const char *Bundler = Args.MakeArgString(BundlerPath);
+ C.addCommand(llvm::make_unique<Command>(JA, T, Bundler, BundlerArgs, Inputs));
+}
+
// For amdgcn the inputs of the linker job are device bitcode and output is
// object file. It calls llvm-link, opt, llc, then lld steps.
void AMDGCN::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -192,6 +226,9 @@ void AMDGCN::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const ArgList &Args,
const char *LinkingOutput) const {
+ if (JA.getType() == types::TY_HIP_FATBIN)
+ return constructHIPFatbinCommand(C, JA, Output.getFilename(), Inputs, Args, *this);
+
assert(getToolChain().getTriple().getArch() == llvm::Triple::amdgcn &&
"Unsupported target");
@@ -244,9 +281,9 @@ void HIPToolChain::addClangTargetOptions(
options::OPT_fno_cuda_approx_transcendentals, false))
CC1Args.push_back("-fcuda-approx-transcendentals");
- if (DriverArgs.hasFlag(options::OPT_fcuda_rdc, options::OPT_fno_cuda_rdc,
+ if (DriverArgs.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc,
false))
- CC1Args.push_back("-fcuda-rdc");
+ CC1Args.push_back("-fgpu-rdc");
// Default to "hidden" visibility, as object level linking will not be
// supported for the foreseeable future.
diff --git a/lib/Driver/ToolChains/HIP.h b/lib/Driver/ToolChains/HIP.h
index 40c9128e2f..3af19d44da 100644
--- a/lib/Driver/ToolChains/HIP.h
+++ b/lib/Driver/ToolChains/HIP.h
@@ -19,6 +19,11 @@ namespace driver {
namespace tools {
namespace AMDGCN {
+ // Construct command for creating HIP fatbin.
+ void constructHIPFatbinCommand(Compilation &C, const JobAction &JA,
+ StringRef OutputFileName, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs, const Tool& T);
+
// Runs llvm-link/opt/llc/lld, which links multiple LLVM bitcode, together with
// device library, then compiles it to ISA in a shared object.
class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
diff --git a/lib/Driver/ToolChains/Hexagon.cpp b/lib/Driver/ToolChains/Hexagon.cpp
index 778852ffc5..d302a3e24d 100644
--- a/lib/Driver/ToolChains/Hexagon.cpp
+++ b/lib/Driver/ToolChains/Hexagon.cpp
@@ -8,9 +8,8 @@
//===----------------------------------------------------------------------===//
#include "Hexagon.h"
-#include "InputInfo.h"
#include "CommonArgs.h"
-#include "clang/Basic/VirtualFileSystem.h"
+#include "InputInfo.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
@@ -19,6 +18,7 @@
#include "llvm/Option/ArgList.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/VirtualFileSystem.h"
using namespace clang::driver;
using namespace clang::driver::tools;
@@ -32,6 +32,7 @@ static StringRef getDefaultHvxLength(StringRef Cpu) {
.Case("v60", "64b")
.Case("v62", "64b")
.Case("v65", "64b")
+ .Case("v66", "128b")
.Default("128b");
}
@@ -75,7 +76,7 @@ static void handleHVXTargetFeatures(const Driver &D, const ArgList &Args,
// Handle -mhvx-length=.
if (Arg *A = Args.getLastArg(options::OPT_mhexagon_hvx_length_EQ)) {
- // These falgs are valid only if HVX in enabled.
+ // These flags are valid only if HVX is enabled.
if (!HasHVX)
D.Diag(diag::err_drv_invalid_hvx_length);
else if (A->getOption().matches(options::OPT_mhexagon_hvx_length_EQ))
@@ -369,9 +370,8 @@ void hexagon::Linker::ConstructJob(Compilation &C, const JobAction &JA,
constructHexagonLinkArgs(C, JA, HTC, Output, Inputs, Args, CmdArgs,
LinkingOutput);
- std::string Linker = HTC.GetProgramPath("hexagon-link");
- C.addCommand(llvm::make_unique<Command>(JA, *this, Args.MakeArgString(Linker),
- CmdArgs, Inputs));
+ const char *Exec = Args.MakeArgString(HTC.GetLinkerPath());
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
// Hexagon tools end.
diff --git a/lib/Driver/ToolChains/Hexagon.h b/lib/Driver/ToolChains/Hexagon.h
index e43b8a5b88..a9e599de7a 100644
--- a/lib/Driver/ToolChains/Hexagon.h
+++ b/lib/Driver/ToolChains/Hexagon.h
@@ -81,6 +81,9 @@ public:
void addLibStdCxxIncludePaths(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
+
+ const char *getDefaultLinker() const override { return "hexagon-link"; }
+
CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
StringRef GetGCCLibAndIncVersion() const { return GCCLibAndIncVersion.Text; }
diff --git a/lib/Driver/ToolChains/Hurd.cpp b/lib/Driver/ToolChains/Hurd.cpp
new file mode 100644
index 0000000000..ff7b685dae
--- /dev/null
+++ b/lib/Driver/ToolChains/Hurd.cpp
@@ -0,0 +1,169 @@
+//===--- Hurd.cpp - Hurd ToolChain Implementations --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Hurd.h"
+#include "CommonArgs.h"
+#include "clang/Config/config.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/Options.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/VirtualFileSystem.h"
+
+using namespace clang::driver;
+using namespace clang::driver::toolchains;
+using namespace clang;
+using namespace llvm::opt;
+
+using tools::addPathIfExists;
+
+/// Get our best guess at the multiarch triple for a target.
+///
+/// Debian-based systems are starting to use a multiarch setup where they use
+/// a target-triple directory in the library and header search paths.
+/// Unfortunately, this triple does not align with the vanilla target triple,
+/// so we provide a rough mapping here.
+static std::string getMultiarchTriple(const Driver &D,
+ const llvm::Triple &TargetTriple,
+ StringRef SysRoot) {
+ if (TargetTriple.getArch() == llvm::Triple::x86) {
+ // We use the existence of '/lib/<triple>' as a directory to detect some
+ // common hurd triples that don't quite match the Clang triple for both
+ // 32-bit and 64-bit targets. Multiarch fixes its install triples to these
+ // regardless of what the actual target triple is.
+ if (D.getVFS().exists(SysRoot + "/lib/i386-gnu"))
+ return "i386-gnu";
+ }
+
+ // For most architectures, just use whatever we have rather than trying to be
+ // clever.
+ return TargetTriple.str();
+}
+
+static StringRef getOSLibDir(const llvm::Triple &Triple, const ArgList &Args) {
+ // It happens that only x86 and PPC use the 'lib32' variant of oslibdir, and
+ // using that variant while targeting other architectures causes problems
+ // because the libraries are laid out in shared system roots that can't cope
+ // with a 'lib32' library search path being considered. So we only enable
+ // them when we know we may need it.
+ //
+ // FIXME: This is a bit of a hack. We should really unify this code for
+ // reasoning about oslibdir spellings with the lib dir spellings in the
+ // GCCInstallationDetector, but that is a more significant refactoring.
+
+ if (Triple.getArch() == llvm::Triple::x86)
+ return "lib32";
+
+ return Triple.isArch32Bit() ? "lib" : "lib64";
+}
+
+Hurd::Hurd(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args)
+ : Generic_ELF(D, Triple, Args) {
+ std::string SysRoot = computeSysRoot();
+ path_list &Paths = getFilePaths();
+
+ const std::string OSLibDir = getOSLibDir(Triple, Args);
+ const std::string MultiarchTriple = getMultiarchTriple(D, Triple, SysRoot);
+
+ // If we are currently running Clang inside of the requested system root, add
+ // its parent library paths to those searched.
+ // FIXME: It's not clear whether we should use the driver's installed
+ // directory ('Dir' below) or the ResourceDir.
+ if (StringRef(D.Dir).startswith(SysRoot)) {
+ addPathIfExists(D, D.Dir + "/../lib/" + MultiarchTriple, Paths);
+ addPathIfExists(D, D.Dir + "/../" + OSLibDir, Paths);
+ }
+
+ addPathIfExists(D, SysRoot + "/lib/" + MultiarchTriple, Paths);
+ addPathIfExists(D, SysRoot + "/lib/../" + OSLibDir, Paths);
+
+ addPathIfExists(D, SysRoot + "/usr/lib/" + MultiarchTriple, Paths);
+ addPathIfExists(D, SysRoot + "/usr/lib/../" + OSLibDir, Paths);
+
+ // If we are currently running Clang inside of the requested system root, add
+ // its parent library path to those searched.
+ // FIXME: It's not clear whether we should use the driver's installed
+ // directory ('Dir' below) or the ResourceDir.
+ if (StringRef(D.Dir).startswith(SysRoot))
+ addPathIfExists(D, D.Dir + "/../lib", Paths);
+
+ addPathIfExists(D, SysRoot + "/lib", Paths);
+ addPathIfExists(D, SysRoot + "/usr/lib", Paths);
+}
+
+bool Hurd::HasNativeLLVMSupport() const { return true; }
+
+Tool *Hurd::buildLinker() const { return new tools::gnutools::Linker(*this); }
+
+Tool *Hurd::buildAssembler() const {
+ return new tools::gnutools::Assembler(*this);
+}
+
+std::string Hurd::computeSysRoot() const {
+ if (!getDriver().SysRoot.empty())
+ return getDriver().SysRoot;
+
+ return std::string();
+}
+
+std::string Hurd::getDynamicLinker(const ArgList &Args) const {
+ if (getArch() == llvm::Triple::x86)
+ return "/lib/ld.so";
+
+ llvm_unreachable("unsupported architecture");
+}
+
+void Hurd::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ const Driver &D = getDriver();
+ std::string SysRoot = computeSysRoot();
+
+ if (DriverArgs.hasArg(clang::driver::options::OPT_nostdinc))
+ return;
+
+ if (!DriverArgs.hasArg(options::OPT_nostdlibinc))
+ addSystemInclude(DriverArgs, CC1Args, SysRoot + "/usr/local/include");
+
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ SmallString<128> P(D.ResourceDir);
+ llvm::sys::path::append(P, "include");
+ addSystemInclude(DriverArgs, CC1Args, P);
+ }
+
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
+
+ // Check for configure-time C include directories.
+ StringRef CIncludeDirs(C_INCLUDE_DIRS);
+ if (CIncludeDirs != "") {
+ SmallVector<StringRef, 5> Dirs;
+ CIncludeDirs.split(Dirs, ":");
+ for (StringRef Dir : Dirs) {
+ StringRef Prefix =
+ llvm::sys::path::is_absolute(Dir) ? StringRef(SysRoot) : "";
+ addExternCSystemInclude(DriverArgs, CC1Args, Prefix + Dir);
+ }
+ return;
+ }
+
+ // Lacking those, try to detect the correct set of system includes for the
+ // target triple.
+ if (getTriple().getArch() == llvm::Triple::x86) {
+ std::string Path = SysRoot + "/usr/include/i386-gnu";
+ if (D.getVFS().exists(Path))
+ addExternCSystemInclude(DriverArgs, CC1Args, Path);
+ }
+
+ // Add an include of '/include' directly. This isn't provided by default by
+ // system GCCs, but is often used with cross-compiling GCCs, and harmless to
+ // add even when Clang is acting as if it were a system compiler.
+ addExternCSystemInclude(DriverArgs, CC1Args, SysRoot + "/include");
+
+ addExternCSystemInclude(DriverArgs, CC1Args, SysRoot + "/usr/include");
+}
diff --git a/lib/Driver/ToolChains/Hurd.h b/lib/Driver/ToolChains/Hurd.h
new file mode 100644
index 0000000000..d14619f0e2
--- /dev/null
+++ b/lib/Driver/ToolChains/Hurd.h
@@ -0,0 +1,46 @@
+//===--- Hurd.h - Hurd ToolChain Implementations ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_Hurd_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_Hurd_H
+
+#include "Gnu.h"
+#include "clang/Driver/ToolChain.h"
+
+namespace clang {
+namespace driver {
+namespace toolchains {
+
+class LLVM_LIBRARY_VISIBILITY Hurd : public Generic_ELF {
+public:
+ Hurd(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+
+ bool HasNativeLLVMSupport() const override;
+
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
+ virtual std::string computeSysRoot() const;
+
+ virtual std::string getDynamicLinker(const llvm::opt::ArgList &Args) const;
+
+ std::vector<std::string> ExtraOpts;
+
+protected:
+ Tool *buildAssembler() const override;
+ Tool *buildLinker() const override;
+};
+
+} // end namespace toolchains
+} // end namespace driver
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_Hurd_H
diff --git a/lib/Driver/ToolChains/Linux.cpp b/lib/Driver/ToolChains/Linux.cpp
index a7f4e9332b..65ab9b2daf 100644
--- a/lib/Driver/ToolChains/Linux.cpp
+++ b/lib/Driver/ToolChains/Linux.cpp
@@ -13,7 +13,6 @@
#include "Arch/PPC.h"
#include "Arch/RISCV.h"
#include "CommonArgs.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Config/config.h"
#include "clang/Driver/Distro.h"
#include "clang/Driver/Driver.h"
@@ -23,6 +22,7 @@
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/ScopedPrinter.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include <system_error>
using namespace clang::driver;
@@ -44,6 +44,7 @@ static std::string getMultiarchTriple(const Driver &D,
llvm::Triple::EnvironmentType TargetEnvironment =
TargetTriple.getEnvironment();
bool IsAndroid = TargetTriple.isAndroid();
+ bool IsMipsR6 = TargetTriple.getSubArch() == llvm::Triple::MipsSubArch_r6;
// For most architectures, just use whatever we have rather than trying to be
// clever.
@@ -101,30 +102,36 @@ static std::string getMultiarchTriple(const Driver &D,
if (D.getVFS().exists(SysRoot + "/lib/aarch64_be-linux-gnu"))
return "aarch64_be-linux-gnu";
break;
- case llvm::Triple::mips:
- if (D.getVFS().exists(SysRoot + "/lib/mips-linux-gnu"))
- return "mips-linux-gnu";
+ case llvm::Triple::mips: {
+ std::string Arch = IsMipsR6 ? "mipsisa32r6" : "mips";
+ if (D.getVFS().exists(SysRoot + "/lib/" + Arch + "-linux-gnu"))
+ return Arch + "-linux-gnu";
break;
- case llvm::Triple::mipsel:
+ }
+ case llvm::Triple::mipsel: {
if (IsAndroid)
return "mipsel-linux-android";
- if (D.getVFS().exists(SysRoot + "/lib/mipsel-linux-gnu"))
- return "mipsel-linux-gnu";
+ std::string Arch = IsMipsR6 ? "mipsisa32r6el" : "mipsel";
+ if (D.getVFS().exists(SysRoot + "/lib/" + Arch + "-linux-gnu"))
+ return Arch + "-linux-gnu";
break;
- case llvm::Triple::mips64:
- if (D.getVFS().exists(SysRoot + "/lib/mips64-linux-gnu"))
- return "mips64-linux-gnu";
- if (D.getVFS().exists(SysRoot + "/lib/mips64-linux-gnuabi64"))
- return "mips64-linux-gnuabi64";
+ }
+ case llvm::Triple::mips64: {
+ std::string Arch = IsMipsR6 ? "mipsisa64r6" : "mips64";
+ std::string ABI = llvm::Triple::getEnvironmentTypeName(TargetEnvironment);
+ if (D.getVFS().exists(SysRoot + "/lib/" + Arch + "-linux-" + ABI))
+ return Arch + "-linux-" + ABI;
break;
- case llvm::Triple::mips64el:
+ }
+ case llvm::Triple::mips64el: {
if (IsAndroid)
return "mips64el-linux-android";
- if (D.getVFS().exists(SysRoot + "/lib/mips64el-linux-gnu"))
- return "mips64el-linux-gnu";
- if (D.getVFS().exists(SysRoot + "/lib/mips64el-linux-gnuabi64"))
- return "mips64el-linux-gnuabi64";
+ std::string Arch = IsMipsR6 ? "mipsisa64r6el" : "mips64el";
+ std::string ABI = llvm::Triple::getEnvironmentTypeName(TargetEnvironment);
+ if (D.getVFS().exists(SysRoot + "/lib/" + Arch + "-linux-" + ABI))
+ return Arch + "-linux-" + ABI;
break;
+ }
case llvm::Triple::ppc:
if (D.getVFS().exists(SysRoot + "/lib/powerpc-linux-gnuspe"))
return "powerpc-linux-gnuspe";
@@ -229,16 +236,25 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
Distro Distro(D.getVFS());
- if (Distro.IsAlpineLinux()) {
+ if (Distro.IsAlpineLinux() || Triple.isAndroid()) {
ExtraOpts.push_back("-z");
ExtraOpts.push_back("now");
}
- if (Distro.IsOpenSUSE() || Distro.IsUbuntu() || Distro.IsAlpineLinux()) {
+ if (Distro.IsOpenSUSE() || Distro.IsUbuntu() || Distro.IsAlpineLinux() ||
+ Triple.isAndroid()) {
ExtraOpts.push_back("-z");
ExtraOpts.push_back("relro");
}
+ // The lld default page size is too large for AArch64, which produces much
+ // larger .so files and images for arm64 device targets. Use 4KB page size
+ // for Android arm64 targets instead.
+ if (Triple.isAArch64() && Triple.isAndroid()) {
+ ExtraOpts.push_back("-z");
+ ExtraOpts.push_back("max-page-size=4096");
+ }
+
if (GCCInstallation.getParentLibPath().find("opt/rh/devtoolset") !=
StringRef::npos)
// With devtoolset on RHEL, we want to add a bin directory that is relative
@@ -264,15 +280,18 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
// and the MIPS ABI require .dynsym to be sorted in different ways.
// .gnu.hash needs symbols to be grouped by hash code whereas the MIPS
// ABI requires a mapping between the GOT and the symbol table.
- // Android loader does not support .gnu.hash.
+ // Android loader does not support .gnu.hash until API 23.
// Hexagon linker/loader does not support .gnu.hash
- if (!IsMips && !IsAndroid && !IsHexagon) {
+ if (!IsMips && !IsHexagon) {
if (Distro.IsRedhat() || Distro.IsOpenSUSE() || Distro.IsAlpineLinux() ||
- (Distro.IsUbuntu() && Distro >= Distro::UbuntuMaverick))
+ (Distro.IsUbuntu() && Distro >= Distro::UbuntuMaverick) ||
+ (IsAndroid && !Triple.isAndroidVersionLT(23)))
ExtraOpts.push_back("--hash-style=gnu");
- if (Distro.IsDebian() || Distro.IsOpenSUSE() || Distro == Distro::UbuntuLucid ||
- Distro == Distro::UbuntuJaunty || Distro == Distro::UbuntuKarmic)
+ if (Distro.IsDebian() || Distro.IsOpenSUSE() ||
+ Distro == Distro::UbuntuLucid || Distro == Distro::UbuntuJaunty ||
+ Distro == Distro::UbuntuKarmic ||
+ (IsAndroid && Triple.isAndroidVersionLT(23)))
ExtraOpts.push_back("--hash-style=both");
}
@@ -432,6 +451,12 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
addPathIfExists(D, SysRoot + "/usr/lib", Paths);
}
+ToolChain::CXXStdlibType Linux::GetDefaultCXXStdlibType() const {
+ if (getTriple().isAndroid())
+ return ToolChain::CST_Libcxx;
+ return ToolChain::CST_Libstdcxx;
+}
+
bool Linux::HasNativeLLVMSupport() const { return true; }
Tool *Linux::buildLinker() const { return new tools::gnutools::Linker(*this); }
@@ -694,10 +719,25 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
const StringRef MIPSELMultiarchIncludeDirs[] = {
"/usr/include/mipsel-linux-gnu"};
const StringRef MIPS64MultiarchIncludeDirs[] = {
- "/usr/include/mips64-linux-gnu", "/usr/include/mips64-linux-gnuabi64"};
+ "/usr/include/mips64-linux-gnuabi64"};
const StringRef MIPS64ELMultiarchIncludeDirs[] = {
- "/usr/include/mips64el-linux-gnu",
"/usr/include/mips64el-linux-gnuabi64"};
+ const StringRef MIPSN32MultiarchIncludeDirs[] = {
+ "/usr/include/mips64-linux-gnuabin32"};
+ const StringRef MIPSN32ELMultiarchIncludeDirs[] = {
+ "/usr/include/mips64el-linux-gnuabin32"};
+ const StringRef MIPSR6MultiarchIncludeDirs[] = {
+ "/usr/include/mipsisa32-linux-gnu"};
+ const StringRef MIPSR6ELMultiarchIncludeDirs[] = {
+ "/usr/include/mipsisa32r6el-linux-gnu"};
+ const StringRef MIPS64R6MultiarchIncludeDirs[] = {
+ "/usr/include/mipsisa64r6-linux-gnuabi64"};
+ const StringRef MIPS64R6ELMultiarchIncludeDirs[] = {
+ "/usr/include/mipsisa64r6el-linux-gnuabi64"};
+ const StringRef MIPSN32R6MultiarchIncludeDirs[] = {
+ "/usr/include/mipsisa64r6-linux-gnuabin32"};
+ const StringRef MIPSN32R6ELMultiarchIncludeDirs[] = {
+ "/usr/include/mipsisa64r6el-linux-gnuabin32"};
const StringRef PPCMultiarchIncludeDirs[] = {
"/usr/include/powerpc-linux-gnu",
"/usr/include/powerpc-linux-gnuspe"};
@@ -738,16 +778,38 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
MultiarchIncludeDirs = ARMEBMultiarchIncludeDirs;
break;
case llvm::Triple::mips:
- MultiarchIncludeDirs = MIPSMultiarchIncludeDirs;
+ if (getTriple().getSubArch() == llvm::Triple::MipsSubArch_r6)
+ MultiarchIncludeDirs = MIPSR6MultiarchIncludeDirs;
+ else
+ MultiarchIncludeDirs = MIPSMultiarchIncludeDirs;
break;
case llvm::Triple::mipsel:
- MultiarchIncludeDirs = MIPSELMultiarchIncludeDirs;
+ if (getTriple().getSubArch() == llvm::Triple::MipsSubArch_r6)
+ MultiarchIncludeDirs = MIPSR6ELMultiarchIncludeDirs;
+ else
+ MultiarchIncludeDirs = MIPSELMultiarchIncludeDirs;
break;
case llvm::Triple::mips64:
- MultiarchIncludeDirs = MIPS64MultiarchIncludeDirs;
+ if (getTriple().getSubArch() == llvm::Triple::MipsSubArch_r6)
+ if (getTriple().getEnvironment() == llvm::Triple::GNUABIN32)
+ MultiarchIncludeDirs = MIPSN32R6MultiarchIncludeDirs;
+ else
+ MultiarchIncludeDirs = MIPS64R6MultiarchIncludeDirs;
+ else if (getTriple().getEnvironment() == llvm::Triple::GNUABIN32)
+ MultiarchIncludeDirs = MIPSN32MultiarchIncludeDirs;
+ else
+ MultiarchIncludeDirs = MIPS64MultiarchIncludeDirs;
break;
case llvm::Triple::mips64el:
- MultiarchIncludeDirs = MIPS64ELMultiarchIncludeDirs;
+ if (getTriple().getSubArch() == llvm::Triple::MipsSubArch_r6)
+ if (getTriple().getEnvironment() == llvm::Triple::GNUABIN32)
+ MultiarchIncludeDirs = MIPSN32R6ELMultiarchIncludeDirs;
+ else
+ MultiarchIncludeDirs = MIPS64R6ELMultiarchIncludeDirs;
+ else if (getTriple().getEnvironment() == llvm::Triple::GNUABIN32)
+ MultiarchIncludeDirs = MIPSN32ELMultiarchIncludeDirs;
+ else
+ MultiarchIncludeDirs = MIPS64ELMultiarchIncludeDirs;
break;
case llvm::Triple::ppc:
MultiarchIncludeDirs = PPCMultiarchIncludeDirs;
@@ -876,6 +938,9 @@ void Linux::addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
// Freescale SDK C++ headers are directly in <sysroot>/usr/include/c++,
// without a subdirectory corresponding to the gcc version.
LibDir.str() + "/../include/c++",
+ // Cray's gcc installation puts headers under "g++" without a
+ // version suffix.
+ LibDir.str() + "/../include/g++",
};
for (const auto &IncludePath : LibStdCXXIncludePathCandidates) {
@@ -962,7 +1027,7 @@ void Linux::addProfileRTLibs(const llvm::opt::ArgList &Args,
// Add linker option -u__llvm_runtime_variable to cause runtime
// initialization module to be linked in.
- if (!Args.hasArg(options::OPT_coverage))
+ if ((!Args.hasArg(options::OPT_coverage)) && (!Args.hasArg(options::OPT_ftest_coverage)))
CmdArgs.push_back(Args.MakeArgString(
Twine("-u", llvm::getInstrProfRuntimeHookVarName())));
ToolChain::addProfileRTLibs(Args, CmdArgs);
diff --git a/lib/Driver/ToolChains/Linux.h b/lib/Driver/ToolChains/Linux.h
index 2f0c36190f..4a662cb4b4 100644
--- a/lib/Driver/ToolChains/Linux.h
+++ b/lib/Driver/ToolChains/Linux.h
@@ -37,6 +37,7 @@ public:
llvm::opt::ArgStringList &CC1Args) const override;
void AddIAMCUIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
+ CXXStdlibType GetDefaultCXXStdlibType() const override;
bool isPIEDefault() const override;
bool IsMathErrnoDefault() const override;
SanitizerMask getSupportedSanitizers() const override;
diff --git a/lib/Driver/ToolChains/MSVC.cpp b/lib/Driver/ToolChains/MSVC.cpp
index f60f39563f..839f313623 100644
--- a/lib/Driver/ToolChains/MSVC.cpp
+++ b/lib/Driver/ToolChains/MSVC.cpp
@@ -722,15 +722,15 @@ bool MSVCToolChain::IsIntegratedAssemblerDefault() const {
}
bool MSVCToolChain::IsUnwindTablesDefault(const ArgList &Args) const {
- // Emit unwind tables by default on Win64. All non-x86_32 Windows platforms
- // such as ARM and PPC actually require unwind tables, but LLVM doesn't know
- // how to generate them yet.
-
// Don't emit unwind tables by default for MachO targets.
if (getTriple().isOSBinFormatMachO())
return false;
- return getArch() == llvm::Triple::x86_64;
+ // All non-x86_32 Windows targets require unwind tables. However, LLVM
+ // doesn't know how to generate them for all targets, so only enable
+ // the ones that are actually implemented.
+ return getArch() == llvm::Triple::x86_64 ||
+ getArch() == llvm::Triple::aarch64;
}
bool MSVCToolChain::isPICDefault() const {
diff --git a/lib/Driver/ToolChains/MSVC.h b/lib/Driver/ToolChains/MSVC.h
index 1db589ec97..ebca0018bb 100644
--- a/lib/Driver/ToolChains/MSVC.h
+++ b/lib/Driver/ToolChains/MSVC.h
@@ -11,6 +11,7 @@
#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_MSVC_H
#include "Cuda.h"
+#include "clang/Basic/DebugInfoOptions.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
@@ -78,6 +79,18 @@ public:
bool isPIEDefault() const override;
bool isPICDefaultForced() const override;
+ /// Set CodeView as the default debug info format. Users can use -gcodeview
+ /// and -gdwarf to override the default.
+ codegenoptions::DebugInfoFormat getDefaultDebugFormat() const override {
+ return codegenoptions::DIF_CodeView;
+ }
+
+ /// Set the debugger tuning to "default", since we're definitely not tuning
+ /// for GDB.
+ llvm::DebuggerKind getDefaultDebuggerTuning() const override {
+ return llvm::DebuggerKind::Default;
+ }
+
enum class SubDirectoryType {
Bin,
Include,
diff --git a/lib/Driver/ToolChains/MinGW.cpp b/lib/Driver/ToolChains/MinGW.cpp
index a88e00f0c8..836d7c1551 100644
--- a/lib/Driver/ToolChains/MinGW.cpp
+++ b/lib/Driver/ToolChains/MinGW.cpp
@@ -10,10 +10,12 @@
#include "MinGW.h"
#include "InputInfo.h"
#include "CommonArgs.h"
+#include "clang/Config/config.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
+#include "clang/Driver/SanitizerArgs.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
@@ -52,7 +54,7 @@ void tools::MinGW::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_gsplit_dwarf))
SplitDebugInfo(getToolChain(), C, *this, JA, Args, Output,
- SplitDebugName(Args, Inputs[0]));
+ SplitDebugName(Args, Output));
}
void tools::MinGW::Linker::AddLibGCC(const ArgList &Args,
@@ -95,7 +97,7 @@ void tools::MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const char *LinkingOutput) const {
const ToolChain &TC = getToolChain();
const Driver &D = TC.getDriver();
- // const SanitizerArgs &Sanitize = TC.getSanitizerArgs();
+ const SanitizerArgs &Sanitize = TC.getSanitizerArgs();
ArgStringList CmdArgs;
@@ -187,8 +189,6 @@ void tools::MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA,
TC.AddFilePathLibArgs(Args, CmdArgs);
AddLinkerInputs(TC, Inputs, Args, CmdArgs, JA);
- // TODO: Add ASan stuff here
-
// TODO: Add profile stuff here
if (TC.ShouldLinkCXXStdlib(Args)) {
@@ -220,8 +220,24 @@ void tools::MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-lssp_nonshared");
CmdArgs.push_back("-lssp");
}
- if (Args.hasArg(options::OPT_fopenmp))
- CmdArgs.push_back("-lgomp");
+
+ if (Args.hasFlag(options::OPT_fopenmp, options::OPT_fopenmp_EQ,
+ options::OPT_fno_openmp, false)) {
+ switch (TC.getDriver().getOpenMPRuntime(Args)) {
+ case Driver::OMPRT_OMP:
+ CmdArgs.push_back("-lomp");
+ break;
+ case Driver::OMPRT_IOMP5:
+ CmdArgs.push_back("-liomp5md");
+ break;
+ case Driver::OMPRT_GOMP:
+ CmdArgs.push_back("-lgomp");
+ break;
+ case Driver::OMPRT_Unknown:
+ // Already diagnosed.
+ break;
+ }
+ }
AddLibGCC(Args, CmdArgs);
@@ -231,6 +247,24 @@ void tools::MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_pthread))
CmdArgs.push_back("-lpthread");
+ if (Sanitize.needsAsanRt()) {
+ // MinGW always links against a shared MSVCRT.
+ CmdArgs.push_back(
+ TC.getCompilerRTArgString(Args, "asan_dynamic", true));
+ CmdArgs.push_back(
+ TC.getCompilerRTArgString(Args, "asan_dynamic_runtime_thunk"));
+ CmdArgs.push_back(Args.MakeArgString("--require-defined"));
+ CmdArgs.push_back(Args.MakeArgString(TC.getArch() == llvm::Triple::x86
+ ? "___asan_seh_interceptor"
+ : "__asan_seh_interceptor"));
+ // Make sure the linker considers all object files from the dynamic
+ // runtime thunk.
+ CmdArgs.push_back(Args.MakeArgString("--whole-archive"));
+ CmdArgs.push_back(Args.MakeArgString(
+ TC.getCompilerRT(Args, "asan_dynamic_runtime_thunk")));
+ CmdArgs.push_back(Args.MakeArgString("--no-whole-archive"));
+ }
+
if (!HasWindowsApp) {
// Add system libraries. If linking to libwindowsapp.a, that import
// library replaces all these and we shouldn't accidentally try to
@@ -359,6 +393,10 @@ toolchains::MinGW::MinGW(const Driver &D, const llvm::Triple &Triple,
getFilePaths().push_back(Base + "lib");
// openSUSE
getFilePaths().push_back(Base + Arch + "/sys-root/mingw/lib");
+
+ NativeLLVMSupport =
+ Args.getLastArgValue(options::OPT_fuse_ld_EQ, CLANG_DEFAULT_LINKER)
+ .equals_lower("lld");
}
bool toolchains::MinGW::IsIntegratedAssemblerDefault() const { return true; }
@@ -386,6 +424,10 @@ Tool *toolchains::MinGW::buildLinker() const {
return new tools::MinGW::Linker(*this);
}
+bool toolchains::MinGW::HasNativeLLVMSupport() const {
+ return NativeLLVMSupport;
+}
+
bool toolchains::MinGW::IsUnwindTablesDefault(const ArgList &Args) const {
return getArch() == llvm::Triple::x86_64;
}
@@ -407,6 +449,12 @@ toolchains::MinGW::GetExceptionModel(const ArgList &Args) const {
return llvm::ExceptionHandling::DwarfCFI;
}
+SanitizerMask toolchains::MinGW::getSupportedSanitizers() const {
+ SanitizerMask Res = ToolChain::getSupportedSanitizers();
+ Res |= SanitizerKind::Address;
+ return Res;
+}
+
void toolchains::MinGW::AddCudaIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
CudaInstallation.AddCudaIncludeArgs(DriverArgs, CC1Args);
diff --git a/lib/Driver/ToolChains/MinGW.h b/lib/Driver/ToolChains/MinGW.h
index 0c3919d29f..04d23006ee 100644
--- a/lib/Driver/ToolChains/MinGW.h
+++ b/lib/Driver/ToolChains/MinGW.h
@@ -59,12 +59,16 @@ public:
MinGW(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args);
+ bool HasNativeLLVMSupport() const override;
+
bool IsIntegratedAssemblerDefault() const override;
bool IsUnwindTablesDefault(const llvm::opt::ArgList &Args) const override;
bool isPICDefault() const override;
bool isPIEDefault() const override;
bool isPICDefaultForced() const override;
+ SanitizerMask getSupportedSanitizers() const override;
+
llvm::ExceptionHandling GetExceptionModel(
const llvm::opt::ArgList &Args) const override;
@@ -97,6 +101,8 @@ private:
void findGccLibDir();
llvm::ErrorOr<std::string> findGcc();
llvm::ErrorOr<std::string> findClangRelativeSysroot();
+
+ bool NativeLLVMSupport;
};
} // end namespace toolchains
diff --git a/lib/Driver/ToolChains/Minix.cpp b/lib/Driver/ToolChains/Minix.cpp
index 39e6f90b6e..7fadcb129d 100644
--- a/lib/Driver/ToolChains/Minix.cpp
+++ b/lib/Driver/ToolChains/Minix.cpp
@@ -8,13 +8,13 @@
//===----------------------------------------------------------------------===//
#include "Minix.h"
-#include "InputInfo.h"
#include "CommonArgs.h"
-#include "clang/Basic/VirtualFileSystem.h"
+#include "InputInfo.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/Options.h"
#include "llvm/Option/ArgList.h"
+#include "llvm/Support/VirtualFileSystem.h"
using namespace clang::driver;
using namespace clang;
diff --git a/lib/Driver/ToolChains/NetBSD.cpp b/lib/Driver/ToolChains/NetBSD.cpp
index 02caafda16..73e230b3ea 100644
--- a/lib/Driver/ToolChains/NetBSD.cpp
+++ b/lib/Driver/ToolChains/NetBSD.cpp
@@ -164,7 +164,7 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
break;
case llvm::Triple::armeb:
case llvm::Triple::thumbeb:
- arm::appendEBLinkFlags(Args, CmdArgs, ToolChain.getEffectiveTriple());
+ arm::appendBE8LinkFlag(Args, CmdArgs, ToolChain.getEffectiveTriple());
CmdArgs.push_back("-m");
switch (ToolChain.getTriple().getEnvironment()) {
case llvm::Triple::EABI:
diff --git a/lib/Driver/ToolChains/OpenBSD.cpp b/lib/Driver/ToolChains/OpenBSD.cpp
index 7b98cd62bb..3d35d37b7d 100644
--- a/lib/Driver/ToolChains/OpenBSD.cpp
+++ b/lib/Driver/ToolChains/OpenBSD.cpp
@@ -111,9 +111,9 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// handled somewhere else.
Args.ClaimAllArgs(options::OPT_w);
- if (getToolChain().getArch() == llvm::Triple::mips64)
+ if (ToolChain.getArch() == llvm::Triple::mips64)
CmdArgs.push_back("-EB");
- else if (getToolChain().getArch() == llvm::Triple::mips64el)
+ else if (ToolChain.getArch() == llvm::Triple::mips64el)
CmdArgs.push_back("-EL");
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_shared)) {
@@ -138,7 +138,7 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_pie))
CmdArgs.push_back("-pie");
- if (Args.hasArg(options::OPT_nopie))
+ if (Args.hasArg(options::OPT_nopie) || Args.hasArg(options::OPT_pg))
CmdArgs.push_back("-nopie");
if (Output.isFilename()) {
@@ -149,44 +149,40 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ const char *crt0 = nullptr;
+ const char *crtbegin = nullptr;
if (!Args.hasArg(options::OPT_shared)) {
if (Args.hasArg(options::OPT_pg))
- CmdArgs.push_back(
- Args.MakeArgString(getToolChain().GetFilePath("gcrt0.o")));
+ crt0 = "gcrt0.o";
else if (Args.hasArg(options::OPT_static) &&
!Args.hasArg(options::OPT_nopie))
- CmdArgs.push_back(
- Args.MakeArgString(getToolChain().GetFilePath("rcrt0.o")));
+ crt0 = "rcrt0.o";
else
- CmdArgs.push_back(
- Args.MakeArgString(getToolChain().GetFilePath("crt0.o")));
- CmdArgs.push_back(
- Args.MakeArgString(getToolChain().GetFilePath("crtbegin.o")));
+ crt0 = "crt0.o";
+ crtbegin = "crtbegin.o";
} else {
- CmdArgs.push_back(
- Args.MakeArgString(getToolChain().GetFilePath("crtbeginS.o")));
+ crtbegin = "crtbeginS.o";
}
- }
- std::string Triple = getToolChain().getTripleString();
- if (Triple.substr(0, 6) == "x86_64")
- Triple.replace(0, 6, "amd64");
- CmdArgs.push_back(
- Args.MakeArgString("-L/usr/lib/gcc-lib/" + Triple + "/4.2.1"));
- CmdArgs.push_back(Args.MakeArgString("-L/usr/lib"));
+ if (crt0)
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crt0)));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtbegin)));
+ }
- Args.AddAllArgs(CmdArgs, {options::OPT_L, options::OPT_T_Group,
- options::OPT_e, options::OPT_s, options::OPT_t,
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+ ToolChain.AddFilePathLibArgs(Args, CmdArgs);
+ Args.AddAllArgs(CmdArgs, {options::OPT_T_Group, options::OPT_e,
+ options::OPT_s, options::OPT_t,
options::OPT_Z_Flag, options::OPT_r});
bool NeedsSanitizerDeps = addSanitizerRuntimes(ToolChain, Args, CmdArgs);
bool NeedsXRayDeps = addXRayRuntime(ToolChain, Args, CmdArgs);
- AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
+ AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
if (D.CCCIsCXX()) {
- if (getToolChain().ShouldLinkCXXStdlib(Args))
- getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
+ if (ToolChain.ShouldLinkCXXStdlib(Args))
+ ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
if (Args.hasArg(options::OPT_pg))
CmdArgs.push_back("-lm_p");
else
@@ -202,7 +198,7 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
// FIXME: For some reason GCC passes -lgcc before adding
// the default system libraries. Just mimic this for now.
- CmdArgs.push_back("-lgcc");
+ CmdArgs.push_back("-lcompiler_rt");
if (Args.hasArg(options::OPT_pthread)) {
if (!Args.hasArg(options::OPT_shared) && Args.hasArg(options::OPT_pg))
@@ -218,21 +214,22 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-lc");
}
- CmdArgs.push_back("-lgcc");
+ CmdArgs.push_back("-lcompiler_rt");
}
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ const char *crtend = nullptr;
if (!Args.hasArg(options::OPT_shared))
- CmdArgs.push_back(
- Args.MakeArgString(getToolChain().GetFilePath("crtend.o")));
+ crtend = "crtend.o";
else
- CmdArgs.push_back(
- Args.MakeArgString(getToolChain().GetFilePath("crtendS.o")));
+ crtend = "crtendS.o";
+
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtend)));
}
const char *Exec = Args.MakeArgString(
- !NeedsSanitizerDeps ? getToolChain().GetLinkerPath()
- : getToolChain().GetProgramPath("ld.lld"));
+ !NeedsSanitizerDeps ? ToolChain.GetLinkerPath()
+ : ToolChain.GetProgramPath("ld.lld"));
C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
@@ -257,8 +254,7 @@ SanitizerMask OpenBSD::getSupportedSanitizers() const {
OpenBSD::OpenBSD(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
: Generic_ELF(D, Triple, Args) {
- getFilePaths().push_back(getDriver().Dir + "/../lib");
- getFilePaths().push_back("/usr/lib");
+ getFilePaths().push_back(getDriver().SysRoot + "/usr/lib");
}
void OpenBSD::AddCXXStdlibLibArgs(const ArgList &Args,
diff --git a/lib/Driver/ToolChains/OpenBSD.h b/lib/Driver/ToolChains/OpenBSD.h
index bf8dfa4653..1912abdb95 100644
--- a/lib/Driver/ToolChains/OpenBSD.h
+++ b/lib/Driver/ToolChains/OpenBSD.h
@@ -58,6 +58,14 @@ public:
bool IsMathErrnoDefault() const override { return false; }
bool IsObjCNonFragileABIDefault() const override { return true; }
bool isPIEDefault() const override { return true; }
+
+ RuntimeLibType GetDefaultRuntimeLibType() const override {
+ return ToolChain::RLT_CompilerRT;
+ }
+ CXXStdlibType GetDefaultCXXStdlibType() const override {
+ return ToolChain::CST_Libcxx;
+ }
+
void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const override;
diff --git a/lib/Driver/XRayArgs.cpp b/lib/Driver/XRayArgs.cpp
index 3d058c2f4c..3d294998cd 100644
--- a/lib/Driver/XRayArgs.cpp
+++ b/lib/Driver/XRayArgs.cpp
@@ -58,6 +58,15 @@ XRayArgs::XRayArgs(const ToolChain &TC, const ArgList &Args) {
D.Diag(diag::err_drv_clang_unsupported)
<< (std::string(XRayInstrumentOption) + " on " + Triple.str());
}
+ } else if (Triple.getOS() == llvm::Triple::Fuchsia) {
+ switch (Triple.getArch()) {
+ case llvm::Triple::x86_64:
+ case llvm::Triple::aarch64:
+ break;
+ default:
+ D.Diag(diag::err_drv_clang_unsupported)
+ << (std::string(XRayInstrumentOption) + " on " + Triple.str());
+ }
} else {
D.Diag(diag::err_drv_clang_unsupported)
<< (std::string(XRayInstrumentOption) + " on " + Triple.str());
diff --git a/lib/Edit/RewriteObjCFoundationAPI.cpp b/lib/Edit/RewriteObjCFoundationAPI.cpp
index 3a305ecd94..7c9ab17009 100644
--- a/lib/Edit/RewriteObjCFoundationAPI.cpp
+++ b/lib/Edit/RewriteObjCFoundationAPI.cpp
@@ -1078,13 +1078,16 @@ static bool rewriteToNumericBoxedExpression(const ObjCMessageExpr *Msg,
case CK_NonAtomicToAtomic:
case CK_CopyAndAutoreleaseBlockObject:
case CK_BuiltinFnToFnPtr:
- case CK_ZeroToOCLEvent:
- case CK_ZeroToOCLQueue:
+ case CK_ZeroToOCLOpaqueType:
case CK_IntToOCLSampler:
return false;
case CK_BooleanToSignedIntegral:
llvm_unreachable("OpenCL-specific cast in Objective-C?");
+
+ case CK_FixedPointCast:
+ case CK_FixedPointToBoolean:
+ llvm_unreachable("Fixed point types are disabled for Objective-C");
}
}
diff --git a/lib/Format/ContinuationIndenter.cpp b/lib/Format/ContinuationIndenter.cpp
index c9c96456f4..c369b94b99 100644
--- a/lib/Format/ContinuationIndenter.cpp
+++ b/lib/Format/ContinuationIndenter.cpp
@@ -1135,7 +1135,8 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
// }, a, b, c);
if (Current.isNot(tok::comment) && Previous &&
Previous->isOneOf(tok::l_brace, TT_ArrayInitializerLSquare) &&
- !Previous->is(TT_DictLiteral) && State.Stack.size() > 1) {
+ !Previous->is(TT_DictLiteral) && State.Stack.size() > 1 &&
+ !State.Stack.back().HasMultipleNestedBlocks) {
if (State.Stack[State.Stack.size() - 2].NestedBlockInlined && Newline)
for (unsigned i = 0, e = State.Stack.size() - 1; i != e; ++i)
State.Stack[i].NoLineBreak = true;
@@ -1502,10 +1503,25 @@ unsigned ContinuationIndenter::reformatRawStringLiteral(
// violate the rectangle rule and visually flows within the surrounding
// source.
bool ContentStartsOnNewline = Current.TokenText[OldPrefixSize] == '\n';
- unsigned NextStartColumn =
- ContentStartsOnNewline
- ? State.Stack.back().NestedBlockIndent + Style.IndentWidth
- : FirstStartColumn;
+ // If this token is the last parameter (checked by looking if it's followed by
+ // `)`), base the indent off the line's nested block indent. Otherwise,
+ // base the indent off the arguments indent, so we can achieve:
+ // fffffffffff(1, 2, 3, R"pb(
+ // key1: 1 #
+ // key2: 2)pb");
+ //
+ // fffffffffff(1, 2, 3,
+ // R"pb(
+ // key1: 1 #
+ // key2: 2
+ // )pb",
+ // 5);
+ unsigned CurrentIndent = (Current.Next && Current.Next->is(tok::r_paren))
+ ? State.Stack.back().NestedBlockIndent
+ : State.Stack.back().Indent;
+ unsigned NextStartColumn = ContentStartsOnNewline
+ ? CurrentIndent + Style.IndentWidth
+ : FirstStartColumn;
// The last start column is the column the raw string suffix starts if it is
// put on a newline.
@@ -1517,7 +1533,7 @@ unsigned ContinuationIndenter::reformatRawStringLiteral(
// indent.
unsigned LastStartColumn = Current.NewlinesBefore
? FirstStartColumn - NewPrefixSize
- : State.Stack.back().NestedBlockIndent;
+ : CurrentIndent;
std::pair<tooling::Replacements, unsigned> Fixes = internal::reformat(
RawStringStyle, RawText, {tooling::Range(0, RawText.size())},
@@ -1527,8 +1543,7 @@ unsigned ContinuationIndenter::reformatRawStringLiteral(
auto NewCode = applyAllReplacements(RawText, Fixes.first);
tooling::Replacements NoFixes;
if (!NewCode) {
- State.Column += Current.ColumnWidth;
- return 0;
+ return addMultilineToken(Current, State);
}
if (!DryRun) {
if (NewDelimiter != OldDelimiter) {
@@ -1577,6 +1592,13 @@ unsigned ContinuationIndenter::reformatRawStringLiteral(
unsigned PrefixExcessCharacters =
StartColumn + NewPrefixSize > Style.ColumnLimit ?
StartColumn + NewPrefixSize - Style.ColumnLimit : 0;
+ bool IsMultiline =
+ ContentStartsOnNewline || (NewCode->find('\n') != std::string::npos);
+ if (IsMultiline) {
+ // Break before further function parameters on all levels.
+ for (unsigned i = 0, e = State.Stack.size(); i != e; ++i)
+ State.Stack[i].BreakBeforeParameter = true;
+ }
return Fixes.second + PrefixExcessCharacters * Style.PenaltyExcessCharacter;
}
diff --git a/lib/Format/Format.cpp b/lib/Format/Format.cpp
index 0637f76f07..2c4f876054 100644
--- a/lib/Format/Format.cpp
+++ b/lib/Format/Format.cpp
@@ -29,7 +29,6 @@
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/SourceManager.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Lex/Lexer.h"
#include "clang/Tooling/Inclusions/HeaderIncludes.h"
#include "llvm/ADT/STLExtras.h"
@@ -38,6 +37,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Regex.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/YAMLTraits.h"
#include <algorithm>
#include <memory>
@@ -414,6 +414,7 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("IndentWidth", Style.IndentWidth);
IO.mapOptional("IndentWrappedFunctionNames",
Style.IndentWrappedFunctionNames);
+ IO.mapOptional("JavaImportGroups", Style.JavaImportGroups);
IO.mapOptional("JavaScriptQuotes", Style.JavaScriptQuotes);
IO.mapOptional("JavaScriptWrapImports", Style.JavaScriptWrapImports);
IO.mapOptional("KeepEmptyLinesAtTheStartOfBlocks",
@@ -469,6 +470,7 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("SpacesInParentheses", Style.SpacesInParentheses);
IO.mapOptional("SpacesInSquareBrackets", Style.SpacesInSquareBrackets);
IO.mapOptional("Standard", Style.Standard);
+ IO.mapOptional("StatementMacros", Style.StatementMacros);
IO.mapOptional("TabWidth", Style.TabWidth);
IO.mapOptional("UseTab", Style.UseTab);
}
@@ -714,6 +716,8 @@ FormatStyle getLLVMStyle() {
LLVMStyle.DisableFormat = false;
LLVMStyle.SortIncludes = true;
LLVMStyle.SortUsingDeclarations = true;
+ LLVMStyle.StatementMacros.push_back("Q_UNUSED");
+ LLVMStyle.StatementMacros.push_back("QT_REQUIRE_VERSION");
return LLVMStyle;
}
@@ -844,6 +848,20 @@ FormatStyle getChromiumStyle(FormatStyle::LanguageKind Language) {
ChromiumStyle.BreakAfterJavaFieldAnnotations = true;
ChromiumStyle.ContinuationIndentWidth = 8;
ChromiumStyle.IndentWidth = 4;
+ // See styleguide for import groups:
+ // https://chromium.googlesource.com/chromium/src/+/master/styleguide/java/java.md#Import-Order
+ ChromiumStyle.JavaImportGroups = {
+ "android",
+ "com",
+ "dalvik",
+ "junit",
+ "org",
+ "com.google.android.apps.chrome",
+ "org.chromium",
+ "java",
+ "javax",
+ };
+ ChromiumStyle.SortIncludes = true;
} else if (Language == FormatStyle::LK_JavaScript) {
ChromiumStyle.AllowShortIfStatementsOnASingleLine = false;
ChromiumStyle.AllowShortLoopsOnASingleLine = false;
@@ -1486,7 +1504,8 @@ public:
SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
FormatTokenLexer &Tokens) override {
assert(Style.Language == FormatStyle::LK_Cpp);
- IsObjC = guessIsObjC(AnnotatedLines, Tokens.getKeywords());
+ IsObjC = guessIsObjC(Env.getSourceManager(), AnnotatedLines,
+ Tokens.getKeywords());
tooling::Replacements Result;
return {Result, 0};
}
@@ -1494,8 +1513,10 @@ public:
bool isObjC() { return IsObjC; }
private:
- static bool guessIsObjC(const SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
- const AdditionalKeywords &Keywords) {
+ static bool
+ guessIsObjC(const SourceManager &SourceManager,
+ const SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
+ const AdditionalKeywords &Keywords) {
// Keep this array sorted, since we are binary searching over it.
static constexpr llvm::StringLiteral FoundationIdentifiers[] = {
"CGFloat",
@@ -1586,9 +1607,15 @@ private:
TT_ObjCBlockLBrace, TT_ObjCBlockLParen,
TT_ObjCDecl, TT_ObjCForIn, TT_ObjCMethodExpr,
TT_ObjCMethodSpecifier, TT_ObjCProperty)) {
+ LLVM_DEBUG(llvm::dbgs()
+ << "Detected ObjC at location "
+ << FormatTok->Tok.getLocation().printToString(
+ SourceManager)
+ << " token: " << FormatTok->TokenText << " token type: "
+ << getTokenTypeName(FormatTok->Type) << "\n");
return true;
}
- if (guessIsObjC(Line->Children, Keywords))
+ if (guessIsObjC(SourceManager, Line->Children, Keywords))
return true;
}
}
@@ -1605,6 +1632,14 @@ struct IncludeDirective {
int Category;
};
+struct JavaImportDirective {
+ StringRef Identifier;
+ StringRef Text;
+ unsigned Offset;
+ std::vector<StringRef> AssociatedCommentLines;
+ bool IsStatic;
+};
+
} // end anonymous namespace
// Determines whether 'Ranges' intersects with ('Start', 'End').
@@ -1723,7 +1758,7 @@ static void sortCppIncludes(const FormatStyle &Style,
namespace {
-const char IncludeRegexPattern[] =
+const char CppIncludeRegexPattern[] =
R"(^[\t\ ]*#[\t\ ]*(import|include)[^"<]*(["<][^">]*[">]))";
} // anonymous namespace
@@ -1735,7 +1770,7 @@ tooling::Replacements sortCppIncludes(const FormatStyle &Style, StringRef Code,
unsigned *Cursor) {
unsigned Prev = 0;
unsigned SearchFrom = 0;
- llvm::Regex IncludeRegex(IncludeRegexPattern);
+ llvm::Regex IncludeRegex(CppIncludeRegexPattern);
SmallVector<StringRef, 4> Matches;
SmallVector<IncludeDirective, 16> IncludesInBlock;
@@ -1794,6 +1829,149 @@ tooling::Replacements sortCppIncludes(const FormatStyle &Style, StringRef Code,
return Replaces;
}
+// Returns group number to use as a first order sort on imports. Gives UINT_MAX
+// if the import does not match any given group.
+static unsigned findJavaImportGroup(const FormatStyle &Style,
+ StringRef ImportIdentifier) {
+ unsigned LongestMatchIndex = UINT_MAX;
+ unsigned LongestMatchLength = 0;
+ for (unsigned I = 0; I < Style.JavaImportGroups.size(); I++) {
+ std::string GroupPrefix = Style.JavaImportGroups[I];
+ if (ImportIdentifier.startswith(GroupPrefix) &&
+ GroupPrefix.length() > LongestMatchLength) {
+ LongestMatchIndex = I;
+ LongestMatchLength = GroupPrefix.length();
+ }
+ }
+ return LongestMatchIndex;
+}
+
+// Sorts and deduplicates a block of imports given by 'Imports' based on
+// JavaImportGroups, then adds the necessary replacement to 'Replaces'.
+// Import declarations with the same text will be deduplicated. Between each
+// import group, a newline is inserted, and within each import group, a
+// lexicographic sort based on ASCII value is performed.
+static void sortJavaImports(const FormatStyle &Style,
+ const SmallVectorImpl<JavaImportDirective> &Imports,
+ ArrayRef<tooling::Range> Ranges, StringRef FileName,
+ tooling::Replacements &Replaces) {
+ unsigned ImportsBeginOffset = Imports.front().Offset;
+ unsigned ImportsEndOffset =
+ Imports.back().Offset + Imports.back().Text.size();
+ unsigned ImportsBlockSize = ImportsEndOffset - ImportsBeginOffset;
+ if (!affectsRange(Ranges, ImportsBeginOffset, ImportsEndOffset))
+ return;
+ SmallVector<unsigned, 16> Indices;
+ SmallVector<unsigned, 16> JavaImportGroups;
+ for (unsigned i = 0, e = Imports.size(); i != e; ++i) {
+ Indices.push_back(i);
+ JavaImportGroups.push_back(
+ findJavaImportGroup(Style, Imports[i].Identifier));
+ }
+ llvm::sort(Indices.begin(), Indices.end(), [&](unsigned LHSI, unsigned RHSI) {
+ // Negating IsStatic to push static imports above non-static imports.
+ return std::make_tuple(!Imports[LHSI].IsStatic, JavaImportGroups[LHSI],
+ Imports[LHSI].Identifier) <
+ std::make_tuple(!Imports[RHSI].IsStatic, JavaImportGroups[RHSI],
+ Imports[RHSI].Identifier);
+ });
+
+ // Deduplicate imports.
+ Indices.erase(std::unique(Indices.begin(), Indices.end(),
+ [&](unsigned LHSI, unsigned RHSI) {
+ return Imports[LHSI].Text == Imports[RHSI].Text;
+ }),
+ Indices.end());
+
+ bool CurrentIsStatic = Imports[Indices.front()].IsStatic;
+ unsigned CurrentImportGroup = JavaImportGroups[Indices.front()];
+
+ std::string result;
+ for (unsigned Index : Indices) {
+ if (!result.empty()) {
+ result += "\n";
+ if (CurrentIsStatic != Imports[Index].IsStatic ||
+ CurrentImportGroup != JavaImportGroups[Index])
+ result += "\n";
+ }
+ for (StringRef CommentLine : Imports[Index].AssociatedCommentLines) {
+ result += CommentLine;
+ result += "\n";
+ }
+ result += Imports[Index].Text;
+ CurrentIsStatic = Imports[Index].IsStatic;
+ CurrentImportGroup = JavaImportGroups[Index];
+ }
+
+ auto Err = Replaces.add(tooling::Replacement(FileName, Imports.front().Offset,
+ ImportsBlockSize, result));
+ // FIXME: better error handling. For now, just skip the replacement for the
+ // release version.
+ if (Err) {
+ llvm::errs() << llvm::toString(std::move(Err)) << "\n";
+ assert(false);
+ }
+}
+
+namespace {
+
+const char JavaImportRegexPattern[] =
+ "^[\t ]*import[\t ]*(static[\t ]*)?([^\t ]*)[\t ]*;";
+
+} // anonymous namespace
+
+tooling::Replacements sortJavaImports(const FormatStyle &Style, StringRef Code,
+ ArrayRef<tooling::Range> Ranges,
+ StringRef FileName,
+ tooling::Replacements &Replaces) {
+ unsigned Prev = 0;
+ unsigned SearchFrom = 0;
+ llvm::Regex ImportRegex(JavaImportRegexPattern);
+ SmallVector<StringRef, 4> Matches;
+ SmallVector<JavaImportDirective, 16> ImportsInBlock;
+ std::vector<StringRef> AssociatedCommentLines;
+
+ bool FormattingOff = false;
+
+ for (;;) {
+ auto Pos = Code.find('\n', SearchFrom);
+ StringRef Line =
+ Code.substr(Prev, (Pos != StringRef::npos ? Pos : Code.size()) - Prev);
+
+ StringRef Trimmed = Line.trim();
+ if (Trimmed == "// clang-format off")
+ FormattingOff = true;
+ else if (Trimmed == "// clang-format on")
+ FormattingOff = false;
+
+ if (ImportRegex.match(Line, &Matches)) {
+ if (FormattingOff) {
+ // If at least one import line has formatting turned off, turn off
+ // formatting entirely.
+ return Replaces;
+ }
+ StringRef Static = Matches[1];
+ StringRef Identifier = Matches[2];
+ bool IsStatic = false;
+ if (Static.contains("static")) {
+ IsStatic = true;
+ }
+ ImportsInBlock.push_back({Identifier, Line, Prev, AssociatedCommentLines, IsStatic});
+ AssociatedCommentLines.clear();
+ } else if (Trimmed.size() > 0 && !ImportsInBlock.empty()) {
+ // Associating comments within the imports with the nearest import below
+ AssociatedCommentLines.push_back(Line);
+ }
+ Prev = Pos + 1;
+ if (Pos == StringRef::npos || Pos + 1 == Code.size())
+ break;
+ SearchFrom = Pos + 1;
+ }
+ if (!ImportsInBlock.empty())
+ sortJavaImports(Style, ImportsInBlock, Ranges, FileName, Replaces);
+ return Replaces;
+}
+
bool isMpegTS(StringRef Code) {
// MPEG transport streams use the ".ts" file extension. clang-format should
// not attempt to format those. MPEG TS' frame format starts with 0x47 every
@@ -1816,6 +1994,8 @@ tooling::Replacements sortIncludes(const FormatStyle &Style, StringRef Code,
return Replaces;
if (Style.Language == FormatStyle::LanguageKind::LK_JavaScript)
return sortJavaScriptImports(Style, Code, Ranges, FileName);
+ if (Style.Language == FormatStyle::LanguageKind::LK_Java)
+ return sortJavaImports(Style, Code, Ranges, FileName, Replaces);
sortCppIncludes(Style, Code, Ranges, FileName, Replaces, Cursor);
return Replaces;
}
@@ -1869,7 +2049,8 @@ namespace {
inline bool isHeaderInsertion(const tooling::Replacement &Replace) {
return Replace.getOffset() == UINT_MAX && Replace.getLength() == 0 &&
- llvm::Regex(IncludeRegexPattern).match(Replace.getReplacementText());
+ llvm::Regex(CppIncludeRegexPattern)
+ .match(Replace.getReplacementText());
}
inline bool isHeaderDeletion(const tooling::Replacement &Replace) {
@@ -1922,7 +2103,7 @@ fixCppIncludeInsertions(StringRef Code, const tooling::Replacements &Replaces,
}
}
- llvm::Regex IncludeRegex = llvm::Regex(IncludeRegexPattern);
+ llvm::Regex IncludeRegex = llvm::Regex(CppIncludeRegexPattern);
llvm::SmallVector<StringRef, 4> Matches;
for (const auto &R : HeaderInsertions) {
auto IncludeDirective = R.getReplacementText();
@@ -2092,8 +2273,7 @@ LangOptions getFormattingLangOpts(const FormatStyle &Style) {
bool AlternativeOperators = Style.isCpp();
LangOpts.CXXOperatorNames = AlternativeOperators ? 1 : 0;
LangOpts.Bool = 1;
- LangOpts.ObjC1 = 1;
- LangOpts.ObjC2 = 1;
+ LangOpts.ObjC = 1;
LangOpts.MicrosoftExt = 1; // To get kw___try, kw___finally.
LangOpts.DeclSpecKeyword = 1; // To get __declspec.
return LangOpts;
@@ -2154,9 +2334,10 @@ const char *DefaultFallbackStyle = "LLVM";
llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
StringRef FallbackStyleName,
- StringRef Code, vfs::FileSystem *FS) {
+ StringRef Code,
+ llvm::vfs::FileSystem *FS) {
if (!FS) {
- FS = vfs::getRealFileSystem().get();
+ FS = llvm::vfs::getRealFileSystem().get();
}
FormatStyle Style = getLLVMStyle();
Style.Language = guessLanguage(FileName, Code);
diff --git a/lib/Format/FormatToken.h b/lib/Format/FormatToken.h
index 3733b888dd..10390c4291 100644
--- a/lib/Format/FormatToken.h
+++ b/lib/Format/FormatToken.h
@@ -86,6 +86,7 @@ namespace format {
TYPE(RegexLiteral) \
TYPE(SelectorName) \
TYPE(StartOfName) \
+ TYPE(StatementMacro) \
TYPE(StructuredBindingLSquare) \
TYPE(TemplateCloser) \
TYPE(TemplateOpener) \
@@ -188,10 +189,6 @@ struct FormatToken {
bool ClosesTemplateDeclaration = false;
/// Number of parameters, if this is "(", "[" or "<".
- ///
- /// This is initialized to 1 as we don't need to distinguish functions with
- /// 0 parameters from functions with 1 parameter. Thus, we can simply count
- /// the number of commas.
unsigned ParameterCount = 0;
/// Number of parameters that are nested blocks,
@@ -268,7 +265,7 @@ struct FormatToken {
/// \c true if this token ends a binary expression.
bool EndsBinaryExpression = false;
- /// Is this is an operator (or "."/"->") in a sequence of operators
+ /// If this is an operator (or "."/"->") in a sequence of operators
/// with the same precedence, contains the 0-based operator index.
unsigned OperatorIndex = 0;
@@ -602,6 +599,8 @@ public:
/// Notifies the \c Role that a comma was found.
virtual void CommaFound(const FormatToken *Token) {}
+ virtual const FormatToken *lastComma() { return nullptr; }
+
protected:
const FormatStyle &Style;
};
@@ -624,6 +623,12 @@ public:
Commas.push_back(Token);
}
+ const FormatToken *lastComma() override {
+ if (Commas.empty())
+ return nullptr;
+ return Commas.back();
+ }
+
private:
/// A struct that holds information on how to format a given list with
/// a specific number of columns.
diff --git a/lib/Format/FormatTokenLexer.cpp b/lib/Format/FormatTokenLexer.cpp
index 7f2edf102c..146f5d68b5 100644
--- a/lib/Format/FormatTokenLexer.cpp
+++ b/lib/Format/FormatTokenLexer.cpp
@@ -37,8 +37,9 @@ FormatTokenLexer::FormatTokenLexer(const SourceManager &SourceMgr, FileID ID,
Lex->SetKeepWhitespaceMode(true);
for (const std::string &ForEachMacro : Style.ForEachMacros)
- ForEachMacros.push_back(&IdentTable.get(ForEachMacro));
- llvm::sort(ForEachMacros);
+ Macros.insert({&IdentTable.get(ForEachMacro), TT_ForEachMacro});
+ for (const std::string &StatementMacro : Style.StatementMacros)
+ Macros.insert({&IdentTable.get(StatementMacro), TT_StatementMacro});
}
ArrayRef<FormatToken *> FormatTokenLexer::lex() {
@@ -657,12 +658,12 @@ FormatToken *FormatTokenLexer::getNextToken() {
}
if (Style.isCpp()) {
+ auto it = Macros.find(FormatTok->Tok.getIdentifierInfo());
if (!(Tokens.size() > 0 && Tokens.back()->Tok.getIdentifierInfo() &&
Tokens.back()->Tok.getIdentifierInfo()->getPPKeywordID() ==
tok::pp_define) &&
- std::find(ForEachMacros.begin(), ForEachMacros.end(),
- FormatTok->Tok.getIdentifierInfo()) != ForEachMacros.end()) {
- FormatTok->Type = TT_ForEachMacro;
+ it != Macros.end()) {
+ FormatTok->Type = it->second;
} else if (FormatTok->is(tok::identifier)) {
if (MacroBlockBeginRegex.match(Text)) {
FormatTok->Type = TT_MacroBlockBegin;
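
The FormatTokenLexer.cpp hunks above fold ForEachMacros and the new StatementMacros option into a single identifier-to-TokenType map, so a configured macro name is tagged TT_ForEachMacro or TT_StatementMacro in one lookup. A minimal sketch of feeding that map through the style (the macro name Q_UNUSED and the helper name are illustrative assumptions, not mandated by the patch):

    #include "clang/Format/Format.h"

    clang::format::FormatStyle makeStyleWithStatementMacro() {
      clang::format::FormatStyle Style = clang::format::getLLVMStyle();
      // ForEachMacros entries go through the same map and are tagged
      // TT_ForEachMacro; StatementMacros entries become TT_StatementMacro.
      Style.StatementMacros.push_back("Q_UNUSED");
      return Style;
    }
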
diff --git a/lib/Format/FormatTokenLexer.h b/lib/Format/FormatTokenLexer.h
index 3b79d27480..0cf357c85f 100644
--- a/lib/Format/FormatTokenLexer.h
+++ b/lib/Format/FormatTokenLexer.h
@@ -22,6 +22,7 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Format/Format.h"
#include "llvm/Support/Regex.h"
+#include "llvm/ADT/MapVector.h"
#include <stack>
@@ -99,7 +100,8 @@ private:
// Index (in 'Tokens') of the last token that starts a new line.
unsigned FirstInLineIndex;
SmallVector<FormatToken *, 16> Tokens;
- SmallVector<IdentifierInfo *, 8> ForEachMacros;
+
+ llvm::SmallMapVector<IdentifierInfo *, TokenType, 8> Macros;
bool FormattingDisabled;
diff --git a/lib/Format/TokenAnnotator.cpp b/lib/Format/TokenAnnotator.cpp
index 2cb4c0361b..a04f2e672b 100644
--- a/lib/Format/TokenAnnotator.cpp
+++ b/lib/Format/TokenAnnotator.cpp
@@ -366,7 +366,8 @@ private:
// specifier parameter, although this is technically valid:
// [[foo(:)]]
if (AttrTok->is(tok::colon) ||
- AttrTok->startsSequence(tok::identifier, tok::identifier))
+ AttrTok->startsSequence(tok::identifier, tok::identifier) ||
+ AttrTok->startsSequence(tok::r_paren, tok::identifier))
return false;
if (AttrTok->is(tok::ellipsis))
return true;
@@ -398,9 +399,11 @@ private:
bool IsCpp11AttributeSpecifier = isCpp11AttributeSpecifier(*Left) ||
Contexts.back().InCpp11AttributeSpecifier;
+ bool InsideInlineASM = Line.startsWith(tok::kw_asm);
bool StartsObjCMethodExpr =
- !CppArrayTemplates && Style.isCpp() && !IsCpp11AttributeSpecifier &&
- Contexts.back().CanBeExpression && Left->isNot(TT_LambdaLSquare) &&
+ !InsideInlineASM && !CppArrayTemplates && Style.isCpp() &&
+ !IsCpp11AttributeSpecifier && Contexts.back().CanBeExpression &&
+ Left->isNot(TT_LambdaLSquare) &&
!CurrentToken->isOneOf(tok::l_brace, tok::r_square) &&
(!Parent ||
Parent->isOneOf(tok::colon, tok::l_square, tok::l_paren,
@@ -2555,8 +2558,11 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
return false;
if (Left.is(TT_TemplateCloser) && Left.MatchingParen &&
Left.MatchingParen->Previous &&
- Left.MatchingParen->Previous->is(tok::period))
+ (Left.MatchingParen->Previous->is(tok::period) ||
+ Left.MatchingParen->Previous->is(tok::coloncolon)))
+ // Java call to generic function with explicit type:
// A.<B<C<...>>>DoSomething();
+ // A::<B<C<...>>>DoSomething(); // With a Java 8 method reference.
return false;
if (Left.is(TT_TemplateCloser) && Right.is(tok::l_square))
return false;
@@ -2776,6 +2782,9 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
if (!Style.SpaceBeforeAssignmentOperators &&
Right.getPrecedence() == prec::Assignment)
return false;
+ if (Style.Language == FormatStyle::LK_Java && Right.is(tok::coloncolon) &&
+ (Left.is(tok::identifier) || Left.is(tok::kw_this)))
+ return false;
if (Right.is(tok::coloncolon) && Left.is(tok::identifier))
// Generally don't remove existing spaces between an identifier and "::".
// The identifier might actually be a macro name such as ALWAYS_INLINE. If
@@ -2868,6 +2877,7 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
} else if (Style.Language == FormatStyle::LK_Cpp ||
Style.Language == FormatStyle::LK_ObjC ||
Style.Language == FormatStyle::LK_Proto ||
+ Style.Language == FormatStyle::LK_TableGen ||
Style.Language == FormatStyle::LK_TextProto) {
if (Left.isStringLiteral() && Right.isStringLiteral())
return true;
@@ -3043,6 +3053,30 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
return true;
}
+ // Deal with lambda arguments in C++ - we want consistent line breaks whether
+ // they happen to be at arg0, arg1 or argN. The selection is a bit nuanced
+ // as aggressive line breaks are placed when the lambda is not the last arg.
+ if ((Style.Language == FormatStyle::LK_Cpp ||
+ Style.Language == FormatStyle::LK_ObjC) &&
+ Left.is(tok::l_paren) && Left.BlockParameterCount > 0 &&
+ !Right.isOneOf(tok::l_paren, TT_LambdaLSquare)) {
+ // Multiple lambdas in the same function call force line breaks.
+ if (Left.BlockParameterCount > 1)
+ return true;
+
+ // A lambda followed by another arg forces a line break.
+ if (!Left.Role)
+ return false;
+ auto Comma = Left.Role->lastComma();
+ if (!Comma)
+ return false;
+ auto Next = Comma->getNextNonComment();
+ if (!Next)
+ return false;
+ if (!Next->isOneOf(TT_LambdaLSquare, tok::l_brace, tok::caret))
+ return true;
+ }
+
return false;
}
@@ -3080,8 +3114,21 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
// Don't wrap between ":" and "!" of a strict prop init ("field!: type;").
if (Left.is(tok::exclaim) && Right.is(tok::colon))
return false;
- if (Right.is(Keywords.kw_is))
- return false;
+ // Look for is type annotations like:
+ // function f(): a is B { ... }
+ // Do not break before is in these cases.
+ if (Right.is(Keywords.kw_is)) {
+ const FormatToken* Next = Right.getNextNonComment();
+ // If `is` is followed by a colon, it's likely that it's a dict key, so
+ // ignore it for this check.
+ // For example this is common in Polymer:
+ // Polymer({
+ // is: 'name',
+ // ...
+ // });
+ if (!Next || !Next->is(tok::colon))
+ return false;
+ }
if (Left.is(Keywords.kw_in))
return Style.BreakBeforeBinaryOperators == FormatStyle::BOS_None;
if (Right.is(Keywords.kw_in))
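
The TokenAnnotator.cpp hunks above touch several independent rules: C++11 attribute detection, Java spacing around "::" method references, TableGen string-literal breaking, lambda-argument line breaks, and the TypeScript "is" keyword. For the lambda rule, the cases in which a break is now forced after the opening parenthesis can be summarized as follows (the calls are illustrative C++, not test cases from the patch):

    // f([]{ a(); }, []{ b(); });   -> break forced: more than one lambda argument
    // f([]{ a(); }, trailingArg);  -> break forced: a lambda is followed by a
    //                                 non-lambda argument (found via lastComma())
    // f(arg, []{ a(); });          -> no forced break: the lambda is the last argument
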
diff --git a/lib/Format/UnwrappedLineFormatter.cpp b/lib/Format/UnwrappedLineFormatter.cpp
index 76b495f453..6b6a9aff46 100644
--- a/lib/Format/UnwrappedLineFormatter.cpp
+++ b/lib/Format/UnwrappedLineFormatter.cpp
@@ -988,8 +988,7 @@ private:
Path.push_front(Best);
Best = Best->Previous;
}
- for (std::deque<StateNode *>::iterator I = Path.begin(), E = Path.end();
- I != E; ++I) {
+ for (auto I = Path.begin(), E = Path.end(); I != E; ++I) {
unsigned Penalty = 0;
formatChildren(State, (*I)->NewLine, /*DryRun=*/false, Penalty);
Penalty += Indenter->addTokenToState(State, (*I)->NewLine, false);
@@ -998,8 +997,8 @@ private:
printLineState((*I)->Previous->State);
if ((*I)->NewLine) {
llvm::dbgs() << "Penalty for placing "
- << (*I)->Previous->State.NextToken->Tok.getName() << ": "
- << Penalty << "\n";
+ << (*I)->Previous->State.NextToken->Tok.getName()
+ << " on a new line: " << Penalty << "\n";
}
});
}
diff --git a/lib/Format/UnwrappedLineParser.cpp b/lib/Format/UnwrappedLineParser.cpp
index 09cd660e6d..3cd3c8f9cd 100644
--- a/lib/Format/UnwrappedLineParser.cpp
+++ b/lib/Format/UnwrappedLineParser.cpp
@@ -480,6 +480,10 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
}
LBraceStack.pop_back();
break;
+ case tok::identifier:
+ if (!Tok->is(TT_StatementMacro))
+ break;
+ LLVM_FALLTHROUGH;
case tok::at:
case tok::semi:
case tok::kw_if:
@@ -1108,6 +1112,10 @@ void UnwrappedLineParser::parseStructuralElement() {
return;
}
}
+ if (Style.isCpp() && FormatTok->is(TT_StatementMacro)) {
+ parseStatementMacro();
+ return;
+ }
// In all other cases, parse the declaration.
break;
default:
@@ -1122,6 +1130,10 @@ void UnwrappedLineParser::parseStructuralElement() {
nextToken();
parseBracedList();
break;
+ } else if (Style.Language == FormatStyle::LK_Java &&
+ FormatTok->is(Keywords.kw_interface)) {
+ nextToken();
+ break;
}
switch (FormatTok->Tok.getObjCKeywordID()) {
case tok::objc_public:
@@ -1309,6 +1321,11 @@ void UnwrappedLineParser::parseStructuralElement() {
return;
}
+ if (Style.isCpp() && FormatTok->is(TT_StatementMacro)) {
+ parseStatementMacro();
+ return;
+ }
+
// See if the following token should start a new unwrapped line.
StringRef Text = FormatTok->TokenText;
nextToken();
@@ -2151,6 +2168,8 @@ void UnwrappedLineParser::parseObjCMethod() {
addUnwrappedLine();
return;
} else if (FormatTok->Tok.is(tok::l_brace)) {
+ if (Style.BraceWrapping.AfterFunction)
+ addUnwrappedLine();
parseBlock(/*MustBeDeclaration=*/false);
addUnwrappedLine();
return;
@@ -2328,6 +2347,16 @@ void UnwrappedLineParser::parseJavaScriptEs6ImportExport() {
}
}
+void UnwrappedLineParser::parseStatementMacro()
+{
+ nextToken();
+ if (FormatTok->is(tok::l_paren))
+ parseParens();
+ if (FormatTok->is(tok::semi))
+ nextToken();
+ addUnwrappedLine();
+}
+
LLVM_ATTRIBUTE_UNUSED static void printDebugInfo(const UnwrappedLine &Line,
StringRef Prefix = "") {
llvm::dbgs() << Prefix << "Line(" << Line.Level
diff --git a/lib/Format/UnwrappedLineParser.h b/lib/Format/UnwrappedLineParser.h
index 87254832c6..55d60dff91 100644
--- a/lib/Format/UnwrappedLineParser.h
+++ b/lib/Format/UnwrappedLineParser.h
@@ -126,6 +126,7 @@ private:
void parseObjCInterfaceOrImplementation();
bool parseObjCProtocol();
void parseJavaScriptEs6ImportExport();
+ void parseStatementMacro();
bool tryToParseLambda();
bool tryToParseLambdaIntroducer();
void tryToParseJSFunction();
diff --git a/lib/Frontend/ASTConsumers.cpp b/lib/Frontend/ASTConsumers.cpp
index 6e2c87db29..28834a2de8 100644
--- a/lib/Frontend/ASTConsumers.cpp
+++ b/lib/Frontend/ASTConsumers.cpp
@@ -193,342 +193,3 @@ void ASTViewer::HandleTopLevelSingleDecl(Decl *D) {
std::unique_ptr<ASTConsumer> clang::CreateASTViewer() {
return llvm::make_unique<ASTViewer>();
}
-
-//===----------------------------------------------------------------------===//
-/// DeclContextPrinter - Decl and DeclContext Visualization
-
-namespace {
-
-class DeclContextPrinter : public ASTConsumer {
- raw_ostream& Out;
-public:
- DeclContextPrinter() : Out(llvm::errs()) {}
-
- void HandleTranslationUnit(ASTContext &C) override {
- PrintDeclContext(C.getTranslationUnitDecl(), 4);
- }
-
- void PrintDeclContext(const DeclContext* DC, unsigned Indentation);
-};
-} // end anonymous namespace
-
-void DeclContextPrinter::PrintDeclContext(const DeclContext* DC,
- unsigned Indentation) {
- // Print DeclContext name.
- switch (DC->getDeclKind()) {
- case Decl::TranslationUnit:
- Out << "[translation unit] " << DC;
- break;
- case Decl::Namespace: {
- Out << "[namespace] ";
- const NamespaceDecl* ND = cast<NamespaceDecl>(DC);
- Out << *ND;
- break;
- }
- case Decl::Enum: {
- const EnumDecl* ED = cast<EnumDecl>(DC);
- if (ED->isCompleteDefinition())
- Out << "[enum] ";
- else
- Out << "<enum> ";
- Out << *ED;
- break;
- }
- case Decl::Record: {
- const RecordDecl* RD = cast<RecordDecl>(DC);
- if (RD->isCompleteDefinition())
- Out << "[struct] ";
- else
- Out << "<struct> ";
- Out << *RD;
- break;
- }
- case Decl::CXXRecord: {
- const CXXRecordDecl* RD = cast<CXXRecordDecl>(DC);
- if (RD->isCompleteDefinition())
- Out << "[class] ";
- else
- Out << "<class> ";
- Out << *RD << ' ' << DC;
- break;
- }
- case Decl::ObjCMethod:
- Out << "[objc method]";
- break;
- case Decl::ObjCInterface:
- Out << "[objc interface]";
- break;
- case Decl::ObjCCategory:
- Out << "[objc category]";
- break;
- case Decl::ObjCProtocol:
- Out << "[objc protocol]";
- break;
- case Decl::ObjCImplementation:
- Out << "[objc implementation]";
- break;
- case Decl::ObjCCategoryImpl:
- Out << "[objc categoryimpl]";
- break;
- case Decl::LinkageSpec:
- Out << "[linkage spec]";
- break;
- case Decl::Block:
- Out << "[block]";
- break;
- case Decl::Function: {
- const FunctionDecl* FD = cast<FunctionDecl>(DC);
- if (FD->doesThisDeclarationHaveABody())
- Out << "[function] ";
- else
- Out << "<function> ";
- Out << *FD;
- // Print the parameters.
- Out << "(";
- bool PrintComma = false;
- for (auto I : FD->parameters()) {
- if (PrintComma)
- Out << ", ";
- else
- PrintComma = true;
- Out << *I;
- }
- Out << ")";
- break;
- }
- case Decl::CXXMethod: {
- const CXXMethodDecl* D = cast<CXXMethodDecl>(DC);
- if (D->isOutOfLine())
- Out << "[c++ method] ";
- else if (D->isImplicit())
- Out << "(c++ method) ";
- else
- Out << "<c++ method> ";
- Out << *D;
- // Print the parameters.
- Out << "(";
- bool PrintComma = false;
- for (ParmVarDecl *Parameter : D->parameters()) {
- if (PrintComma)
- Out << ", ";
- else
- PrintComma = true;
- Out << *Parameter;
- }
- Out << ")";
-
- // Check the semantic DeclContext.
- const DeclContext* SemaDC = D->getDeclContext();
- const DeclContext* LexicalDC = D->getLexicalDeclContext();
- if (SemaDC != LexicalDC)
- Out << " [[" << SemaDC << "]]";
-
- break;
- }
- case Decl::CXXConstructor: {
- const CXXConstructorDecl* D = cast<CXXConstructorDecl>(DC);
- if (D->isOutOfLine())
- Out << "[c++ ctor] ";
- else if (D->isImplicit())
- Out << "(c++ ctor) ";
- else
- Out << "<c++ ctor> ";
- Out << *D;
- // Print the parameters.
- Out << "(";
- bool PrintComma = false;
- for (ParmVarDecl *Parameter : D->parameters()) {
- if (PrintComma)
- Out << ", ";
- else
- PrintComma = true;
- Out << *Parameter;
- }
- Out << ")";
-
- // Check the semantic DC.
- const DeclContext* SemaDC = D->getDeclContext();
- const DeclContext* LexicalDC = D->getLexicalDeclContext();
- if (SemaDC != LexicalDC)
- Out << " [[" << SemaDC << "]]";
- break;
- }
- case Decl::CXXDestructor: {
- const CXXDestructorDecl* D = cast<CXXDestructorDecl>(DC);
- if (D->isOutOfLine())
- Out << "[c++ dtor] ";
- else if (D->isImplicit())
- Out << "(c++ dtor) ";
- else
- Out << "<c++ dtor> ";
- Out << *D;
- // Check the semantic DC.
- const DeclContext* SemaDC = D->getDeclContext();
- const DeclContext* LexicalDC = D->getLexicalDeclContext();
- if (SemaDC != LexicalDC)
- Out << " [[" << SemaDC << "]]";
- break;
- }
- case Decl::CXXConversion: {
- const CXXConversionDecl* D = cast<CXXConversionDecl>(DC);
- if (D->isOutOfLine())
- Out << "[c++ conversion] ";
- else if (D->isImplicit())
- Out << "(c++ conversion) ";
- else
- Out << "<c++ conversion> ";
- Out << *D;
- // Check the semantic DC.
- const DeclContext* SemaDC = D->getDeclContext();
- const DeclContext* LexicalDC = D->getLexicalDeclContext();
- if (SemaDC != LexicalDC)
- Out << " [[" << SemaDC << "]]";
- break;
- }
-
- case Decl::ClassTemplateSpecialization: {
- const auto *CTSD = cast<ClassTemplateSpecializationDecl>(DC);
- if (CTSD->isCompleteDefinition())
- Out << "[class template specialization] ";
- else
- Out << "<class template specialization> ";
- Out << *CTSD;
- break;
- }
-
- case Decl::ClassTemplatePartialSpecialization: {
- const auto *CTPSD = cast<ClassTemplatePartialSpecializationDecl>(DC);
- if (CTPSD->isCompleteDefinition())
- Out << "[class template partial specialization] ";
- else
- Out << "<class template partial specialization> ";
- Out << *CTPSD;
- break;
- }
-
- default:
- llvm_unreachable("a decl that inherits DeclContext isn't handled");
- }
-
- Out << "\n";
-
- // Print decls in the DeclContext.
- for (auto *I : DC->decls()) {
- for (unsigned i = 0; i < Indentation; ++i)
- Out << " ";
-
- Decl::Kind DK = I->getKind();
- switch (DK) {
- case Decl::Namespace:
- case Decl::Enum:
- case Decl::Record:
- case Decl::CXXRecord:
- case Decl::ObjCMethod:
- case Decl::ObjCInterface:
- case Decl::ObjCCategory:
- case Decl::ObjCProtocol:
- case Decl::ObjCImplementation:
- case Decl::ObjCCategoryImpl:
- case Decl::LinkageSpec:
- case Decl::Block:
- case Decl::Function:
- case Decl::CXXMethod:
- case Decl::CXXConstructor:
- case Decl::CXXDestructor:
- case Decl::CXXConversion:
- case Decl::ClassTemplateSpecialization:
- case Decl::ClassTemplatePartialSpecialization: {
- DeclContext* DC = cast<DeclContext>(I);
- PrintDeclContext(DC, Indentation+2);
- break;
- }
- case Decl::IndirectField:
- Out << "<IndirectField> " << *cast<IndirectFieldDecl>(I) << '\n';
- break;
- case Decl::Label:
- Out << "<Label> " << *cast<LabelDecl>(I) << '\n';
- break;
- case Decl::Field:
- Out << "<field> " << *cast<FieldDecl>(I) << '\n';
- break;
- case Decl::Typedef:
- case Decl::TypeAlias:
- Out << "<typedef> " << *cast<TypedefNameDecl>(I) << '\n';
- break;
- case Decl::EnumConstant:
- Out << "<enum constant> " << *cast<EnumConstantDecl>(I) << '\n';
- break;
- case Decl::Var:
- Out << "<var> " << *cast<VarDecl>(I) << '\n';
- break;
- case Decl::ImplicitParam:
- Out << "<implicit parameter> " << *cast<ImplicitParamDecl>(I) << '\n';
- break;
- case Decl::ParmVar:
- Out << "<parameter> " << *cast<ParmVarDecl>(I) << '\n';
- break;
- case Decl::ObjCProperty:
- Out << "<objc property> " << *cast<ObjCPropertyDecl>(I) << '\n';
- break;
- case Decl::FunctionTemplate:
- Out << "<function template> " << *cast<FunctionTemplateDecl>(I) << '\n';
- break;
- case Decl::TypeAliasTemplate:
- Out << "<type alias template> " << *cast<TypeAliasTemplateDecl>(I)
- << '\n';
- break;
- case Decl::FileScopeAsm:
- Out << "<file-scope asm>\n";
- break;
- case Decl::UsingDirective:
- Out << "<using directive>\n";
- break;
- case Decl::NamespaceAlias:
- Out << "<namespace alias> " << *cast<NamespaceAliasDecl>(I) << '\n';
- break;
- case Decl::ClassTemplate:
- Out << "<class template> " << *cast<ClassTemplateDecl>(I) << '\n';
- break;
- case Decl::OMPThreadPrivate: {
- Out << "<omp threadprivate> " << '"' << I << "\"\n";
- break;
- }
- case Decl::Friend: {
- Out << "<friend>";
- if (const NamedDecl *ND = cast<FriendDecl>(I)->getFriendDecl())
- Out << ' ' << *ND;
- Out << "\n";
- break;
- }
- case Decl::Using:
- Out << "<using> " << *cast<UsingDecl>(I) << "\n";
- break;
- case Decl::UsingShadow:
- Out << "<using shadow> " << *cast<UsingShadowDecl>(I) << "\n";
- break;
- case Decl::UnresolvedUsingValue:
- Out << "<unresolved using value> " << *cast<UnresolvedUsingValueDecl>(I)
- << "\n";
- break;
- case Decl::Empty:
- Out << "<empty>\n";
- break;
- case Decl::AccessSpec:
- Out << "<access specifier>\n";
- break;
- case Decl::VarTemplate:
- Out << "<var template> " << *cast<VarTemplateDecl>(I) << "\n";
- break;
- case Decl::StaticAssert:
- Out << "<static assert>\n";
- break;
-
- default:
- Out << "DeclKind: " << DK << '"' << I << "\"\n";
- llvm_unreachable("decl unhandled");
- }
- }
-}
-std::unique_ptr<ASTConsumer> clang::CreateDeclContextPrinter() {
- return llvm::make_unique<DeclContextPrinter>();
-}
diff --git a/lib/Frontend/ASTUnit.cpp b/lib/Frontend/ASTUnit.cpp
index d72fc92cc9..c7b2551cb8 100644
--- a/lib/Frontend/ASTUnit.cpp
+++ b/lib/Frontend/ASTUnit.cpp
@@ -37,7 +37,6 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/CompilerInvocation.h"
#include "clang/Frontend/FrontendAction.h"
@@ -45,7 +44,6 @@
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/FrontendOptions.h"
#include "clang/Frontend/MultiplexConsumer.h"
-#include "clang/Frontend/PCHContainerOperations.h"
#include "clang/Frontend/PrecompiledPreamble.h"
#include "clang/Frontend/Utils.h"
#include "clang/Lex/HeaderSearch.h"
@@ -64,6 +62,7 @@
#include "clang/Serialization/ASTWriter.h"
#include "clang/Serialization/ContinuousRangeMap.h"
#include "clang/Serialization/Module.h"
+#include "clang/Serialization/PCHContainerOperations.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
@@ -88,6 +87,7 @@
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/Timer.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <atomic>
@@ -155,9 +155,8 @@ static bool moveOnNoError(llvm::ErrorOr<T> Val, T &Output) {
/// and file-to-buffer remappings inside \p Invocation.
static std::unique_ptr<llvm::MemoryBuffer>
getBufferForFileHandlingRemapping(const CompilerInvocation &Invocation,
- vfs::FileSystem *VFS,
- StringRef FilePath,
- bool isVolatile) {
+ llvm::vfs::FileSystem *VFS,
+ StringRef FilePath, bool isVolatile) {
const auto &PreprocessorOpts = Invocation.getPreprocessorOpts();
// Try to determine if the main file has been remapped, either from the
@@ -283,7 +282,7 @@ void ASTUnit::enableSourceFileDiagnostics() {
/// Determine the set of code-completion contexts in which this
/// declaration should be shown.
-static unsigned getDeclShowContexts(const NamedDecl *ND,
+static uint64_t getDeclShowContexts(const NamedDecl *ND,
const LangOptions &LangOpts,
bool &IsNestedNameSpecifier) {
IsNestedNameSpecifier = false;
@@ -437,14 +436,15 @@ void ASTUnit::CacheCodeCompletionResults() {
| (1LL << CodeCompletionContext::CCC_UnionTag)
| (1LL << CodeCompletionContext::CCC_ClassOrStructTag)
| (1LL << CodeCompletionContext::CCC_Type)
- | (1LL << CodeCompletionContext::CCC_PotentiallyQualifiedName)
+ | (1LL << CodeCompletionContext::CCC_Symbol)
+ | (1LL << CodeCompletionContext::CCC_SymbolOrNewName)
| (1LL << CodeCompletionContext::CCC_ParenthesizedExpression);
if (isa<NamespaceDecl>(R.Declaration) ||
isa<NamespaceAliasDecl>(R.Declaration))
NNSContexts |= (1LL << CodeCompletionContext::CCC_Namespace);
- if (unsigned RemainingContexts
+ if (uint64_t RemainingContexts
= NNSContexts & ~CachedResult.ShowInContexts) {
// If there any contexts where this completion can be a
// nested-name-specifier but isn't already an option, create a
@@ -752,7 +752,8 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
AST->OnlyLocalDecls = OnlyLocalDecls;
AST->CaptureDiagnostics = CaptureDiagnostics;
AST->Diagnostics = Diags;
- IntrusiveRefCntPtr<vfs::FileSystem> VFS = vfs::getRealFileSystem();
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS =
+ llvm::vfs::getRealFileSystem();
AST->FileMgr = new FileManager(FileSystemOpts, VFS);
AST->UserFilesAreVolatile = UserFilesAreVolatile;
AST->SourceMgr = new SourceManager(AST->getDiagnostics(),
@@ -1074,7 +1075,7 @@ static void checkAndSanitizeDiags(SmallVectorImpl<StoredDiagnostic> &
/// contain any translation-unit information, false otherwise.
bool ASTUnit::Parse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
std::unique_ptr<llvm::MemoryBuffer> OverrideMainBuffer,
- IntrusiveRefCntPtr<vfs::FileSystem> VFS) {
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS) {
if (!Invocation)
return true;
@@ -1082,7 +1083,7 @@ bool ASTUnit::Parse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
if (OverrideMainBuffer) {
assert(Preamble &&
"No preamble was built, but OverrideMainBuffer is not null");
- IntrusiveRefCntPtr<vfs::FileSystem> OldVFS = VFS;
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> OldVFS = VFS;
Preamble->AddImplicitPreamble(*CCInvocation, VFS, OverrideMainBuffer.get());
if (OldVFS != VFS && FileMgr) {
assert(OldVFS == FileMgr->getVirtualFileSystem() &&
@@ -1279,7 +1280,7 @@ std::unique_ptr<llvm::MemoryBuffer>
ASTUnit::getMainBufferWithPrecompiledPreamble(
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
CompilerInvocation &PreambleInvocationIn,
- IntrusiveRefCntPtr<vfs::FileSystem> VFS, bool AllowRebuild,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS, bool AllowRebuild,
unsigned MaxLines) {
auto MainFilePath =
PreambleInvocationIn.getFrontendOpts().Inputs[0].getFile();
@@ -1468,7 +1469,7 @@ ASTUnit::create(std::shared_ptr<CompilerInvocation> CI,
bool CaptureDiagnostics, bool UserFilesAreVolatile) {
std::unique_ptr<ASTUnit> AST(new ASTUnit(false));
ConfigureDiags(Diags, *AST, CaptureDiagnostics);
- IntrusiveRefCntPtr<vfs::FileSystem> VFS =
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS =
createVFSFromCompilerInvocation(*CI, *Diags);
AST->Diagnostics = Diags;
AST->FileSystemOpts = CI->getFileSystemOpts();
@@ -1630,7 +1631,7 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(
bool ASTUnit::LoadFromCompilerInvocation(
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
unsigned PrecompilePreambleAfterNParses,
- IntrusiveRefCntPtr<vfs::FileSystem> VFS) {
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS) {
if (!Invocation)
return true;
@@ -1709,7 +1710,7 @@ ASTUnit *ASTUnit::LoadFromCommandLine(
bool AllowPCHWithCompilerErrors, SkipFunctionBodiesScope SkipFunctionBodies,
bool SingleFileParse, bool UserFilesAreVolatile, bool ForSerialization,
llvm::Optional<StringRef> ModuleFormat, std::unique_ptr<ASTUnit> *ErrAST,
- IntrusiveRefCntPtr<vfs::FileSystem> VFS) {
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS) {
assert(Diags.get() && "no DiagnosticsEngine was provided");
SmallVector<StoredDiagnostic, 4> StoredDiagnostics;
@@ -1754,7 +1755,7 @@ ASTUnit *ASTUnit::LoadFromCommandLine(
AST->Diagnostics = Diags;
AST->FileSystemOpts = CI->getFileSystemOpts();
if (!VFS)
- VFS = vfs::getRealFileSystem();
+ VFS = llvm::vfs::getRealFileSystem();
VFS = createVFSFromCompilerInvocation(*CI, *Diags, VFS);
AST->FileMgr = new FileManager(AST->FileSystemOpts, VFS);
AST->PCMCache = new MemoryBufferCache;
@@ -1794,7 +1795,7 @@ ASTUnit *ASTUnit::LoadFromCommandLine(
bool ASTUnit::Reparse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
ArrayRef<RemappedFile> RemappedFiles,
- IntrusiveRefCntPtr<vfs::FileSystem> VFS) {
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS) {
if (!Invocation)
return true;
@@ -1951,8 +1952,8 @@ static void CalculateHiddenNames(const CodeCompletionContext &Context,
case CodeCompletionContext::CCC_ObjCPropertyAccess:
case CodeCompletionContext::CCC_Namespace:
case CodeCompletionContext::CCC_Type:
- case CodeCompletionContext::CCC_Name:
- case CodeCompletionContext::CCC_PotentiallyQualifiedName:
+ case CodeCompletionContext::CCC_Symbol:
+ case CodeCompletionContext::CCC_SymbolOrNewName:
case CodeCompletionContext::CCC_ParenthesizedExpression:
case CodeCompletionContext::CCC_ObjCInterfaceName:
break;
@@ -1977,6 +1978,7 @@ static void CalculateHiddenNames(const CodeCompletionContext &Context,
case CodeCompletionContext::CCC_ObjCClassMessage:
case CodeCompletionContext::CCC_ObjCCategoryName:
case CodeCompletionContext::CCC_IncludedFile:
+ case CodeCompletionContext::CCC_NewName:
// We're looking for nothing, or we're looking for names that cannot
// be hidden.
return;
@@ -2646,9 +2648,9 @@ InputKind ASTUnit::getInputKind() const {
else if (LangOpts.RenderScript)
Lang = InputKind::RenderScript;
else if (LangOpts.CPlusPlus)
- Lang = LangOpts.ObjC1 ? InputKind::ObjCXX : InputKind::CXX;
+ Lang = LangOpts.ObjC ? InputKind::ObjCXX : InputKind::CXX;
else
- Lang = LangOpts.ObjC1 ? InputKind::ObjC : InputKind::C;
+ Lang = LangOpts.ObjC ? InputKind::ObjC : InputKind::C;
InputKind::Format Fmt = InputKind::Source;
if (LangOpts.getCompilingModule() == LangOptions::CMK_ModuleMap)
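
The ASTUnit.cpp hunks above track two cross-cutting renames: the virtual file system moved from clang/Basic/VirtualFileSystem.h (namespace clang::vfs) to llvm/Support/VirtualFileSystem.h (namespace llvm::vfs), and the ObjC1/ObjC2 language options collapsed into a single LangOptions::ObjC flag; PCHContainerOperations.h now lives under clang/Serialization, and the code-completion contexts CCC_Name/CCC_PotentiallyQualifiedName are replaced by CCC_Symbol and friends. A minimal sketch of the new spellings for client code (the helper names are assumptions):

    #include "clang/Basic/LangOptions.h"
    #include "llvm/Support/VirtualFileSystem.h"

    llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> realFS() {
      return llvm::vfs::getRealFileSystem();   // was clang::vfs::getRealFileSystem()
    }
    bool isObjC(const clang::LangOptions &LO) {
      return LO.ObjC;                          // was LO.ObjC1 / LO.ObjC2
    }
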
diff --git a/lib/Frontend/CMakeLists.txt b/lib/Frontend/CMakeLists.txt
index 6161b46a9d..3bd159537b 100644
--- a/lib/Frontend/CMakeLists.txt
+++ b/lib/Frontend/CMakeLists.txt
@@ -16,10 +16,8 @@ add_clang_library(clangFrontend
ASTConsumers.cpp
ASTMerge.cpp
ASTUnit.cpp
- CacheTokens.cpp
ChainedDiagnosticConsumer.cpp
ChainedIncludesSource.cpp
- CodeGenOptions.cpp
CompilerInstance.cpp
CompilerInvocation.cpp
CreateInvocationFromCommandLine.cpp
@@ -38,7 +36,6 @@ add_clang_library(clangFrontend
LogDiagnosticPrinter.cpp
ModuleDependencyCollector.cpp
MultiplexConsumer.cpp
- PCHContainerOperations.cpp
PrecompiledPreamble.cpp
PrintPreprocessedOutput.cpp
SerializedDiagnosticPrinter.cpp
diff --git a/lib/Frontend/CacheTokens.cpp b/lib/Frontend/CacheTokens.cpp
deleted file mode 100644
index c4504a1445..0000000000
--- a/lib/Frontend/CacheTokens.cpp
+++ /dev/null
@@ -1,700 +0,0 @@
-//===--- CacheTokens.cpp - Caching of lexer tokens for PTH support --------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This provides a possible implementation of PTH support for Clang that is
-// based on caching lexed tokens and identifiers.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/Basic/Diagnostic.h"
-#include "clang/Basic/FileManager.h"
-#include "clang/Basic/FileSystemStatCache.h"
-#include "clang/Basic/IdentifierTable.h"
-#include "clang/Basic/SourceManager.h"
-#include "clang/Frontend/Utils.h"
-#include "clang/Lex/Lexer.h"
-#include "clang/Lex/PTHManager.h"
-#include "clang/Lex/Preprocessor.h"
-#include "llvm/ADT/StringMap.h"
-#include "llvm/Support/DJB.h"
-#include "llvm/Support/EndianStream.h"
-#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/OnDiskHashTable.h"
-#include "llvm/Support/Path.h"
-
-// FIXME: put this somewhere else?
-#ifndef S_ISDIR
-#define S_ISDIR(x) (((x)&_S_IFDIR)!=0)
-#endif
-
-using namespace clang;
-
-//===----------------------------------------------------------------------===//
-// PTH-specific stuff.
-//===----------------------------------------------------------------------===//
-
-typedef uint32_t Offset;
-
-namespace {
-class PTHEntry {
- Offset TokenData, PPCondData;
-
-public:
- PTHEntry() {}
-
- PTHEntry(Offset td, Offset ppcd)
- : TokenData(td), PPCondData(ppcd) {}
-
- Offset getTokenOffset() const { return TokenData; }
- Offset getPPCondTableOffset() const { return PPCondData; }
-};
-
-
-class PTHEntryKeyVariant {
- union {
- const FileEntry *FE;
- // FIXME: Use "StringRef Path;" when MSVC 2013 is dropped.
- const char *PathPtr;
- };
- size_t PathSize;
- enum { IsFE = 0x1, IsDE = 0x2, IsNoExist = 0x0 } Kind;
- FileData *Data;
-
-public:
- PTHEntryKeyVariant(const FileEntry *fe) : FE(fe), Kind(IsFE), Data(nullptr) {}
-
- PTHEntryKeyVariant(FileData *Data, StringRef Path)
- : PathPtr(Path.data()), PathSize(Path.size()), Kind(IsDE),
- Data(new FileData(*Data)) {}
-
- explicit PTHEntryKeyVariant(StringRef Path)
- : PathPtr(Path.data()), PathSize(Path.size()), Kind(IsNoExist),
- Data(nullptr) {}
-
- bool isFile() const { return Kind == IsFE; }
-
- StringRef getString() const {
- return Kind == IsFE ? FE->getName() : StringRef(PathPtr, PathSize);
- }
-
- unsigned getKind() const { return (unsigned) Kind; }
-
- void EmitData(raw_ostream& Out) {
- using namespace llvm::support;
- endian::Writer LE(Out, little);
- switch (Kind) {
- case IsFE: {
- // Emit stat information.
- llvm::sys::fs::UniqueID UID = FE->getUniqueID();
- LE.write<uint64_t>(UID.getFile());
- LE.write<uint64_t>(UID.getDevice());
- LE.write<uint64_t>(FE->getModificationTime());
- LE.write<uint64_t>(FE->getSize());
- } break;
- case IsDE:
- // Emit stat information.
- LE.write<uint64_t>(Data->UniqueID.getFile());
- LE.write<uint64_t>(Data->UniqueID.getDevice());
- LE.write<uint64_t>(Data->ModTime);
- LE.write<uint64_t>(Data->Size);
- delete Data;
- break;
- default:
- break;
- }
- }
-
- unsigned getRepresentationLength() const {
- return Kind == IsNoExist ? 0 : 4 * 8;
- }
-};
-
-class FileEntryPTHEntryInfo {
-public:
- typedef PTHEntryKeyVariant key_type;
- typedef key_type key_type_ref;
-
- typedef PTHEntry data_type;
- typedef const PTHEntry& data_type_ref;
-
- typedef unsigned hash_value_type;
- typedef unsigned offset_type;
-
- static hash_value_type ComputeHash(PTHEntryKeyVariant V) {
- return llvm::djbHash(V.getString());
- }
-
- static std::pair<unsigned,unsigned>
- EmitKeyDataLength(raw_ostream& Out, PTHEntryKeyVariant V,
- const PTHEntry& E) {
- using namespace llvm::support;
- endian::Writer LE(Out, little);
-
- unsigned n = V.getString().size() + 1 + 1;
- LE.write<uint16_t>(n);
-
- unsigned m = V.getRepresentationLength() + (V.isFile() ? 4 + 4 : 0);
- LE.write<uint8_t>(m);
-
- return std::make_pair(n, m);
- }
-
- static void EmitKey(raw_ostream& Out, PTHEntryKeyVariant V, unsigned n){
- using namespace llvm::support;
- // Emit the entry kind.
- Out << char(V.getKind());
- // Emit the string.
- Out.write(V.getString().data(), n - 1);
- }
-
- static void EmitData(raw_ostream& Out, PTHEntryKeyVariant V,
- const PTHEntry& E, unsigned) {
- using namespace llvm::support;
- endian::Writer LE(Out, little);
-
- // For file entries emit the offsets into the PTH file for token data
- // and the preprocessor blocks table.
- if (V.isFile()) {
- LE.write<uint32_t>(E.getTokenOffset());
- LE.write<uint32_t>(E.getPPCondTableOffset());
- }
-
- // Emit any other data associated with the key (i.e., stat information).
- V.EmitData(Out);
- }
-};
-
-class OffsetOpt {
- bool valid;
- Offset off;
-public:
- OffsetOpt() : valid(false) {}
- bool hasOffset() const { return valid; }
- Offset getOffset() const { assert(valid); return off; }
- void setOffset(Offset o) { off = o; valid = true; }
-};
-} // end anonymous namespace
-
-typedef llvm::OnDiskChainedHashTableGenerator<FileEntryPTHEntryInfo> PTHMap;
-
-namespace {
-class PTHWriter {
- typedef llvm::DenseMap<const IdentifierInfo*,uint32_t> IDMap;
- typedef llvm::StringMap<OffsetOpt, llvm::BumpPtrAllocator> CachedStrsTy;
-
- raw_pwrite_stream &Out;
- Preprocessor& PP;
- IDMap IM;
- std::vector<llvm::StringMapEntry<OffsetOpt>*> StrEntries;
- PTHMap PM;
- CachedStrsTy CachedStrs;
- uint32_t idcount;
- Offset CurStrOffset;
-
- //// Get the persistent id for the given IdentifierInfo*.
- uint32_t ResolveID(const IdentifierInfo* II);
-
- /// Emit a token to the PTH file.
- void EmitToken(const Token& T);
-
- void Emit8(uint32_t V) {
- Out << char(V);
- }
-
- void Emit16(uint32_t V) {
- using namespace llvm::support;
- endian::write<uint16_t>(Out, V, little);
- }
-
- void Emit32(uint32_t V) {
- using namespace llvm::support;
- endian::write<uint32_t>(Out, V, little);
- }
-
- void EmitBuf(const char *Ptr, unsigned NumBytes) {
- Out.write(Ptr, NumBytes);
- }
-
- void EmitString(StringRef V) {
- using namespace llvm::support;
- endian::write<uint16_t>(Out, V.size(), little);
- EmitBuf(V.data(), V.size());
- }
-
- /// EmitIdentifierTable - Emits two tables to the PTH file. The first is
- /// a hashtable mapping from identifier strings to persistent IDs.
- /// The second is a straight table mapping from persistent IDs to string data
- /// (the keys of the first table).
- std::pair<Offset, Offset> EmitIdentifierTable();
-
- /// EmitFileTable - Emit a table mapping from file name strings to PTH
- /// token data.
- Offset EmitFileTable() { return PM.Emit(Out); }
-
- PTHEntry LexTokens(Lexer& L);
- Offset EmitCachedSpellings();
-
-public:
- PTHWriter(raw_pwrite_stream &out, Preprocessor &pp)
- : Out(out), PP(pp), idcount(0), CurStrOffset(0) {}
-
- PTHMap &getPM() { return PM; }
- void GeneratePTH(StringRef MainFile);
-};
-} // end anonymous namespace
-
-uint32_t PTHWriter::ResolveID(const IdentifierInfo* II) {
- // Null IdentifierInfo's map to the persistent ID 0.
- if (!II)
- return 0;
-
- IDMap::iterator I = IM.find(II);
- if (I != IM.end())
- return I->second; // We've already added 1.
-
- IM[II] = ++idcount; // Pre-increment since '0' is reserved for NULL.
- return idcount;
-}
-
-void PTHWriter::EmitToken(const Token& T) {
- // Emit the token kind, flags, and length.
- Emit32(((uint32_t) T.getKind()) | ((((uint32_t) T.getFlags())) << 8)|
- (((uint32_t) T.getLength()) << 16));
-
- if (!T.isLiteral()) {
- Emit32(ResolveID(T.getIdentifierInfo()));
- } else {
- // We cache *un-cleaned* spellings. This gives us 100% fidelity with the
- // source code.
- StringRef s(T.getLiteralData(), T.getLength());
-
- // Get the string entry.
- auto &E = *CachedStrs.insert(std::make_pair(s, OffsetOpt())).first;
-
- // If this is a new string entry, bump the PTH offset.
- if (!E.second.hasOffset()) {
- E.second.setOffset(CurStrOffset);
- StrEntries.push_back(&E);
- CurStrOffset += s.size() + 1;
- }
-
- // Emit the relative offset into the PTH file for the spelling string.
- Emit32(E.second.getOffset());
- }
-
- // Emit the offset into the original source file of this token so that we
- // can reconstruct its SourceLocation.
- Emit32(PP.getSourceManager().getFileOffset(T.getLocation()));
-}
-
-PTHEntry PTHWriter::LexTokens(Lexer& L) {
- // Pad 0's so that we emit tokens to a 4-byte alignment.
- // This speed up reading them back in.
- using namespace llvm::support;
- endian::Writer LE(Out, little);
- uint32_t TokenOff = Out.tell();
- for (uint64_t N = llvm::OffsetToAlignment(TokenOff, 4); N; --N, ++TokenOff)
- LE.write<uint8_t>(0);
-
- // Keep track of matching '#if' ... '#endif'.
- typedef std::vector<std::pair<Offset, unsigned> > PPCondTable;
- PPCondTable PPCond;
- std::vector<unsigned> PPStartCond;
- bool ParsingPreprocessorDirective = false;
- Token Tok;
-
- do {
- L.LexFromRawLexer(Tok);
- NextToken:
-
- if ((Tok.isAtStartOfLine() || Tok.is(tok::eof)) &&
- ParsingPreprocessorDirective) {
- // Insert an eod token into the token cache. It has the same
- // position as the next token that is not on the same line as the
- // preprocessor directive. Observe that we continue processing
- // 'Tok' when we exit this branch.
- Token Tmp = Tok;
- Tmp.setKind(tok::eod);
- Tmp.clearFlag(Token::StartOfLine);
- Tmp.setIdentifierInfo(nullptr);
- EmitToken(Tmp);
- ParsingPreprocessorDirective = false;
- }
-
- if (Tok.is(tok::raw_identifier)) {
- PP.LookUpIdentifierInfo(Tok);
- EmitToken(Tok);
- continue;
- }
-
- if (Tok.is(tok::hash) && Tok.isAtStartOfLine()) {
- // Special processing for #include. Store the '#' token and lex
- // the next token.
- assert(!ParsingPreprocessorDirective);
- Offset HashOff = (Offset) Out.tell();
-
- // Get the next token.
- Token NextTok;
- L.LexFromRawLexer(NextTok);
-
- // If we see the start of line, then we had a null directive "#". In
- // this case, discard both tokens.
- if (NextTok.isAtStartOfLine())
- goto NextToken;
-
- // The token is the start of a directive. Emit it.
- EmitToken(Tok);
- Tok = NextTok;
-
- // Did we see 'include'/'import'/'include_next'?
- if (Tok.isNot(tok::raw_identifier)) {
- EmitToken(Tok);
- continue;
- }
-
- IdentifierInfo* II = PP.LookUpIdentifierInfo(Tok);
- tok::PPKeywordKind K = II->getPPKeywordID();
-
- ParsingPreprocessorDirective = true;
-
- switch (K) {
- case tok::pp_not_keyword:
- // Invalid directives "#foo" can occur in #if 0 blocks etc, just pass
- // them through.
- default:
- break;
-
- case tok::pp_include:
- case tok::pp_import:
- case tok::pp_include_next: {
- // Save the 'include' token.
- EmitToken(Tok);
- // Lex the next token as an include string.
- L.setParsingPreprocessorDirective(true);
- L.LexIncludeFilename(Tok);
- L.setParsingPreprocessorDirective(false);
- assert(!Tok.isAtStartOfLine());
- if (Tok.is(tok::raw_identifier))
- PP.LookUpIdentifierInfo(Tok);
-
- break;
- }
- case tok::pp_if:
- case tok::pp_ifdef:
- case tok::pp_ifndef: {
- // Add an entry for '#if' and friends. We initially set the target
- // index to 0. This will get backpatched when we hit #endif.
- PPStartCond.push_back(PPCond.size());
- PPCond.push_back(std::make_pair(HashOff, 0U));
- break;
- }
- case tok::pp_endif: {
- // Add an entry for '#endif'. We set the target table index to itself.
- // This will later be set to zero when emitting to the PTH file. We
- // use 0 for uninitialized indices because that is easier to debug.
- unsigned index = PPCond.size();
- // Backpatch the opening '#if' entry.
- assert(!PPStartCond.empty());
- assert(PPCond.size() > PPStartCond.back());
- assert(PPCond[PPStartCond.back()].second == 0);
- PPCond[PPStartCond.back()].second = index;
- PPStartCond.pop_back();
- // Add the new entry to PPCond.
- PPCond.push_back(std::make_pair(HashOff, index));
- EmitToken(Tok);
-
- // Some files have gibberish on the same line as '#endif'.
- // Discard these tokens.
- do
- L.LexFromRawLexer(Tok);
- while (Tok.isNot(tok::eof) && !Tok.isAtStartOfLine());
- // We have the next token in hand.
- // Don't immediately lex the next one.
- goto NextToken;
- }
- case tok::pp_elif:
- case tok::pp_else: {
- // Add an entry for #elif or #else.
- // This serves as both a closing and opening of a conditional block.
- // This means that its entry will get backpatched later.
- unsigned index = PPCond.size();
- // Backpatch the previous '#if' entry.
- assert(!PPStartCond.empty());
- assert(PPCond.size() > PPStartCond.back());
- assert(PPCond[PPStartCond.back()].second == 0);
- PPCond[PPStartCond.back()].second = index;
- PPStartCond.pop_back();
- // Now add '#elif' as a new block opening.
- PPCond.push_back(std::make_pair(HashOff, 0U));
- PPStartCond.push_back(index);
- break;
- }
- }
- }
-
- EmitToken(Tok);
- }
- while (Tok.isNot(tok::eof));
-
- assert(PPStartCond.empty() && "Error: imblanced preprocessor conditionals.");
-
- // Next write out PPCond.
- Offset PPCondOff = (Offset) Out.tell();
-
- // Write out the size of PPCond so that clients can identifer empty tables.
- Emit32(PPCond.size());
-
- for (unsigned i = 0, e = PPCond.size(); i!=e; ++i) {
- Emit32(PPCond[i].first - TokenOff);
- uint32_t x = PPCond[i].second;
- assert(x != 0 && "PPCond entry not backpatched.");
- // Emit zero for #endifs. This allows us to do checking when
- // we read the PTH file back in.
- Emit32(x == i ? 0 : x);
- }
-
- return PTHEntry(TokenOff, PPCondOff);
-}
-
-Offset PTHWriter::EmitCachedSpellings() {
- // Write each cached strings to the PTH file.
- Offset SpellingsOff = Out.tell();
-
- for (std::vector<llvm::StringMapEntry<OffsetOpt>*>::iterator
- I = StrEntries.begin(), E = StrEntries.end(); I!=E; ++I)
- EmitBuf((*I)->getKeyData(), (*I)->getKeyLength()+1 /*nul included*/);
-
- return SpellingsOff;
-}
-
-static uint32_t swap32le(uint32_t X) {
- return llvm::support::endian::byte_swap<uint32_t, llvm::support::little>(X);
-}
-
-static void pwrite32le(raw_pwrite_stream &OS, uint32_t Val, uint64_t &Off) {
- uint32_t LEVal = swap32le(Val);
- OS.pwrite(reinterpret_cast<const char *>(&LEVal), 4, Off);
- Off += 4;
-}
-
-void PTHWriter::GeneratePTH(StringRef MainFile) {
- // Generate the prologue.
- Out << "cfe-pth" << '\0';
- Emit32(PTHManager::Version);
-
- // Leave 4 words for the prologue.
- Offset PrologueOffset = Out.tell();
- for (unsigned i = 0; i < 4; ++i)
- Emit32(0);
-
- // Write the name of the MainFile.
- if (!MainFile.empty()) {
- EmitString(MainFile);
- } else {
- // String with 0 bytes.
- Emit16(0);
- }
- Emit8(0);
-
- // Iterate over all the files in SourceManager. Create a lexer
- // for each file and cache the tokens.
- SourceManager &SM = PP.getSourceManager();
- const LangOptions &LOpts = PP.getLangOpts();
-
- for (SourceManager::fileinfo_iterator I = SM.fileinfo_begin(),
- E = SM.fileinfo_end(); I != E; ++I) {
- const SrcMgr::ContentCache &C = *I->second;
- const FileEntry *FE = C.OrigEntry;
-
- // FIXME: Handle files with non-absolute paths.
- if (llvm::sys::path::is_relative(FE->getName()))
- continue;
-
- const llvm::MemoryBuffer *B = C.getBuffer(PP.getDiagnostics(), SM);
- if (!B) continue;
-
- FileID FID = SM.createFileID(FE, SourceLocation(), SrcMgr::C_User);
- const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);
- Lexer L(FID, FromFile, SM, LOpts);
- PM.insert(FE, LexTokens(L));
- }
-
- // Write out the identifier table.
- const std::pair<Offset,Offset> &IdTableOff = EmitIdentifierTable();
-
- // Write out the cached strings table.
- Offset SpellingOff = EmitCachedSpellings();
-
- // Write out the file table.
- Offset FileTableOff = EmitFileTable();
-
- // Finally, write the prologue.
- uint64_t Off = PrologueOffset;
- pwrite32le(Out, IdTableOff.first, Off);
- pwrite32le(Out, IdTableOff.second, Off);
- pwrite32le(Out, FileTableOff, Off);
- pwrite32le(Out, SpellingOff, Off);
-}
-
-namespace {
-/// StatListener - A simple "interpose" object used to monitor stat calls
-/// invoked by FileManager while processing the original sources used
-/// as input to PTH generation. StatListener populates the PTHWriter's
-/// file map with stat information for directories as well as negative stats.
-/// Stat information for files are populated elsewhere.
-class StatListener : public FileSystemStatCache {
- PTHMap &PM;
-public:
- StatListener(PTHMap &pm) : PM(pm) {}
- ~StatListener() override {}
-
- LookupResult getStat(StringRef Path, FileData &Data, bool isFile,
- std::unique_ptr<vfs::File> *F,
- vfs::FileSystem &FS) override {
- LookupResult Result = statChained(Path, Data, isFile, F, FS);
-
- if (Result == CacheMissing) // Failed 'stat'.
- PM.insert(PTHEntryKeyVariant(Path), PTHEntry());
- else if (Data.IsDirectory) {
- // Only cache directories with absolute paths.
- if (llvm::sys::path::is_relative(Path))
- return Result;
-
- PM.insert(PTHEntryKeyVariant(&Data, Path), PTHEntry());
- }
-
- return Result;
- }
-};
-} // end anonymous namespace
-
-void clang::CacheTokens(Preprocessor &PP, raw_pwrite_stream *OS) {
- // Get the name of the main file.
- const SourceManager &SrcMgr = PP.getSourceManager();
- const FileEntry *MainFile = SrcMgr.getFileEntryForID(SrcMgr.getMainFileID());
- SmallString<128> MainFilePath(MainFile->getName());
-
- llvm::sys::fs::make_absolute(MainFilePath);
-
- // Create the PTHWriter.
- PTHWriter PW(*OS, PP);
-
- // Install the 'stat' system call listener in the FileManager.
- auto StatCacheOwner = llvm::make_unique<StatListener>(PW.getPM());
- StatListener *StatCache = StatCacheOwner.get();
- PP.getFileManager().addStatCache(std::move(StatCacheOwner),
- /*AtBeginning=*/true);
-
- // Lex through the entire file. This will populate SourceManager with
- // all of the header information.
- Token Tok;
- PP.EnterMainSourceFile();
- do { PP.Lex(Tok); } while (Tok.isNot(tok::eof));
-
- // Generate the PTH file.
- PP.getFileManager().removeStatCache(StatCache);
- PW.GeneratePTH(MainFilePath.str());
-}
-
-//===----------------------------------------------------------------------===//
-
-namespace {
-class PTHIdKey {
-public:
- const IdentifierInfo* II;
- uint32_t FileOffset;
-};
-
-class PTHIdentifierTableTrait {
-public:
- typedef PTHIdKey* key_type;
- typedef key_type key_type_ref;
-
- typedef uint32_t data_type;
- typedef data_type data_type_ref;
-
- typedef unsigned hash_value_type;
- typedef unsigned offset_type;
-
- static hash_value_type ComputeHash(PTHIdKey* key) {
- return llvm::djbHash(key->II->getName());
- }
-
- static std::pair<unsigned,unsigned>
- EmitKeyDataLength(raw_ostream& Out, const PTHIdKey* key, uint32_t) {
- using namespace llvm::support;
- unsigned n = key->II->getLength() + 1;
- endian::write<uint16_t>(Out, n, little);
- return std::make_pair(n, sizeof(uint32_t));
- }
-
- static void EmitKey(raw_ostream& Out, PTHIdKey* key, unsigned n) {
- // Record the location of the key data. This is used when generating
- // the mapping from persistent IDs to strings.
- key->FileOffset = Out.tell();
- Out.write(key->II->getNameStart(), n);
- }
-
- static void EmitData(raw_ostream& Out, PTHIdKey*, uint32_t pID,
- unsigned) {
- using namespace llvm::support;
- endian::write<uint32_t>(Out, pID, little);
- }
-};
-} // end anonymous namespace
-
-/// EmitIdentifierTable - Emits two tables to the PTH file. The first is
-/// a hashtable mapping from identifier strings to persistent IDs. The second
-/// is a straight table mapping from persistent IDs to string data (the
-/// keys of the first table).
-///
-std::pair<Offset,Offset> PTHWriter::EmitIdentifierTable() {
- // Build two maps:
- // (1) an inverse map from persistent IDs -> (IdentifierInfo*,Offset)
- // (2) a map from (IdentifierInfo*, Offset)* -> persistent IDs
-
- // Note that we use 'calloc', so all the bytes are 0.
- PTHIdKey *IIDMap = static_cast<PTHIdKey*>(
- llvm::safe_calloc(idcount, sizeof(PTHIdKey)));
-
- // Create the hashtable.
- llvm::OnDiskChainedHashTableGenerator<PTHIdentifierTableTrait> IIOffMap;
-
- // Generate mapping from persistent IDs -> IdentifierInfo*.
- for (IDMap::iterator I = IM.begin(), E = IM.end(); I != E; ++I) {
- // Decrement by 1 because we are using a vector for the lookup and
- // 0 is reserved for NULL.
- assert(I->second > 0);
- assert(I->second-1 < idcount);
- unsigned idx = I->second-1;
-
- // Store the mapping from persistent ID to IdentifierInfo*
- IIDMap[idx].II = I->first;
-
- // Store the reverse mapping in a hashtable.
- IIOffMap.insert(&IIDMap[idx], I->second);
- }
-
- // Write out the inverse map first. This causes the PCIDKey entries to
- // record PTH file offsets for the string data. This is used to write
- // the second table.
- Offset StringTableOffset = IIOffMap.Emit(Out);
-
- // Now emit the table mapping from persistent IDs to PTH file offsets.
- Offset IDOff = Out.tell();
- Emit32(idcount); // Emit the number of identifiers.
- for (unsigned i = 0 ; i < idcount; ++i)
- Emit32(IIDMap[i].FileOffset);
-
- // Finally, release the inverse map.
- free(IIDMap);
-
- return std::make_pair(IDOff, StringTableOffset);
-}
diff --git a/lib/Frontend/ChainedIncludesSource.cpp b/lib/Frontend/ChainedIncludesSource.cpp
index 4e8eb32121..1bfc25c4c7 100644
--- a/lib/Frontend/ChainedIncludesSource.cpp
+++ b/lib/Frontend/ChainedIncludesSource.cpp
@@ -129,7 +129,6 @@ IntrusiveRefCntPtr<ExternalSemaSource> clang::createChainedIncludesSource(
CInvok->getPreprocessorOpts().ChainedIncludes.clear();
CInvok->getPreprocessorOpts().ImplicitPCHInclude.clear();
- CInvok->getPreprocessorOpts().ImplicitPTHInclude.clear();
CInvok->getPreprocessorOpts().DisablePCHValidation = true;
CInvok->getPreprocessorOpts().Includes.clear();
CInvok->getPreprocessorOpts().MacroIncludes.clear();
diff --git a/lib/Frontend/CompilerInstance.cpp b/lib/Frontend/CompilerInstance.cpp
index 05ba3c0265..f666745354 100644
--- a/lib/Frontend/CompilerInstance.cpp
+++ b/lib/Frontend/CompilerInstance.cpp
@@ -30,7 +30,6 @@
#include "clang/Frontend/Utils.h"
#include "clang/Frontend/VerifyDiagnosticConsumer.h"
#include "clang/Lex/HeaderSearch.h"
-#include "clang/Lex/PTHManager.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Sema/CodeCompleteConsumer.h"
@@ -38,6 +37,7 @@
#include "clang/Serialization/ASTReader.h"
#include "clang/Serialization/GlobalModuleIndex.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/BuryPointer.h"
#include "llvm/Support/CrashRecoveryContext.h"
#include "llvm/Support/Errc.h"
#include "llvm/Support/FileSystem.h"
@@ -177,9 +177,9 @@ static void collectIncludePCH(CompilerInstance &CI,
std::error_code EC;
SmallString<128> DirNative;
llvm::sys::path::native(PCHDir->getName(), DirNative);
- vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
+ llvm::vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
SimpleASTReaderListener Validator(CI.getPreprocessor());
- for (vfs::directory_iterator Dir = FS.dir_begin(DirNative, EC), DirEnd;
+ for (llvm::vfs::directory_iterator Dir = FS.dir_begin(DirNative, EC), DirEnd;
Dir != DirEnd && !EC; Dir.increment(EC)) {
// Check whether this is an AST file. ASTReader::isAcceptableASTFile is not
// used here since we're not interested in validating the PCH at this time,
@@ -198,14 +198,14 @@ static void collectVFSEntries(CompilerInstance &CI,
return;
// Collect all VFS found.
- SmallVector<vfs::YAMLVFSEntry, 16> VFSEntries;
+ SmallVector<llvm::vfs::YAMLVFSEntry, 16> VFSEntries;
for (const std::string &VFSFile : CI.getHeaderSearchOpts().VFSOverlayFiles) {
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> Buffer =
llvm::MemoryBuffer::getFile(VFSFile);
if (!Buffer)
return;
- vfs::collectVFSFromYAML(std::move(Buffer.get()), /*DiagHandler*/ nullptr,
- VFSFile, VFSEntries);
+ llvm::vfs::collectVFSFromYAML(std::move(Buffer.get()),
+ /*DiagHandler*/ nullptr, VFSFile, VFSEntries);
}
for (auto &E : VFSEntries)
@@ -303,7 +303,7 @@ CompilerInstance::createDiagnostics(DiagnosticOptions *Opts,
FileManager *CompilerInstance::createFileManager() {
if (!hasVirtualFileSystem()) {
- IntrusiveRefCntPtr<vfs::FileSystem> VFS =
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS =
createVFSFromCompilerInvocation(getInvocation(), getDiagnostics());
setVirtualFileSystem(VFS);
}
@@ -372,10 +372,8 @@ static void InitializeFileRemapping(DiagnosticsEngine &Diags,
void CompilerInstance::createPreprocessor(TranslationUnitKind TUKind) {
const PreprocessorOptions &PPOpts = getPreprocessorOpts();
- // Create a PTH manager if we are using some form of a token cache.
- PTHManager *PTHMgr = nullptr;
- if (!PPOpts.TokenCache.empty())
- PTHMgr = PTHManager::Create(PPOpts.TokenCache, getDiagnostics());
+ // The module manager holds a reference to the old preprocessor (if any).
+ ModuleManager.reset();
// Create the Preprocessor.
HeaderSearch *HeaderInfo =
@@ -383,19 +381,12 @@ void CompilerInstance::createPreprocessor(TranslationUnitKind TUKind) {
getDiagnostics(), getLangOpts(), &getTarget());
PP = std::make_shared<Preprocessor>(
Invocation->getPreprocessorOptsPtr(), getDiagnostics(), getLangOpts(),
- getSourceManager(), getPCMCache(), *HeaderInfo, *this, PTHMgr,
+ getSourceManager(), getPCMCache(), *HeaderInfo, *this,
+ /*IdentifierInfoLookup=*/nullptr,
/*OwnsHeaderSearch=*/true, TUKind);
getTarget().adjust(getLangOpts());
PP->Initialize(getTarget(), getAuxTarget());
- // Note that this is different then passing PTHMgr to Preprocessor's ctor.
- // That argument is used as the IdentifierInfoLookup argument to
- // IdentifierTable's ctor.
- if (PTHMgr) {
- PTHMgr->setPreprocessor(&*PP);
- PP->setPTHManager(PTHMgr);
- }
-
if (PPOpts.DetailedRecord)
PP->createPreprocessingRecord();
@@ -1024,7 +1015,7 @@ static InputKind::Language getLanguageFromOptions(const LangOptions &LangOpts) {
return InputKind::OpenCL;
if (LangOpts.CUDA)
return InputKind::CUDA;
- if (LangOpts.ObjC1)
+ if (LangOpts.ObjC)
return LangOpts.CPlusPlus ? InputKind::ObjCXX : InputKind::ObjC;
return LangOpts.CPlusPlus ? InputKind::CXX : InputKind::C;
}
@@ -1269,7 +1260,7 @@ static bool compileAndLoadModule(CompilerInstance &ImportingInstance,
<< Module->Name << Locked.getErrorMessage();
// Clear out any potential leftover.
Locked.unsafeRemoveLockFile();
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case llvm::LockFileManager::LFS_Owned:
// We're responsible for building the module ourselves.
if (!compileModuleImpl(ImportingInstance, ModuleNameLoc, Module,
@@ -1724,7 +1715,9 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
// module cache, we don't know how to rebuild modules.
unsigned ARRFlags = Source == ModuleCache ?
ASTReader::ARR_OutOfDate | ASTReader::ARR_Missing :
- ASTReader::ARR_ConfigurationMismatch;
+ Source == PrebuiltModulePath ?
+ 0 :
+ ASTReader::ARR_ConfigurationMismatch;
switch (ModuleManager->ReadAST(ModuleFileName,
Source == PrebuiltModulePath
? serialization::MK_PrebuiltModule
@@ -2127,7 +2120,7 @@ CompilerInstance::lookupMissingImports(StringRef Name,
return false;
}
-void CompilerInstance::resetAndLeakSema() { BuryPointer(takeSema()); }
+void CompilerInstance::resetAndLeakSema() { llvm::BuryPointer(takeSema()); }
void CompilerInstance::setExternalSemaSource(
IntrusiveRefCntPtr<ExternalSemaSource> ESS) {
diff --git a/lib/Frontend/CompilerInvocation.cpp b/lib/Frontend/CompilerInvocation.cpp
index 782c796452..d491f35769 100644
--- a/lib/Frontend/CompilerInvocation.cpp
+++ b/lib/Frontend/CompilerInvocation.cpp
@@ -11,6 +11,7 @@
#include "TestModuleFileExtension.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/CommentOptions.h"
#include "clang/Basic/DebugInfoOptions.h"
#include "clang/Basic/Diagnostic.h"
@@ -23,13 +24,11 @@
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/TargetOptions.h"
#include "clang/Basic/Version.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Basic/Visibility.h"
#include "clang/Basic/XRayInstr.h"
#include "clang/Config/config.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "clang/Frontend/CommandLineSourceLoc.h"
#include "clang/Frontend/DependencyOutputOptions.h"
#include "clang/Frontend/FrontendDiagnostic.h"
@@ -77,6 +76,7 @@
#include "llvm/Support/Process.h"
#include "llvm/Support/Regex.h"
#include "llvm/Support/VersionTuple.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
@@ -120,25 +120,25 @@ CompilerInvocationBase::~CompilerInvocationBase() = default;
static unsigned getOptimizationLevel(ArgList &Args, InputKind IK,
DiagnosticsEngine &Diags) {
- unsigned DefaultOpt = 0;
+ unsigned DefaultOpt = llvm::CodeGenOpt::None;
if (IK.getLanguage() == InputKind::OpenCL && !Args.hasArg(OPT_cl_opt_disable))
- DefaultOpt = 2;
+ DefaultOpt = llvm::CodeGenOpt::Default;
if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
if (A->getOption().matches(options::OPT_O0))
- return 0;
+ return llvm::CodeGenOpt::None;
if (A->getOption().matches(options::OPT_Ofast))
- return 3;
+ return llvm::CodeGenOpt::Aggressive;
assert(A->getOption().matches(options::OPT_O));
StringRef S(A->getValue());
if (S == "s" || S == "z" || S.empty())
- return 2;
+ return llvm::CodeGenOpt::Default;
if (S == "g")
- return 1;
+ return llvm::CodeGenOpt::Less;
return getLastArgIntValue(Args, OPT_O, DefaultOpt, Diags);
}
@@ -181,6 +181,11 @@ static void addDiagnosticArgs(ArgList &Args, OptSpecifier Group,
}
}
+// Parse the Static Analyzer configuration. If \p Diags is set to nullptr,
+// it won't verify the input.
+static void parseAnalyzerConfigs(AnalyzerOptions &AnOpts,
+ DiagnosticsEngine *Diags);
+
static void getAllNoBuiltinFuncValues(ArgList &Args,
std::vector<std::string> &Funcs) {
SmallVector<const char *, 8> Values;
@@ -279,7 +284,14 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
}
Opts.ShowCheckerHelp = Args.hasArg(OPT_analyzer_checker_help);
+ Opts.ShowConfigOptionsList = Args.hasArg(OPT_analyzer_config_help);
Opts.ShowEnabledCheckerList = Args.hasArg(OPT_analyzer_list_enabled_checkers);
+ Opts.ShouldEmitErrorsOnInvalidConfigValue =
+ /* negated */!llvm::StringSwitch<bool>(
+ Args.getLastArgValue(OPT_analyzer_config_compatibility_mode))
+ .Case("true", true)
+ .Case("false", false)
+ .Default(false);
Opts.DisableAllChecks = Args.hasArg(OPT_analyzer_disable_all_checks);
Opts.visualizeExplodedGraphWithGraphViz =
@@ -316,7 +328,7 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
// Go through the analyzer configuration options.
for (const auto *A : Args.filtered(OPT_analyzer_config)) {
- A->claim();
+
// We can have a list of comma separated config names, e.g:
// '-analyzer-config key1=val1,key2=val2'
StringRef configList = A->getValue();
@@ -338,10 +350,25 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
Success = false;
break;
}
+
+ // TODO: Check checker options too, possibly in CheckerRegistry.
+ // Leave unknown non-checker configs unclaimed.
+ if (!key.contains(":") && Opts.isUnknownAnalyzerConfig(key)) {
+ if (Opts.ShouldEmitErrorsOnInvalidConfigValue)
+ Diags.Report(diag::err_analyzer_config_unknown) << key;
+ continue;
+ }
+
+ A->claim();
Opts.Config[key] = val;
}
}
+ if (Opts.ShouldEmitErrorsOnInvalidConfigValue)
+ parseAnalyzerConfigs(Opts, &Diags);
+ else
+ parseAnalyzerConfigs(Opts, nullptr);
+
llvm::raw_string_ostream os(Opts.FullCompilerInvocation);
for (unsigned i = 0; i < Args.getNumInputArgStrings(); ++i) {
if (i != 0)
@@ -353,6 +380,91 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
return Success;
}
+static StringRef getStringOption(AnalyzerOptions::ConfigTable &Config,
+ StringRef OptionName, StringRef DefaultVal) {
+ return Config.insert({OptionName, DefaultVal}).first->second;
+}
+
+static void initOption(AnalyzerOptions::ConfigTable &Config,
+ DiagnosticsEngine *Diags,
+ StringRef &OptionField, StringRef Name,
+ StringRef DefaultVal) {
+ // String options may be known to be invalid (e.g. if the expected string is
+ // a file name, but the file does not exist); those will have to be checked
+ // in parseConfigs.
+ OptionField = getStringOption(Config, Name, DefaultVal);
+}
+
+static void initOption(AnalyzerOptions::ConfigTable &Config,
+ DiagnosticsEngine *Diags,
+ bool &OptionField, StringRef Name, bool DefaultVal) {
+ auto PossiblyInvalidVal = llvm::StringSwitch<Optional<bool>>(
+ getStringOption(Config, Name, (DefaultVal ? "true" : "false")))
+ .Case("true", true)
+ .Case("false", false)
+ .Default(None);
+
+ if (!PossiblyInvalidVal) {
+ if (Diags)
+ Diags->Report(diag::err_analyzer_config_invalid_input)
+ << Name << "a boolean";
+ else
+ OptionField = DefaultVal;
+ } else
+ OptionField = PossiblyInvalidVal.getValue();
+}
+
+static void initOption(AnalyzerOptions::ConfigTable &Config,
+ DiagnosticsEngine *Diags,
+ unsigned &OptionField, StringRef Name,
+ unsigned DefaultVal) {
+
+ OptionField = DefaultVal;
+ bool HasFailed = getStringOption(Config, Name, std::to_string(DefaultVal))
+ .getAsInteger(10, OptionField);
+ if (Diags && HasFailed)
+ Diags->Report(diag::err_analyzer_config_invalid_input)
+ << Name << "an unsigned";
+}
+
+static void parseAnalyzerConfigs(AnalyzerOptions &AnOpts,
+ DiagnosticsEngine *Diags) {
+ // TODO: There's no need to store the entire config table; it would be
+ // enough to store only the checker options.
+
+#define ANALYZER_OPTION(TYPE, NAME, CMDFLAG, DESC, DEFAULT_VAL) \
+ initOption(AnOpts.Config, Diags, AnOpts.NAME, CMDFLAG, DEFAULT_VAL);
+
+#define ANALYZER_OPTION_DEPENDS_ON_USER_MODE(TYPE, NAME, CMDFLAG, DESC, \
+ SHALLOW_VAL, DEEP_VAL) \
+ switch (AnOpts.getUserMode()) { \
+ case UMK_Shallow: \
+ initOption(AnOpts.Config, Diags, AnOpts.NAME, CMDFLAG, SHALLOW_VAL); \
+ break; \
+ case UMK_Deep: \
+ initOption(AnOpts.Config, Diags, AnOpts.NAME, CMDFLAG, DEEP_VAL); \
+ break; \
+ } \
+
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.def"
+#undef ANALYZER_OPTION
+#undef ANALYZER_OPTION_DEPENDS_ON_USER_MODE
+
+ // At this point, AnalyzerOptions is configured. Let's validate some options.
+
+ if (!Diags)
+ return;
+
+ if (!AnOpts.CTUDir.empty() && !llvm::sys::fs::is_directory(AnOpts.CTUDir))
+ Diags->Report(diag::err_analyzer_config_invalid_input) << "ctu-dir"
+ << "a filename";
+
+ if (!AnOpts.ModelPath.empty() &&
+ !llvm::sys::fs::is_directory(AnOpts.ModelPath))
+ Diags->Report(diag::err_analyzer_config_invalid_input) << "model-path"
+ << "a filename";
+}
+
static bool ParseMigratorArgs(MigratorOptions &Opts, ArgList &Args) {
Opts.NoNSAllocReallocError = Args.hasArg(OPT_migrator_no_nsalloc_error);
Opts.NoFinalizeRemoval = Args.hasArg(OPT_migrator_no_finalize_removal);
@@ -592,12 +704,29 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.DwarfVersion = getLastArgIntValue(Args, OPT_dwarf_version_EQ, 0, Diags);
Opts.DebugColumnInfo = Args.hasArg(OPT_dwarf_column_info);
Opts.EmitCodeView = Args.hasArg(OPT_gcodeview);
+ Opts.CodeViewGHash = Args.hasArg(OPT_gcodeview_ghash);
Opts.MacroDebugInfo = Args.hasArg(OPT_debug_info_macro);
Opts.WholeProgramVTables = Args.hasArg(OPT_fwhole_program_vtables);
Opts.LTOVisibilityPublicStd = Args.hasArg(OPT_flto_visibility_public_std);
- Opts.EnableSplitDwarf = Args.hasArg(OPT_enable_split_dwarf);
Opts.SplitDwarfFile = Args.getLastArgValue(OPT_split_dwarf_file);
Opts.SplitDwarfInlining = !Args.hasArg(OPT_fno_split_dwarf_inlining);
+
+ if (Arg *A =
+ Args.getLastArg(OPT_enable_split_dwarf, OPT_enable_split_dwarf_EQ)) {
+ if (A->getOption().matches(options::OPT_enable_split_dwarf)) {
+ Opts.setSplitDwarfMode(CodeGenOptions::SplitFileFission);
+ } else {
+ StringRef Name = A->getValue();
+ if (Name == "single")
+ Opts.setSplitDwarfMode(CodeGenOptions::SingleFileFission);
+ else if (Name == "split")
+ Opts.setSplitDwarfMode(CodeGenOptions::SplitFileFission);
+ else
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << Name;
+ }
+ }
+
Opts.DebugTypeExtRefs = Args.hasArg(OPT_dwarf_ext_refs);
Opts.DebugExplicitImport = Args.hasArg(OPT_dwarf_explicit_import);
Opts.DebugFwdTemplateParams = Args.hasArg(OPT_debug_forward_template_params);
@@ -614,6 +743,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.DisableLifetimeMarkers = Args.hasArg(OPT_disable_lifetimemarkers);
Opts.DisableO0ImplyOptNone = Args.hasArg(OPT_disable_O0_optnone);
Opts.DisableRedZone = Args.hasArg(OPT_disable_red_zone);
+ Opts.IndirectTlsSegRefs = Args.hasArg(OPT_mno_tls_direct_seg_refs);
Opts.ForbidGuardVariables = Args.hasArg(OPT_fforbid_guard_variables);
Opts.UseRegisterSizedBitfieldAccess = Args.hasArg(
OPT_fuse_register_sized_bitfield_access);
@@ -625,6 +755,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Args.hasFlag(OPT_ffine_grained_bitfield_accesses,
OPT_fno_fine_grained_bitfield_accesses, false);
Opts.DwarfDebugFlags = Args.getLastArgValue(OPT_dwarf_debug_flags);
+ Opts.RecordCommandLine = Args.getLastArgValue(OPT_record_command_line);
Opts.MergeAllConstants = Args.hasArg(OPT_fmerge_all_constants);
Opts.NoCommon = Args.hasArg(OPT_fno_common);
Opts.NoImplicitFloat = Args.hasArg(OPT_no_implicit_float);
@@ -649,6 +780,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
: Args.hasArg(OPT_gpubnames)
? llvm::DICompileUnit::DebugNameTableKind::Default
: llvm::DICompileUnit::DebugNameTableKind::None);
+ Opts.DebugRangesBaseAddress = Args.hasArg(OPT_fdebug_ranges_base_address);
setPGOInstrumentor(Opts, Args, Diags);
Opts.InstrProfileOutput =
@@ -657,6 +789,13 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Args.getLastArgValue(OPT_fprofile_instrument_use_path_EQ);
if (!Opts.ProfileInstrumentUsePath.empty())
setPGOUseInstrumentor(Opts, Opts.ProfileInstrumentUsePath);
+ Opts.ProfileRemappingFile =
+ Args.getLastArgValue(OPT_fprofile_remapping_file_EQ);
+ if (!Opts.ProfileRemappingFile.empty() && !Opts.ExperimentalNewPassManager) {
+ Diags.Report(diag::err_drv_argument_only_allowed_with)
+ << Args.getLastArg(OPT_fprofile_remapping_file_EQ)->getAsString(Args)
+ << "-fexperimental-new-pass-manager";
+ }
Opts.CoverageMapping =
Args.hasFlag(OPT_fcoverage_mapping, OPT_fno_coverage_mapping, false);
@@ -669,7 +808,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.RegisterGlobalDtorsWithAtExit =
Args.hasArg(OPT_fregister_global_dtors_with_atexit);
Opts.CXXCtorDtorAliases = Args.hasArg(OPT_mconstructor_aliases);
- Opts.CodeModel = getCodeModel(Args, Diags);
+ Opts.CodeModel = TargetOpts.CodeModel;
Opts.DebugPass = Args.getLastArgValue(OPT_mdebug_pass);
Opts.DisableFPElim =
(Args.hasArg(OPT_mdisable_fp_elim) || Args.hasArg(OPT_pg));
@@ -802,6 +941,10 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.CoverageExtraChecksum = Args.hasArg(OPT_coverage_cfg_checksum);
Opts.CoverageNoFunctionNamesInData =
Args.hasArg(OPT_coverage_no_function_names_in_data);
+ Opts.ProfileFilterFiles =
+ Args.getLastArgValue(OPT_fprofile_filter_files_EQ);
+ Opts.ProfileExcludeFiles =
+ Args.getLastArgValue(OPT_fprofile_exclude_files_EQ);
Opts.CoverageExitBlockBeforeBody =
Args.hasArg(OPT_coverage_exit_block_before_body);
if (Args.hasArg(OPT_coverage_version_EQ)) {
@@ -849,7 +992,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
for (const auto &arg : ASL) {
StringRef ArgStr(arg);
Opts.CmdArgs.insert(Opts.CmdArgs.end(), ArgStr.begin(), ArgStr.end());
- // using \00 to seperate each commandline options.
+ // using \00 to separate each command line option.
Opts.CmdArgs.push_back('\0');
}
}
@@ -960,11 +1103,11 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Args.hasArg(OPT_fsanitize_cfi_icall_generalize_pointers);
Opts.SanitizeStats = Args.hasArg(OPT_fsanitize_stats);
if (Arg *A = Args.getLastArg(
- OPT_fsanitize_address_poison_class_member_array_new_cookie,
- OPT_fno_sanitize_address_poison_class_member_array_new_cookie)) {
- Opts.SanitizeAddressPoisonClassMemberArrayNewCookie =
+ OPT_fsanitize_address_poison_custom_array_cookie,
+ OPT_fno_sanitize_address_poison_custom_array_cookie)) {
+ Opts.SanitizeAddressPoisonCustomArrayCookie =
A->getOption().getID() ==
- OPT_fsanitize_address_poison_class_member_array_new_cookie;
+ OPT_fsanitize_address_poison_custom_array_cookie;
}
if (Arg *A = Args.getLastArg(OPT_fsanitize_address_use_after_scope,
OPT_fno_sanitize_address_use_after_scope)) {
@@ -973,6 +1116,11 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
}
Opts.SanitizeAddressGlobalsDeadStripping =
Args.hasArg(OPT_fsanitize_address_globals_dead_stripping);
+ if (Arg *A = Args.getLastArg(OPT_fsanitize_address_use_odr_indicator,
+ OPT_fno_sanitize_address_use_odr_indicator)) {
+ Opts.SanitizeAddressUseOdrIndicator =
+ A->getOption().getID() == OPT_fsanitize_address_use_odr_indicator;
+ }
Opts.SSPBufferSize =
getLastArgIntValue(Args, OPT_stack_protector_buffer_size, 8, Diags);
Opts.StackRealignment = Args.hasArg(OPT_mstackrealign);
@@ -1008,6 +1156,10 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
}
}
+
+ if (Args.hasArg(OPT_fno_objc_convert_messages_to_runtime_calls))
+ Opts.ObjCConvertMessagesToRuntimeCalls = 0;
+
if (Args.getLastArg(OPT_femulated_tls) ||
Args.getLastArg(OPT_fno_emulated_tls)) {
Opts.ExplicitEmulatedTLS = true;
@@ -1130,8 +1282,9 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.Addrsig = Args.hasArg(OPT_faddrsig);
- if (Arg *A = Args.getLastArg(OPT_msign_return_address)) {
+ if (Arg *A = Args.getLastArg(OPT_msign_return_address_EQ)) {
StringRef SignScope = A->getValue();
+
if (SignScope.equals_lower("none"))
Opts.setSignReturnAddress(CodeGenOptions::SignReturnAddressScope::None);
else if (SignScope.equals_lower("all"))
@@ -1141,9 +1294,26 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
CodeGenOptions::SignReturnAddressScope::NonLeaf);
else
Diags.Report(diag::err_drv_invalid_value)
- << A->getAsString(Args) << A->getValue();
+ << A->getAsString(Args) << SignScope;
+
+ if (Arg *A = Args.getLastArg(OPT_msign_return_address_key_EQ)) {
+ StringRef SignKey = A->getValue();
+ if (!SignScope.empty() && !SignKey.empty()) {
+ if (SignKey.equals_lower("a_key"))
+ Opts.setSignReturnAddressKey(
+ CodeGenOptions::SignReturnAddressKeyValue::AKey);
+ else if (SignKey.equals_lower("b_key"))
+ Opts.setSignReturnAddressKey(
+ CodeGenOptions::SignReturnAddressKeyValue::BKey);
+ else
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << SignKey;
+ }
+ }
}
+ Opts.BranchTargetEnforcement = Args.hasArg(OPT_mbranch_target_enforce);
+
Opts.KeepStaticConsts = Args.hasArg(OPT_fkeep_static_consts);
Opts.SpeculativeLoadHardening = Args.hasArg(OPT_mspeculative_load_hardening);
@@ -1339,7 +1509,7 @@ bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
Success = false;
}
else
- llvm::sort(Opts.VerifyPrefixes.begin(), Opts.VerifyPrefixes.end());
+ llvm::sort(Opts.VerifyPrefixes);
DiagnosticLevelMask DiagMask = DiagnosticLevelMask::None;
Success &= parseDiagnosticLevelMask("-verify-ignore-unexpected=",
Args.getAllArgValues(OPT_verify_ignore_unexpected_EQ),
@@ -1448,7 +1618,7 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Opts.ProgramAction = frontend::EmitObj; break;
case OPT_fixit_EQ:
Opts.FixItSuffix = A->getValue();
- // fall-through!
+ LLVM_FALLTHROUGH;
case OPT_fixit:
Opts.ProgramAction = frontend::FixIt; break;
case OPT_emit_module:
@@ -1459,8 +1629,6 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Opts.ProgramAction = frontend::GenerateHeaderModule; break;
case OPT_emit_pch:
Opts.ProgramAction = frontend::GeneratePCH; break;
- case OPT_emit_pth:
- Opts.ProgramAction = frontend::GeneratePTH; break;
case OPT_init_only:
Opts.ProgramAction = frontend::InitOnly; break;
case OPT_fsyntax_only:
@@ -1469,8 +1637,6 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Opts.ProgramAction = frontend::ModuleFileInfo; break;
case OPT_verify_pch:
Opts.ProgramAction = frontend::VerifyPCH; break;
- case OPT_print_decl_contexts:
- Opts.ProgramAction = frontend::PrintDeclContext; break;
case OPT_print_preamble:
Opts.ProgramAction = frontend::PrintPreamble; break;
case OPT_E:
@@ -1891,7 +2057,7 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
if (IK.getLanguage() == InputKind::Asm) {
Opts.AsmPreprocessor = 1;
} else if (IK.isObjectiveC()) {
- Opts.ObjC1 = Opts.ObjC2 = 1;
+ Opts.ObjC = 1;
}
if (LangStd == LangStandard::lang_unspecified) {
@@ -2154,6 +2320,9 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
}
}
+ if (Args.hasArg(OPT_fno_dllexport_inlines))
+ Opts.DllExportInlines = false;
+
if (const Arg *A = Args.getLastArg(OPT_fcf_protection_EQ)) {
StringRef Name = A->getValue();
if (Name == "full" || Name == "branch") {
@@ -2220,9 +2389,9 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
if (Opts.CUDAIsDevice && Args.hasArg(OPT_fcuda_approx_transcendentals))
Opts.CUDADeviceApproxTranscendentals = 1;
- Opts.CUDARelocatableDeviceCode = Args.hasArg(OPT_fcuda_rdc);
+ Opts.GPURelocatableDeviceCode = Args.hasArg(OPT_fgpu_rdc);
- if (Opts.ObjC1) {
+ if (Opts.ObjC) {
if (Arg *arg = Args.getLastArg(OPT_fobjc_runtime_EQ)) {
StringRef value = arg->getValue();
if (Opts.ObjCRuntime.tryParse(value))
@@ -2289,8 +2458,19 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
if (Args.hasArg(OPT_print_ivar_layout))
Opts.ObjCGCBitmapPrint = 1;
+
if (Args.hasArg(OPT_fno_constant_cfstrings))
Opts.NoConstantCFStrings = 1;
+ if (const auto *A = Args.getLastArg(OPT_fcf_runtime_abi_EQ))
+ Opts.CFRuntime =
+ llvm::StringSwitch<LangOptions::CoreFoundationABI>(A->getValue())
+ .Cases("unspecified", "standalone", "objc",
+ LangOptions::CoreFoundationABI::ObjectiveC)
+ .Cases("swift", "swift-5.0",
+ LangOptions::CoreFoundationABI::Swift5_0)
+ .Case("swift-4.2", LangOptions::CoreFoundationABI::Swift4_2)
+ .Case("swift-4.1", LangOptions::CoreFoundationABI::Swift4_1)
+ .Default(LangOptions::CoreFoundationABI::ObjectiveC);
if (Args.hasArg(OPT_fzvector))
Opts.ZVector = 1;
@@ -2315,6 +2495,9 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
if (Args.hasArg(OPT_fvisibility_inlines_hidden))
Opts.InlineVisibilityHidden = 1;
+ if (Args.hasArg(OPT_fvisibility_global_new_delete_hidden))
+ Opts.GlobalAllocationFunctionVisibilityHidden = 1;
+
if (Args.hasArg(OPT_ftrapv)) {
Opts.setSignedOverflowBehavior(LangOptions::SOB_Trapping);
// Set the handler, if one is specified.
@@ -2338,7 +2521,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
VT.getSubminor().getValueOr(0);
}
- // Mimicing gcc's behavior, trigraphs are only enabled if -trigraphs
+ // Mimicking gcc's behavior, trigraphs are only enabled if -trigraphs
// is specified, or -std is set to a conforming mode.
// Trigraphs are disabled by default in c++1z onwards.
Opts.Trigraphs = !Opts.GNUMode && !Opts.MSVCCompat && !Opts.CPlusPlus17;
@@ -2419,7 +2602,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.ImplicitModules = !Args.hasArg(OPT_fno_implicit_modules);
Opts.CharIsSigned = Opts.OpenCL || !Args.hasArg(OPT_fno_signed_char);
Opts.WChar = Opts.CPlusPlus && !Args.hasArg(OPT_fno_wchar);
- Opts.Char8 = Args.hasArg(OPT_fchar8__t);
+ Opts.Char8 = Args.hasFlag(OPT_fchar8__t, OPT_fno_char8__t, Opts.CPlusPlus2a);
if (const Arg *A = Args.getLastArg(OPT_fwchar_type_EQ)) {
Opts.WCharSize = llvm::StringSwitch<unsigned>(A->getValue())
.Case("char", 1)
@@ -2502,7 +2685,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.CurrentModule = Opts.ModuleName;
Opts.AppExt = Args.hasArg(OPT_fapplication_extension);
Opts.ModuleFeatures = Args.getAllArgValues(OPT_fmodule_feature);
- llvm::sort(Opts.ModuleFeatures.begin(), Opts.ModuleFeatures.end());
+ llvm::sort(Opts.ModuleFeatures);
Opts.NativeHalfType |= Args.hasArg(OPT_fnative_half_type);
Opts.NativeHalfArgsAndReturns |= Args.hasArg(OPT_fnative_half_arguments_and_returns);
// Enable HalfArgsAndReturns if present in Args or if NativeHalfArgsAndReturns
@@ -2651,6 +2834,14 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.Exceptions = 0;
Opts.CXXExceptions = 0;
}
+ if (Opts.OpenMPIsDevice && T.isNVPTX()) {
+ Opts.OpenMPCUDANumSMs =
+ getLastArgIntValue(Args, options::OPT_fopenmp_cuda_number_of_sm_EQ,
+ Opts.OpenMPCUDANumSMs, Diags);
+ Opts.OpenMPCUDABlocksPerSM =
+ getLastArgIntValue(Args, options::OPT_fopenmp_cuda_blocks_per_sm_EQ,
+ Opts.OpenMPCUDABlocksPerSM, Diags);
+ }
// Get the OpenMP target triples if any.
if (Arg *A = Args.getLastArg(options::OPT_fopenmp_targets_EQ)) {
@@ -2807,6 +2998,8 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.setClangABICompat(LangOptions::ClangABI::Ver4);
else if (Major <= 6)
Opts.setClangABICompat(LangOptions::ClangABI::Ver6);
+ else if (Major <= 7)
+ Opts.setClangABICompat(LangOptions::ClangABI::Ver7);
} else if (Ver != "latest") {
Diags.Report(diag::err_drv_invalid_value)
<< A->getAsString(Args) << A->getValue();
@@ -2835,12 +3028,10 @@ static bool isStrictlyPreprocessorAction(frontend::ActionKind Action) {
case frontend::GenerateModuleInterface:
case frontend::GenerateHeaderModule:
case frontend::GeneratePCH:
- case frontend::GeneratePTH:
case frontend::ParseSyntaxOnly:
case frontend::ModuleFileInfo:
case frontend::VerifyPCH:
case frontend::PluginAction:
- case frontend::PrintDeclContext:
case frontend::RewriteObjC:
case frontend::RewriteTest:
case frontend::RunAnalysis:
@@ -2865,15 +3056,10 @@ static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
DiagnosticsEngine &Diags,
frontend::ActionKind Action) {
Opts.ImplicitPCHInclude = Args.getLastArgValue(OPT_include_pch);
- Opts.ImplicitPTHInclude = Args.getLastArgValue(OPT_include_pth);
Opts.PCHWithHdrStop = Args.hasArg(OPT_pch_through_hdrstop_create) ||
Args.hasArg(OPT_pch_through_hdrstop_use);
Opts.PCHWithHdrStopCreate = Args.hasArg(OPT_pch_through_hdrstop_create);
Opts.PCHThroughHeader = Args.getLastArgValue(OPT_pch_through_header_EQ);
- if (const Arg *A = Args.getLastArg(OPT_token_cache))
- Opts.TokenCache = A->getValue();
- else
- Opts.TokenCache = Opts.ImplicitPTHInclude;
Opts.UsePredefines = !Args.hasArg(OPT_undef);
Opts.DetailedRecord = Args.hasArg(OPT_detailed_preprocessing_record);
Opts.DisablePCHValidation = Args.hasArg(OPT_fno_validate_pch);
@@ -2978,6 +3164,7 @@ static void ParsePreprocessorOutputArgs(PreprocessorOutputOptions &Opts,
static void ParseTargetArgs(TargetOptions &Opts, ArgList &Args,
DiagnosticsEngine &Diags) {
+ Opts.CodeModel = getCodeModel(Args, Diags);
Opts.ABI = Args.getLastArgValue(OPT_target_abi);
if (Arg *A = Args.getLastArg(OPT_meabi)) {
StringRef Value = A->getValue();
@@ -3058,6 +3245,7 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
Res.getTargetOpts(), Res.getFrontendOpts());
ParseHeaderSearchArgs(Res.getHeaderSearchOpts(), Args,
Res.getFileSystemOpts().WorkingDir);
+ llvm::Triple T(Res.getTargetOpts().Triple);
if (DashX.getFormat() == InputKind::Precompiled ||
DashX.getLanguage() == InputKind::LLVM_IR) {
// ObjCAAutoRefCount and Sanitize LangOpts are used to setup the
@@ -3072,12 +3260,18 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
parseSanitizerKinds("-fsanitize=", Args.getAllArgValues(OPT_fsanitize_EQ),
Diags, LangOpts.Sanitize);
} else {
- // Other LangOpts are only initialzed when the input is not AST or LLVM IR.
+ // Other LangOpts are only initialized when the input is not AST or LLVM IR.
// FIXME: Should we really be calling this for an InputKind::Asm input?
ParseLangArgs(LangOpts, Args, DashX, Res.getTargetOpts(),
Res.getPreprocessorOpts(), Diags);
if (Res.getFrontendOpts().ProgramAction == frontend::RewriteObjC)
LangOpts.ObjCExceptions = 1;
+ if (T.isOSDarwin() && DashX.isPreprocessed()) {
+ // Suppress the Darwin-specific 'stdlibcxx-not-found' diagnostic for
+ // preprocessed input as we don't expect it to be used with -stdlib=libc++
+ // anyway.
+ Res.getDiagnosticOpts().Warnings.push_back("no-stdlibcxx-not-found");
+ }
}
LangOpts.FunctionAlignment =
@@ -3109,7 +3303,6 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
Res.getFrontendOpts().ProgramAction);
// Turn on -Wspir-compat for SPIR target.
- llvm::Triple T(Res.getTargetOpts().Triple);
auto Arch = T.getArch();
if (Arch == llvm::Triple::spir || Arch == llvm::Triple::spir64) {
Res.getDiagnosticOpts().Warnings.push_back("spir-compat");
@@ -3193,6 +3386,12 @@ std::string CompilerInvocation::getModuleHash() const {
code = ext->hashExtension(code);
}
+ // When compiling with -gmodules, also hash -fdebug-prefix-map as it
+ // affects the debug info in the PCM.
+ if (getCodeGenOpts().DebugTypeExtRefs)
+ for (const auto &KeyValue : getCodeGenOpts().DebugPrefixMap)
+ code = hash_combine(code, KeyValue.first, KeyValue.second);
+
// Extend the signature with the enabled sanitizers, if at least one is
// enabled. Sanitizers which cannot affect AST generation aren't hashed.
SanitizerSet SanHash = LangOpts->Sanitize;
@@ -3232,53 +3431,40 @@ uint64_t getLastArgUInt64Value(const ArgList &Args, OptSpecifier Id,
return getLastArgIntValueImpl<uint64_t>(Args, Id, Default, Diags);
}
-void BuryPointer(const void *Ptr) {
- // This function may be called only a small fixed amount of times per each
- // invocation, otherwise we do actually have a leak which we want to report.
- // If this function is called more than kGraveYardMaxSize times, the pointers
- // will not be properly buried and a leak detector will report a leak, which
- // is what we want in such case.
- static const size_t kGraveYardMaxSize = 16;
- LLVM_ATTRIBUTE_UNUSED static const void *GraveYard[kGraveYardMaxSize];
- static std::atomic<unsigned> GraveYardSize;
- unsigned Idx = GraveYardSize++;
- if (Idx >= kGraveYardMaxSize)
- return;
- GraveYard[Idx] = Ptr;
-}
-
-IntrusiveRefCntPtr<vfs::FileSystem>
+IntrusiveRefCntPtr<llvm::vfs::FileSystem>
createVFSFromCompilerInvocation(const CompilerInvocation &CI,
DiagnosticsEngine &Diags) {
- return createVFSFromCompilerInvocation(CI, Diags, vfs::getRealFileSystem());
+ return createVFSFromCompilerInvocation(CI, Diags,
+ llvm::vfs::getRealFileSystem());
}
-IntrusiveRefCntPtr<vfs::FileSystem>
-createVFSFromCompilerInvocation(const CompilerInvocation &CI,
- DiagnosticsEngine &Diags,
- IntrusiveRefCntPtr<vfs::FileSystem> BaseFS) {
+IntrusiveRefCntPtr<llvm::vfs::FileSystem> createVFSFromCompilerInvocation(
+ const CompilerInvocation &CI, DiagnosticsEngine &Diags,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> BaseFS) {
if (CI.getHeaderSearchOpts().VFSOverlayFiles.empty())
return BaseFS;
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> Overlay(
- new vfs::OverlayFileSystem(BaseFS));
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> Result = BaseFS;
// earlier vfs files are on the bottom
for (const auto &File : CI.getHeaderSearchOpts().VFSOverlayFiles) {
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> Buffer =
- BaseFS->getBufferForFile(File);
+ Result->getBufferForFile(File);
if (!Buffer) {
Diags.Report(diag::err_missing_vfs_overlay_file) << File;
continue;
}
- IntrusiveRefCntPtr<vfs::FileSystem> FS = vfs::getVFSFromYAML(
- std::move(Buffer.get()), /*DiagHandler*/ nullptr, File);
- if (FS)
- Overlay->pushOverlay(FS);
- else
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS = llvm::vfs::getVFSFromYAML(
+ std::move(Buffer.get()), /*DiagHandler*/ nullptr, File,
+ /*DiagContext*/ nullptr, Result);
+ if (!FS) {
Diags.Report(diag::err_invalid_vfs_overlay) << File;
+ continue;
+ }
+
+ Result = FS;
}
- return Overlay;
+ return Result;
}
} // namespace clang
diff --git a/lib/Frontend/CreateInvocationFromCommandLine.cpp b/lib/Frontend/CreateInvocationFromCommandLine.cpp
index c3ce7ce2b7..2d4c40f8b9 100644
--- a/lib/Frontend/CreateInvocationFromCommandLine.cpp
+++ b/lib/Frontend/CreateInvocationFromCommandLine.cpp
@@ -32,7 +32,7 @@ using namespace llvm::opt;
/// argument vector.
std::unique_ptr<CompilerInvocation> clang::createInvocationFromCommandLine(
ArrayRef<const char *> ArgList, IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
- IntrusiveRefCntPtr<vfs::FileSystem> VFS) {
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS) {
if (!Diags.get()) {
// No diagnostics engine was provided, so create our own diagnostics object
// with the default options.
diff --git a/lib/Frontend/DependencyFile.cpp b/lib/Frontend/DependencyFile.cpp
index fdccd9134e..a03d4b79c8 100644
--- a/lib/Frontend/DependencyFile.cpp
+++ b/lib/Frontend/DependencyFile.cpp
@@ -17,7 +17,6 @@
#include "clang/Frontend/DependencyOutputOptions.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Lex/DirectoryLookup.h"
-#include "clang/Lex/LexDiagnostic.h"
#include "clang/Lex/ModuleMap.h"
#include "clang/Lex/PPCallbacks.h"
#include "clang/Lex/Preprocessor.h"
diff --git a/lib/Frontend/FrontendAction.cpp b/lib/Frontend/FrontendAction.cpp
index d24700535e..83152bd353 100644
--- a/lib/Frontend/FrontendAction.cpp
+++ b/lib/Frontend/FrontendAction.cpp
@@ -26,6 +26,7 @@
#include "clang/Serialization/ASTDeserializationListener.h"
#include "clang/Serialization/ASTReader.h"
#include "clang/Serialization/GlobalModuleIndex.h"
+#include "llvm/Support/BuryPointer.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
@@ -276,7 +277,7 @@ static void addHeaderInclude(StringRef HeaderName,
bool IsExternC) {
if (IsExternC && LangOpts.CPlusPlus)
Includes += "extern \"C\" {\n";
- if (LangOpts.ObjC1)
+ if (LangOpts.ObjC)
Includes += "#import \"";
else
Includes += "#include \"";
@@ -342,8 +343,8 @@ static std::error_code collectModuleHeaderIncludes(
SmallString<128> DirNative;
llvm::sys::path::native(UmbrellaDir.Entry->getName(), DirNative);
- vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
- for (vfs::recursive_directory_iterator Dir(FS, DirNative, EC), End;
+ llvm::vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
+ for (llvm::vfs::recursive_directory_iterator Dir(FS, DirNative, EC), End;
Dir != End && !EC; Dir.increment(EC)) {
// Check whether this entry has an extension typically associated with
// headers.
@@ -568,7 +569,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
auto &MM = ASTReader->getModuleManager();
auto &PrimaryModule = MM.getPrimaryModule();
- for (ModuleFile &MF : MM)
+ for (serialization::ModuleFile &MF : MM)
if (&MF != &PrimaryModule)
CI.getFrontendOpts().ModuleFiles.push_back(MF.FileName);
@@ -696,8 +697,9 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
SmallString<128> DirNative;
llvm::sys::path::native(PCHDir->getName(), DirNative);
bool Found = false;
- vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
- for (vfs::directory_iterator Dir = FS.dir_begin(DirNative, EC), DirEnd;
+ llvm::vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
+ for (llvm::vfs::directory_iterator Dir = FS.dir_begin(DirNative, EC),
+ DirEnd;
Dir != DirEnd && !EC; Dir.increment(EC)) {
// Check whether this is an acceptable AST file.
if (ASTReader::isAcceptableASTFile(
@@ -948,7 +950,7 @@ void FrontendAction::EndSourceFile() {
if (DisableFree) {
CI.resetAndLeakSema();
CI.resetAndLeakASTContext();
- BuryPointer(CI.takeASTConsumer().get());
+ llvm::BuryPointer(CI.takeASTConsumer().get());
} else {
CI.setSema(nullptr);
CI.setASTContext(nullptr);
@@ -973,7 +975,7 @@ void FrontendAction::EndSourceFile() {
CI.resetAndLeakPreprocessor();
CI.resetAndLeakSourceManager();
CI.resetAndLeakFileManager();
- BuryPointer(CurrentASTUnit.release());
+ llvm::BuryPointer(std::move(CurrentASTUnit));
} else {
CI.setPreprocessor(nullptr);
CI.setSourceManager(nullptr);
diff --git a/lib/Frontend/FrontendActions.cpp b/lib/Frontend/FrontendActions.cpp
index 08d91e6a5a..a407dfc162 100644
--- a/lib/Frontend/FrontendActions.cpp
+++ b/lib/Frontend/FrontendActions.cpp
@@ -92,12 +92,6 @@ ASTViewAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
}
std::unique_ptr<ASTConsumer>
-DeclContextPrintAction::CreateASTConsumer(CompilerInstance &CI,
- StringRef InFile) {
- return CreateDeclContextPrinter();
-}
-
-std::unique_ptr<ASTConsumer>
GeneratePCHAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
std::string Sysroot;
if (!ComputeASTConsumerArguments(CI, /*ref*/ Sysroot))
@@ -756,16 +750,6 @@ void DumpTokensAction::ExecuteAction() {
} while (Tok.isNot(tok::eof));
}
-void GeneratePTHAction::ExecuteAction() {
- CompilerInstance &CI = getCompilerInstance();
- std::unique_ptr<raw_pwrite_stream> OS =
- CI.createDefaultOutputFile(true, getCurrentFile());
- if (!OS)
- return;
-
- CacheTokens(CI.getPreprocessor(), OS.get());
-}
-
void PreprocessOnlyAction::ExecuteAction() {
Preprocessor &PP = getCompilerInstance().getPreprocessor();
diff --git a/lib/Frontend/InitHeaderSearch.cpp b/lib/Frontend/InitHeaderSearch.cpp
index 8a70404629..ac3bb713dd 100644
--- a/lib/Frontend/InitHeaderSearch.cpp
+++ b/lib/Frontend/InitHeaderSearch.cpp
@@ -260,6 +260,7 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
switch (os) {
case llvm::Triple::Linux:
+ case llvm::Triple::Hurd:
case llvm::Triple::Solaris:
llvm_unreachable("Include management is handled in the driver.");
@@ -412,6 +413,7 @@ void InitHeaderSearch::AddDefaultCPlusPlusIncludePaths(
switch (os) {
case llvm::Triple::Linux:
+ case llvm::Triple::Hurd:
case llvm::Triple::Solaris:
llvm_unreachable("Include management is handled in the driver.");
break;
@@ -460,6 +462,7 @@ void InitHeaderSearch::AddDefaultIncludePaths(const LangOptions &Lang,
break; // Everything else continues to use this routine's logic.
case llvm::Triple::Linux:
+ case llvm::Triple::Hurd:
case llvm::Triple::Solaris:
return;
@@ -473,22 +476,6 @@ void InitHeaderSearch::AddDefaultIncludePaths(const LangOptions &Lang,
if (Lang.CPlusPlus && !Lang.AsmPreprocessor &&
HSOpts.UseStandardCXXIncludes && HSOpts.UseStandardSystemIncludes) {
if (HSOpts.UseLibcxx) {
- if (triple.isOSDarwin()) {
- // On Darwin, libc++ may be installed alongside the compiler in
- // include/c++/v1.
- if (!HSOpts.ResourceDir.empty()) {
- // Remove version from foo/lib/clang/version
- StringRef NoVer = llvm::sys::path::parent_path(HSOpts.ResourceDir);
- // Remove clang from foo/lib/clang
- StringRef Lib = llvm::sys::path::parent_path(NoVer);
- // Remove lib from foo/lib
- SmallString<128> P = llvm::sys::path::parent_path(Lib);
-
- // Get foo/include/c++/v1
- llvm::sys::path::append(P, "include", "c++", "v1");
- AddUnmappedPath(P, CXXSystem, false);
- }
- }
AddPath("/usr/include/c++/v1", CXXSystem, false);
} else {
AddDefaultCPlusPlusIncludePaths(Lang, triple, HSOpts);
@@ -616,11 +603,11 @@ void InitHeaderSearch::Realize(const LangOptions &Lang) {
for (auto &Include : IncludePath)
if (Include.first == System || Include.first == ExternCSystem ||
- (!Lang.ObjC1 && !Lang.CPlusPlus && Include.first == CSystem) ||
- (/*FIXME !Lang.ObjC1 && */ Lang.CPlusPlus &&
+ (!Lang.ObjC && !Lang.CPlusPlus && Include.first == CSystem) ||
+ (/*FIXME !Lang.ObjC && */ Lang.CPlusPlus &&
Include.first == CXXSystem) ||
- (Lang.ObjC1 && !Lang.CPlusPlus && Include.first == ObjCSystem) ||
- (Lang.ObjC1 && Lang.CPlusPlus && Include.first == ObjCXXSystem))
+ (Lang.ObjC && !Lang.CPlusPlus && Include.first == ObjCSystem) ||
+ (Lang.ObjC && Lang.CPlusPlus && Include.first == ObjCXXSystem))
SearchList.push_back(Include.second);
for (auto &Include : IncludePath)
diff --git a/lib/Frontend/InitPreprocessor.cpp b/lib/Frontend/InitPreprocessor.cpp
index e576fc098d..77b2f479a7 100644
--- a/lib/Frontend/InitPreprocessor.cpp
+++ b/lib/Frontend/InitPreprocessor.cpp
@@ -21,7 +21,6 @@
#include "clang/Frontend/FrontendOptions.h"
#include "clang/Frontend/Utils.h"
#include "clang/Lex/HeaderSearch.h"
-#include "clang/Lex/PTHManager.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Serialization/ASTReader.h"
@@ -76,23 +75,6 @@ static void AddImplicitIncludeMacros(MacroBuilder &Builder, StringRef File) {
Builder.append("##"); // ##?
}
-/// AddImplicitIncludePTH - Add an implicit \#include using the original file
-/// used to generate a PTH cache.
-static void AddImplicitIncludePTH(MacroBuilder &Builder, Preprocessor &PP,
- StringRef ImplicitIncludePTH) {
- PTHManager *P = PP.getPTHManager();
- // Null check 'P' in the corner case where it couldn't be created.
- const char *OriginalFile = P ? P->getOriginalSourceFile() : nullptr;
-
- if (!OriginalFile) {
- PP.getDiagnostics().Report(diag::err_fe_pth_file_has_no_source_header)
- << ImplicitIncludePTH;
- return;
- }
-
- AddImplicitInclude(Builder, OriginalFile);
-}
-
/// Add an implicit \#include using the original file used to generate
/// a PCH file.
static void AddImplicitIncludePCH(MacroBuilder &Builder, Preprocessor &PP,
@@ -421,7 +403,7 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("__STDC_UTF_16__", "1");
Builder.defineMacro("__STDC_UTF_32__", "1");
- if (LangOpts.ObjC1)
+ if (LangOpts.ObjC)
Builder.defineMacro("__OBJC__");
// OpenCL v1.0/1.1 s6.9, v1.2/2.0 s6.10: Preprocessor Directives and Macros.
@@ -558,15 +540,15 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
if (LangOpts.RelaxedTemplateTemplateArgs)
Builder.defineMacro("__cpp_template_template_args", "201611L");
+ // C++20 features.
+ if (LangOpts.Char8)
+ Builder.defineMacro("__cpp_char8_t", "201811L");
+
// TS features.
if (LangOpts.ConceptsTS)
Builder.defineMacro("__cpp_experimental_concepts", "1L");
if (LangOpts.CoroutinesTS)
Builder.defineMacro("__cpp_coroutines", "201703L");
-
- // Potential future breaking changes.
- if (LangOpts.Char8)
- Builder.defineMacro("__cpp_char8_t", "201803L");
}
static void InitializePredefinedMacros(const TargetInfo &TI,
@@ -635,7 +617,7 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
if (!LangOpts.MSVCCompat && LangOpts.CPlusPlus11)
Builder.defineMacro("__GXX_EXPERIMENTAL_CXX0X__");
- if (LangOpts.ObjC1) {
+ if (LangOpts.ObjC) {
if (LangOpts.ObjCRuntime.isNonFragile()) {
Builder.defineMacro("__OBJC2__");
@@ -699,7 +681,7 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
if (!LangOpts.NoConstantCFStrings)
Builder.defineMacro("__CONSTANT_CFSTRINGS__");
- if (LangOpts.ObjC2)
+ if (LangOpts.ObjC)
Builder.defineMacro("OBJC_NEW_PROPERTIES");
if (LangOpts.PascalStrings)
@@ -1016,7 +998,7 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("__strong", "__attribute__((objc_gc(strong)))");
Builder.defineMacro("__autoreleasing", "");
Builder.defineMacro("__unsafe_unretained", "");
- } else if (LangOpts.ObjC1) {
+ } else if (LangOpts.ObjC) {
Builder.defineMacro("__weak", "__attribute__((objc_ownership(weak)))");
Builder.defineMacro("__strong", "__attribute__((objc_ownership(strong)))");
Builder.defineMacro("__autoreleasing",
@@ -1130,7 +1112,7 @@ void clang::InitializePreprocessor(
// Install definitions to make Objective-C++ ARC work well with various
// C++ Standard Library implementations.
- if (LangOpts.ObjC1 && LangOpts.CPlusPlus &&
+ if (LangOpts.ObjC && LangOpts.CPlusPlus &&
(LangOpts.ObjCAutoRefCount || LangOpts.ObjCWeak)) {
switch (InitOpts.ObjCXXARCStandardLibrary) {
case ARCXX_nolib:
@@ -1177,8 +1159,6 @@ void clang::InitializePreprocessor(
if (!InitOpts.ImplicitPCHInclude.empty())
AddImplicitIncludePCH(Builder, PP, PCHContainerRdr,
InitOpts.ImplicitPCHInclude);
- if (!InitOpts.ImplicitPTHInclude.empty())
- AddImplicitIncludePTH(Builder, PP, InitOpts.ImplicitPTHInclude);
// Process -include directives.
for (unsigned i = 0, e = InitOpts.Includes.size(); i != e; ++i) {
diff --git a/lib/Frontend/ModuleDependencyCollector.cpp b/lib/Frontend/ModuleDependencyCollector.cpp
index 25cad8be6d..fa8efcc3b5 100644
--- a/lib/Frontend/ModuleDependencyCollector.cpp
+++ b/lib/Frontend/ModuleDependencyCollector.cpp
@@ -156,10 +156,6 @@ void ModuleDependencyCollector::writeFileMap() {
// allows crash reproducer scripts to work across machines.
VFSWriter.setOverlayDir(VFSDir);
- // Do not ignore non existent contents otherwise we might skip something
- // that should have been collected here.
- VFSWriter.setIgnoreNonExistentContents(false);
-
// Explicitly set case sensitivity for the YAML writer. For that, find out
// the sensitivity at the path where the headers all collected to.
VFSWriter.setCaseSensitivity(isCaseSensitivePath(VFSDir));
diff --git a/lib/Frontend/PrecompiledPreamble.cpp b/lib/Frontend/PrecompiledPreamble.cpp
index c6dd89c05c..1930af187e 100644
--- a/lib/Frontend/PrecompiledPreamble.cpp
+++ b/lib/Frontend/PrecompiledPreamble.cpp
@@ -14,7 +14,6 @@
#include "clang/Frontend/PrecompiledPreamble.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/TargetInfo.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/CompilerInvocation.h"
#include "clang/Frontend/FrontendActions.h"
@@ -30,6 +29,7 @@
#include "llvm/Support/Mutex.h"
#include "llvm/Support/MutexGuard.h"
#include "llvm/Support/Process.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include <limits>
#include <utility>
@@ -48,17 +48,17 @@ StringRef getInMemoryPreamblePath() {
#endif
}
-IntrusiveRefCntPtr<vfs::FileSystem>
+IntrusiveRefCntPtr<llvm::vfs::FileSystem>
createVFSOverlayForPreamblePCH(StringRef PCHFilename,
std::unique_ptr<llvm::MemoryBuffer> PCHBuffer,
- IntrusiveRefCntPtr<vfs::FileSystem> VFS) {
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS) {
// We want only the PCH file from the real filesystem to be available,
// so we create an in-memory VFS with just that and overlay it on top.
- IntrusiveRefCntPtr<vfs::InMemoryFileSystem> PCHFS(
- new vfs::InMemoryFileSystem());
+ IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> PCHFS(
+ new llvm::vfs::InMemoryFileSystem());
PCHFS->addFile(PCHFilename, 0, std::move(PCHBuffer));
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> Overlay(
- new vfs::OverlayFileSystem(VFS));
+ IntrusiveRefCntPtr<llvm::vfs::OverlayFileSystem> Overlay(
+ new llvm::vfs::OverlayFileSystem(VFS));
Overlay->pushOverlay(PCHFS);
return Overlay;
}
@@ -232,7 +232,8 @@ PreambleBounds clang::ComputePreambleBounds(const LangOptions &LangOpts,
llvm::ErrorOr<PrecompiledPreamble> PrecompiledPreamble::Build(
const CompilerInvocation &Invocation,
const llvm::MemoryBuffer *MainFileBuffer, PreambleBounds Bounds,
- DiagnosticsEngine &Diagnostics, IntrusiveRefCntPtr<vfs::FileSystem> VFS,
+ DiagnosticsEngine &Diagnostics,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS,
std::shared_ptr<PCHContainerOperations> PCHContainerOps, bool StoreInMemory,
PreambleCallbacks &Callbacks) {
assert(VFS && "VFS is null");
@@ -410,7 +411,7 @@ std::size_t PrecompiledPreamble::getSize() const {
bool PrecompiledPreamble::CanReuse(const CompilerInvocation &Invocation,
const llvm::MemoryBuffer *MainFileBuffer,
PreambleBounds Bounds,
- vfs::FileSystem *VFS) const {
+ llvm::vfs::FileSystem *VFS) const {
assert(
Bounds.Size <= MainFileBuffer->getBufferSize() &&
@@ -437,7 +438,7 @@ bool PrecompiledPreamble::CanReuse(const CompilerInvocation &Invocation,
// remapping or unsaved_files.
std::map<llvm::sys::fs::UniqueID, PreambleFileHash> OverriddenFiles;
for (const auto &R : PreprocessorOpts.RemappedFiles) {
- vfs::Status Status;
+ llvm::vfs::Status Status;
if (!moveOnNoError(VFS->status(R.second), Status)) {
// If we can't stat the file we're remapping to, assume that something
// horrible happened.
@@ -449,7 +450,7 @@ bool PrecompiledPreamble::CanReuse(const CompilerInvocation &Invocation,
}
for (const auto &RB : PreprocessorOpts.RemappedFileBuffers) {
- vfs::Status Status;
+ llvm::vfs::Status Status;
if (!moveOnNoError(VFS->status(RB.first), Status))
return false;
@@ -459,7 +460,7 @@ bool PrecompiledPreamble::CanReuse(const CompilerInvocation &Invocation,
// Check whether anything has changed.
for (const auto &F : FilesInPreamble) {
- vfs::Status Status;
+ llvm::vfs::Status Status;
if (!moveOnNoError(VFS->status(F.first()), Status)) {
// If we can't stat the file, assume that something horrible happened.
return false;
@@ -485,14 +486,14 @@ bool PrecompiledPreamble::CanReuse(const CompilerInvocation &Invocation,
}
void PrecompiledPreamble::AddImplicitPreamble(
- CompilerInvocation &CI, IntrusiveRefCntPtr<vfs::FileSystem> &VFS,
+ CompilerInvocation &CI, IntrusiveRefCntPtr<llvm::vfs::FileSystem> &VFS,
llvm::MemoryBuffer *MainFileBuffer) const {
PreambleBounds Bounds(PreambleBytes.size(), PreambleEndsAtStartOfLine);
configurePreamble(Bounds, CI, VFS, MainFileBuffer);
}
void PrecompiledPreamble::OverridePreamble(
- CompilerInvocation &CI, IntrusiveRefCntPtr<vfs::FileSystem> &VFS,
+ CompilerInvocation &CI, IntrusiveRefCntPtr<llvm::vfs::FileSystem> &VFS,
llvm::MemoryBuffer *MainFileBuffer) const {
auto Bounds = ComputePreambleBounds(*CI.getLangOpts(), MainFileBuffer, 0);
configurePreamble(Bounds, CI, VFS, MainFileBuffer);
@@ -680,7 +681,7 @@ PrecompiledPreamble::PreambleFileHash::createForMemoryBuffer(
void PrecompiledPreamble::configurePreamble(
PreambleBounds Bounds, CompilerInvocation &CI,
- IntrusiveRefCntPtr<vfs::FileSystem> &VFS,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> &VFS,
llvm::MemoryBuffer *MainFileBuffer) const {
assert(VFS);
@@ -701,13 +702,14 @@ void PrecompiledPreamble::configurePreamble(
void PrecompiledPreamble::setupPreambleStorage(
const PCHStorage &Storage, PreprocessorOptions &PreprocessorOpts,
- IntrusiveRefCntPtr<vfs::FileSystem> &VFS) {
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> &VFS) {
if (Storage.getKind() == PCHStorage::Kind::TempFile) {
const TempPCHFile &PCHFile = Storage.asFile();
PreprocessorOpts.ImplicitPCHInclude = PCHFile.getFilePath();
// Make sure we can access the PCH file even if we're using a VFS
- IntrusiveRefCntPtr<vfs::FileSystem> RealFS = vfs::getRealFileSystem();
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> RealFS =
+ llvm::vfs::getRealFileSystem();
auto PCHPath = PCHFile.getFilePath();
if (VFS == RealFS || VFS->exists(PCHPath))
return;
diff --git a/lib/Frontend/PrintPreprocessedOutput.cpp b/lib/Frontend/PrintPreprocessedOutput.cpp
index 69cd072ea5..3b835985a5 100644
--- a/lib/Frontend/PrintPreprocessedOutput.cpp
+++ b/lib/Frontend/PrintPreprocessedOutput.cpp
@@ -750,6 +750,11 @@ static void PrintPreprocessedTokens(Preprocessor &PP, Token &Tok,
reinterpret_cast<Module *>(Tok.getAnnotationValue()));
PP.Lex(Tok);
continue;
+ } else if (Tok.isAnnotation()) {
+ // Ignore annotation tokens created by pragmas - the pragmas themselves
+ // will be reproduced in the preprocessed output.
+ PP.Lex(Tok);
+ continue;
} else if (IdentifierInfo *II = Tok.getIdentifierInfo()) {
OS << II->getName();
} else if (Tok.isLiteral() && !Tok.needsCleaning() &&
diff --git a/lib/Frontend/Rewrite/InclusionRewriter.cpp b/lib/Frontend/Rewrite/InclusionRewriter.cpp
index 1631eccd70..2e7baa3d95 100644
--- a/lib/Frontend/Rewrite/InclusionRewriter.cpp
+++ b/lib/Frontend/Rewrite/InclusionRewriter.cpp
@@ -586,6 +586,7 @@ void InclusionRewriter::Process(FileID FileId,
LocalEOL, Line, /*EnsureNewline=*/ true);
WriteLineInfo(FileName, Line, FileType);
RawLex.SetKeepWhitespaceMode(false);
+ break;
}
default:
break;
diff --git a/lib/Frontend/Rewrite/RewriteModernObjC.cpp b/lib/Frontend/Rewrite/RewriteModernObjC.cpp
index c1abc54ae8..8c2d4994bf 100644
--- a/lib/Frontend/Rewrite/RewriteModernObjC.cpp
+++ b/lib/Frontend/Rewrite/RewriteModernObjC.cpp
@@ -541,7 +541,7 @@ namespace {
// FIXME: This predicate seems like it would be useful to add to ASTContext.
bool isObjCType(QualType T) {
- if (!LangOpts.ObjC1 && !LangOpts.ObjC2)
+ if (!LangOpts.ObjC)
return false;
QualType OCT = Context->getCanonicalType(T).getUnqualifiedType();
@@ -3099,10 +3099,9 @@ Expr *RewriteModernObjC::SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFla
SmallVectorImpl<Expr*> &MsgExprs,
ObjCMethodDecl *Method) {
// Now do the "normal" pointer to function cast.
- QualType castType = getSimpleFunctionType(returnType, ArgTypes,
- Method ? Method->isVariadic()
- : false);
- castType = Context->getPointerType(castType);
+ QualType FuncType = getSimpleFunctionType(
+ returnType, ArgTypes, Method ? Method->isVariadic() : false);
+ QualType castType = Context->getPointerType(FuncType);
// build type for containing the objc_msgSend_stret object.
static unsigned stretCount=0;
@@ -3176,9 +3175,9 @@ Expr *RewriteModernObjC::SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFla
// AST for __Stretn(receiver, args).s;
IdentifierInfo *ID = &Context->Idents.get(name);
- FunctionDecl *FD = FunctionDecl::Create(*Context, TUDecl, SourceLocation(),
- SourceLocation(), ID, castType,
- nullptr, SC_Extern, false, false);
+ FunctionDecl *FD =
+ FunctionDecl::Create(*Context, TUDecl, SourceLocation(), SourceLocation(),
+ ID, FuncType, nullptr, SC_Extern, false, false);
DeclRefExpr *DRE = new (Context) DeclRefExpr(FD, false, castType, VK_RValue,
SourceLocation());
CallExpr *STCE = new (Context) CallExpr(*Context, DRE, MsgExprs,
diff --git a/lib/Frontend/Rewrite/RewriteObjC.cpp b/lib/Frontend/Rewrite/RewriteObjC.cpp
index 8d950e70f9..5a5c81b061 100644
--- a/lib/Frontend/Rewrite/RewriteObjC.cpp
+++ b/lib/Frontend/Rewrite/RewriteObjC.cpp
@@ -448,7 +448,7 @@ namespace {
// FIXME: This predicate seems like it would be useful to add to ASTContext.
bool isObjCType(QualType T) {
- if (!LangOpts.ObjC1 && !LangOpts.ObjC2)
+ if (!LangOpts.ObjC)
return false;
QualType OCT = Context->getCanonicalType(T).getUnqualifiedType();
diff --git a/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/lib/FrontendTool/ExecuteCompilerInvocation.cpp
index 4deebcd477..df360de857 100644
--- a/lib/FrontendTool/ExecuteCompilerInvocation.cpp
+++ b/lib/FrontendTool/ExecuteCompilerInvocation.cpp
@@ -12,7 +12,6 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/FrontendTool/Utils.h"
#include "clang/ARCMigrate/ARCMTActions.h"
#include "clang/CodeGen/CodeGenAction.h"
#include "clang/Config/config.h"
@@ -23,10 +22,12 @@
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/FrontendPluginRegistry.h"
#include "clang/Frontend/Utils.h"
+#include "clang/FrontendTool/Utils.h"
#include "clang/Rewrite/Frontend/FrontendActions.h"
#include "clang/StaticAnalyzer/Frontend/FrontendActions.h"
#include "llvm/Option/OptTable.h"
#include "llvm/Option/Option.h"
+#include "llvm/Support/BuryPointer.h"
#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Support/ErrorHandling.h"
using namespace clang;
@@ -64,7 +65,6 @@ CreateFrontendBaseAction(CompilerInstance &CI) {
case GenerateHeaderModule:
return llvm::make_unique<GenerateHeaderModuleAction>();
case GeneratePCH: return llvm::make_unique<GeneratePCHAction>();
- case GeneratePTH: return llvm::make_unique<GeneratePTHAction>();
case InitOnly: return llvm::make_unique<InitOnlyAction>();
case ParseSyntaxOnly: return llvm::make_unique<SyntaxOnlyAction>();
case ModuleFileInfo: return llvm::make_unique<DumpModuleInfoAction>();
@@ -90,7 +90,6 @@ CreateFrontendBaseAction(CompilerInstance &CI) {
return nullptr;
}
- case PrintDeclContext: return llvm::make_unique<DeclContextPrintAction>();
case PrintPreamble: return llvm::make_unique<PrintPreambleAction>();
case PrintPreprocessedInput: {
if (CI.getPreprocessorOutputOpts().RewriteIncludes ||
@@ -184,7 +183,7 @@ bool ExecuteCompilerInvocation(CompilerInstance *Clang) {
// Honor -help.
if (Clang->getFrontendOpts().ShowHelp) {
std::unique_ptr<OptTable> Opts = driver::createDriverOptTable();
- Opts->PrintHelp(llvm::outs(), "clang -cc1",
+ Opts->PrintHelp(llvm::outs(), "clang -cc1 [options] file...",
"LLVM 'Clang' Compiler: http://clang.llvm.org",
/*Include=*/driver::options::CC1Option,
/*Exclude=*/0, /*ShowAllAliases=*/false);
@@ -242,11 +241,19 @@ bool ExecuteCompilerInvocation(CompilerInstance *Clang) {
ento::printCheckerHelp(llvm::outs(), Clang->getFrontendOpts().Plugins);
return true;
}
+
+ // Honor -analyzer-list-enabled-checkers.
if (Clang->getAnalyzerOpts()->ShowEnabledCheckerList) {
ento::printEnabledCheckerList(llvm::outs(),
Clang->getFrontendOpts().Plugins,
*Clang->getAnalyzerOpts());
}
+
+ // Honor -analyzer-config-help.
+ if (Clang->getAnalyzerOpts()->ShowConfigOptionsList) {
+ ento::printAnalyzerConfigList(llvm::outs());
+ return true;
+ }
#endif
// If there were errors in processing arguments, don't do anything else.
@@ -258,7 +265,7 @@ bool ExecuteCompilerInvocation(CompilerInstance *Clang) {
return false;
bool Success = Clang->ExecuteAction(*Act);
if (Clang->getFrontendOpts().DisableFree)
- BuryPointer(std::move(Act));
+ llvm::BuryPointer(std::move(Act));
return Success;
}
diff --git a/lib/Headers/CMakeLists.txt b/lib/Headers/CMakeLists.txt
index 1930d8e225..e444c9c870 100644
--- a/lib/Headers/CMakeLists.txt
+++ b/lib/Headers/CMakeLists.txt
@@ -144,7 +144,7 @@ foreach( f ${files} ${cuda_wrapper_files} )
list(APPEND out_files ${dst})
endforeach( f )
-add_custom_command(OUTPUT ${output_dir}/arm_neon.h
+add_custom_command(OUTPUT ${output_dir}/arm_neon.h
DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/arm_neon.h
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_CURRENT_BINARY_DIR}/arm_neon.h ${output_dir}/arm_neon.h
COMMENT "Copying clang's arm_neon.h...")
@@ -156,7 +156,9 @@ add_custom_command(OUTPUT ${output_dir}/arm_fp16.h
list(APPEND out_files ${output_dir}/arm_fp16.h)
add_custom_target(clang-headers ALL DEPENDS ${out_files})
-set_target_properties(clang-headers PROPERTIES FOLDER "Misc")
+set_target_properties(clang-headers PROPERTIES
+ FOLDER "Misc"
+ RUNTIME_OUTPUT_DIRECTORY "${output_dir}")
install(
FILES ${files} ${CMAKE_CURRENT_BINARY_DIR}/arm_neon.h
diff --git a/lib/Headers/adxintrin.h b/lib/Headers/adxintrin.h
index ee34728417..d6c454db85 100644
--- a/lib/Headers/adxintrin.h
+++ b/lib/Headers/adxintrin.h
@@ -53,7 +53,7 @@ static __inline unsigned char __DEFAULT_FN_ATTRS
_addcarry_u32(unsigned char __cf, unsigned int __x, unsigned int __y,
unsigned int *__p)
{
- return __builtin_ia32_addcarry_u32(__cf, __x, __y, __p);
+ return __builtin_ia32_addcarryx_u32(__cf, __x, __y, __p);
}
#ifdef __x86_64__
@@ -61,7 +61,7 @@ static __inline unsigned char __DEFAULT_FN_ATTRS
_addcarry_u64(unsigned char __cf, unsigned long long __x,
unsigned long long __y, unsigned long long *__p)
{
- return __builtin_ia32_addcarry_u64(__cf, __x, __y, __p);
+ return __builtin_ia32_addcarryx_u64(__cf, __x, __y, __p);
}
#endif
diff --git a/lib/Headers/altivec.h b/lib/Headers/altivec.h
index 90fd477d9b..2dc6adb900 100644
--- a/lib/Headers/altivec.h
+++ b/lib/Headers/altivec.h
@@ -9492,49 +9492,51 @@ vec_splat_u32(signed char __a) {
/* vec_sr */
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sr(vector signed char __a, vector unsigned char __b) {
- vector unsigned char __res = (vector unsigned char)__a >> __b;
- return (vector signed char)__res;
-}
-
+// vec_sr does modulo arithmetic on __b first, so each element of __b may
+// exceed the bit width of the corresponding element of __a.
static __inline__ vector unsigned char __ATTRS_o_ai
vec_sr(vector unsigned char __a, vector unsigned char __b) {
- return __a >> __b;
+ return __a >>
+ (__b % (vector unsigned char)(sizeof(unsigned char) * __CHAR_BIT__));
}
-static __inline__ vector signed short __ATTRS_o_ai
-vec_sr(vector signed short __a, vector unsigned short __b) {
- vector unsigned short __res = (vector unsigned short)__a >> __b;
- return (vector signed short)__res;
+static __inline__ vector signed char __ATTRS_o_ai
+vec_sr(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)vec_sr((vector unsigned char)__a, __b);
}
static __inline__ vector unsigned short __ATTRS_o_ai
vec_sr(vector unsigned short __a, vector unsigned short __b) {
- return __a >> __b;
+ return __a >>
+ (__b % (vector unsigned short)(sizeof(unsigned short) * __CHAR_BIT__));
}
-static __inline__ vector signed int __ATTRS_o_ai
-vec_sr(vector signed int __a, vector unsigned int __b) {
- vector unsigned int __res = (vector unsigned int)__a >> __b;
- return (vector signed int)__res;
+static __inline__ vector short __ATTRS_o_ai vec_sr(vector short __a,
+ vector unsigned short __b) {
+ return (vector short)vec_sr((vector unsigned short)__a, __b);
}
static __inline__ vector unsigned int __ATTRS_o_ai
vec_sr(vector unsigned int __a, vector unsigned int __b) {
- return __a >> __b;
+ return __a >>
+ (__b % (vector unsigned int)(sizeof(unsigned int) * __CHAR_BIT__));
}
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_sr(vector signed long long __a, vector unsigned long long __b) {
- vector unsigned long long __res = (vector unsigned long long)__a >> __b;
- return (vector signed long long)__res;
+static __inline__ vector int __ATTRS_o_ai vec_sr(vector int __a,
+ vector unsigned int __b) {
+ return (vector int)vec_sr((vector unsigned int)__a, __b);
}
+#ifdef __POWER8_VECTOR__
static __inline__ vector unsigned long long __ATTRS_o_ai
vec_sr(vector unsigned long long __a, vector unsigned long long __b) {
- return __a >> __b;
+ return __a >> (__b % (vector unsigned long long)(sizeof(unsigned long long) *
+ __CHAR_BIT__));
+}
+
+static __inline__ vector long long __ATTRS_o_ai
+vec_sr(vector long long __a, vector unsigned long long __b) {
+ return (vector long long)vec_sr((vector unsigned long long)__a, __b);
}
#endif
@@ -9544,12 +9546,12 @@ vec_sr(vector unsigned long long __a, vector unsigned long long __b) {
static __inline__ vector signed char __ATTRS_o_ai
vec_vsrb(vector signed char __a, vector unsigned char __b) {
- return __a >> (vector signed char)__b;
+ return vec_sr(__a, __b);
}
static __inline__ vector unsigned char __ATTRS_o_ai
vec_vsrb(vector unsigned char __a, vector unsigned char __b) {
- return __a >> __b;
+ return vec_sr(__a, __b);
}
/* vec_vsrh */
@@ -9558,12 +9560,12 @@ vec_vsrb(vector unsigned char __a, vector unsigned char __b) {
static __inline__ vector short __ATTRS_o_ai
vec_vsrh(vector short __a, vector unsigned short __b) {
- return __a >> (vector short)__b;
+ return vec_sr(__a, __b);
}
static __inline__ vector unsigned short __ATTRS_o_ai
vec_vsrh(vector unsigned short __a, vector unsigned short __b) {
- return __a >> __b;
+ return vec_sr(__a, __b);
}
/* vec_vsrw */
@@ -9572,12 +9574,12 @@ vec_vsrh(vector unsigned short __a, vector unsigned short __b) {
static __inline__ vector int __ATTRS_o_ai vec_vsrw(vector int __a,
vector unsigned int __b) {
- return __a >> (vector int)__b;
+ return vec_sr(__a, __b);
}
static __inline__ vector unsigned int __ATTRS_o_ai
vec_vsrw(vector unsigned int __a, vector unsigned int __b) {
- return __a >> __b;
+ return vec_sr(__a, __b);
}
/* vec_sra */
@@ -16353,67 +16355,82 @@ vec_revb(vector unsigned __int128 __a) {
/* vec_xl */
+typedef vector signed char unaligned_vec_schar __attribute__((aligned(1)));
+typedef vector unsigned char unaligned_vec_uchar __attribute__((aligned(1)));
+typedef vector signed short unaligned_vec_sshort __attribute__((aligned(1)));
+typedef vector unsigned short unaligned_vec_ushort __attribute__((aligned(1)));
+typedef vector signed int unaligned_vec_sint __attribute__((aligned(1)));
+typedef vector unsigned int unaligned_vec_uint __attribute__((aligned(1)));
+typedef vector float unaligned_vec_float __attribute__((aligned(1)));
+
static inline __ATTRS_o_ai vector signed char vec_xl(signed long long __offset,
signed char *__ptr) {
- return *(vector signed char *)(__ptr + __offset);
+ return *(unaligned_vec_schar *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector unsigned char
vec_xl(signed long long __offset, unsigned char *__ptr) {
- return *(vector unsigned char *)(__ptr + __offset);
+ return *(unaligned_vec_uchar*)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector signed short vec_xl(signed long long __offset,
signed short *__ptr) {
- return *(vector signed short *)(__ptr + __offset);
+ return *(unaligned_vec_sshort *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector unsigned short
vec_xl(signed long long __offset, unsigned short *__ptr) {
- return *(vector unsigned short *)(__ptr + __offset);
+ return *(unaligned_vec_ushort *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector signed int vec_xl(signed long long __offset,
signed int *__ptr) {
- return *(vector signed int *)(__ptr + __offset);
+ return *(unaligned_vec_sint *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector unsigned int vec_xl(signed long long __offset,
unsigned int *__ptr) {
- return *(vector unsigned int *)(__ptr + __offset);
+ return *(unaligned_vec_uint *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector float vec_xl(signed long long __offset,
float *__ptr) {
- return *(vector float *)(__ptr + __offset);
+ return *(unaligned_vec_float *)(__ptr + __offset);
}
#ifdef __VSX__
+typedef vector signed long long unaligned_vec_sll __attribute__((aligned(1)));
+typedef vector unsigned long long unaligned_vec_ull __attribute__((aligned(1)));
+typedef vector double unaligned_vec_double __attribute__((aligned(1)));
+
static inline __ATTRS_o_ai vector signed long long
vec_xl(signed long long __offset, signed long long *__ptr) {
- return *(vector signed long long *)(__ptr + __offset);
+ return *(unaligned_vec_sll *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector unsigned long long
vec_xl(signed long long __offset, unsigned long long *__ptr) {
- return *(vector unsigned long long *)(__ptr + __offset);
+ return *(unaligned_vec_ull *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector double vec_xl(signed long long __offset,
double *__ptr) {
- return *(vector double *)(__ptr + __offset);
+ return *(unaligned_vec_double *)(__ptr + __offset);
}
#endif
#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+typedef vector signed __int128 unaligned_vec_si128 __attribute__((aligned(1)));
+typedef vector unsigned __int128 unaligned_vec_ui128
+ __attribute__((aligned(1)));
static inline __ATTRS_o_ai vector signed __int128
vec_xl(signed long long __offset, signed __int128 *__ptr) {
- return *(vector signed __int128 *)(__ptr + __offset);
+ return *(unaligned_vec_si128 *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector unsigned __int128
vec_xl(signed long long __offset, unsigned __int128 *__ptr) {
- return *(vector unsigned __int128 *)(__ptr + __offset);
+ return *(unaligned_vec_ui128 *)(__ptr + __offset);
}
#endif
@@ -16498,62 +16515,62 @@ vec_xl_be(signed long long __offset, unsigned __int128 *__ptr) {
static inline __ATTRS_o_ai void vec_xst(vector signed char __vec,
signed long long __offset,
signed char *__ptr) {
- *(vector signed char *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_schar *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector unsigned char __vec,
signed long long __offset,
unsigned char *__ptr) {
- *(vector unsigned char *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_uchar *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector signed short __vec,
signed long long __offset,
signed short *__ptr) {
- *(vector signed short *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_sshort *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector unsigned short __vec,
signed long long __offset,
unsigned short *__ptr) {
- *(vector unsigned short *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_ushort *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector signed int __vec,
signed long long __offset,
signed int *__ptr) {
- *(vector signed int *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_sint *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector unsigned int __vec,
signed long long __offset,
unsigned int *__ptr) {
- *(vector unsigned int *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_uint *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector float __vec,
signed long long __offset,
float *__ptr) {
- *(vector float *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_float *)(__ptr + __offset) = __vec;
}
#ifdef __VSX__
static inline __ATTRS_o_ai void vec_xst(vector signed long long __vec,
signed long long __offset,
signed long long *__ptr) {
- *(vector signed long long *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_sll *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector unsigned long long __vec,
signed long long __offset,
unsigned long long *__ptr) {
- *(vector unsigned long long *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_ull *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector double __vec,
signed long long __offset,
double *__ptr) {
- *(vector double *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_double *)(__ptr + __offset) = __vec;
}
#endif
@@ -16561,13 +16578,13 @@ static inline __ATTRS_o_ai void vec_xst(vector double __vec,
static inline __ATTRS_o_ai void vec_xst(vector signed __int128 __vec,
signed long long __offset,
signed __int128 *__ptr) {
- *(vector signed __int128 *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_si128 *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector unsigned __int128 __vec,
signed long long __offset,
unsigned __int128 *__ptr) {
- *(vector unsigned __int128 *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_ui128 *)(__ptr + __offset) = __vec;
}
#endif
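For illustration only (not part of the change): the vec_xl/vec_xst pair is meant for buffers with no particular alignment, which is exactly the case the new aligned(1) typedefs above make well-defined. A small sketch, assuming a PowerPC target with -maltivec; the helper name copy16_unaligned is invented for the example.

#include <altivec.h>

/* Copy 16 bytes between buffers that may not be 16-byte aligned. */
static void copy16_unaligned(unsigned char *dst, const unsigned char *src) {
  vector unsigned char v = vec_xl(0, (unsigned char *)src); /* unaligned load  */
  vec_xst(v, 0, dst);                                       /* unaligned store */
}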
diff --git a/lib/Headers/avx512bwintrin.h b/lib/Headers/avx512bwintrin.h
index bff5b975c1..a90a255376 100644
--- a/lib/Headers/avx512bwintrin.h
+++ b/lib/Headers/avx512bwintrin.h
@@ -1747,6 +1747,15 @@ _mm512_kunpackw (__mmask32 __A, __mmask32 __B)
(__mmask32) __B);
}
+static __inline __m512i __DEFAULT_FN_ATTRS512
+_mm512_loadu_epi16 (void const *__P)
+{
+ struct __loadu_epi16 {
+ __m512i __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_epi16*)__P)->__v;
+}
+
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_loadu_epi16 (__m512i __W, __mmask32 __U, void const *__P)
{
@@ -1764,6 +1773,15 @@ _mm512_maskz_loadu_epi16 (__mmask32 __U, void const *__P)
(__mmask32) __U);
}
+static __inline __m512i __DEFAULT_FN_ATTRS512
+_mm512_loadu_epi8 (void const *__P)
+{
+ struct __loadu_epi8 {
+ __m512i __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_epi8*)__P)->__v;
+}
+
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_loadu_epi8 (__m512i __W, __mmask64 __U, void const *__P)
{
@@ -1780,6 +1798,16 @@ _mm512_maskz_loadu_epi8 (__mmask64 __U, void const *__P)
_mm512_setzero_si512 (),
(__mmask64) __U);
}
+
+static __inline void __DEFAULT_FN_ATTRS512
+_mm512_storeu_epi16 (void *__P, __m512i __A)
+{
+ struct __storeu_epi16 {
+ __m512i __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_epi16*)__P)->__v = __A;
+}
+
static __inline__ void __DEFAULT_FN_ATTRS512
_mm512_mask_storeu_epi16 (void *__P, __mmask32 __U, __m512i __A)
{
@@ -1788,6 +1816,15 @@ _mm512_mask_storeu_epi16 (void *__P, __mmask32 __U, __m512i __A)
(__mmask32) __U);
}
+static __inline void __DEFAULT_FN_ATTRS512
+_mm512_storeu_epi8 (void *__P, __m512i __A)
+{
+ struct __storeu_epi8 {
+ __m512i __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_epi8*)__P)->__v = __A;
+}
+
static __inline__ void __DEFAULT_FN_ATTRS512
_mm512_mask_storeu_epi8 (void *__P, __mmask64 __U, __m512i __A)
{
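The new _mm512_loadu_epi16/_mm512_storeu_epi8 entry points above all rely on the same wrapper-struct idiom. A minimal generic sketch of that idiom outside any intrinsic header looks like the following (the names load_u128/store_u128 are invented for the example, and a 128-bit vector is used only to keep the requirements down to SSE2).

#include <emmintrin.h>

/* A packed, may_alias struct tells the compiler the access is unaligned and
   may alias any object, so a plain member load/store is both well-defined
   and emitted as an unaligned vector access. */
static inline __m128i load_u128(const void *p) {
  struct wrap { __m128i v; } __attribute__((__packed__, __may_alias__));
  return ((const struct wrap *)p)->v;
}

static inline void store_u128(void *p, __m128i v) {
  struct wrap { __m128i v; } __attribute__((__packed__, __may_alias__));
  ((struct wrap *)p)->v = v;
}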
diff --git a/lib/Headers/avx512fintrin.h b/lib/Headers/avx512fintrin.h
index d00e553077..6fc7f9e8d6 100644
--- a/lib/Headers/avx512fintrin.h
+++ b/lib/Headers/avx512fintrin.h
@@ -4330,6 +4330,15 @@ _mm512_loadu_si512 (void const *__P)
}
static __inline __m512i __DEFAULT_FN_ATTRS512
+_mm512_loadu_epi32 (void const *__P)
+{
+ struct __loadu_epi32 {
+ __m512i __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_epi32*)__P)->__v;
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_loadu_epi32 (__m512i __W, __mmask16 __U, void const *__P)
{
return (__m512i) __builtin_ia32_loaddqusi512_mask ((const int *) __P,
@@ -4348,6 +4357,15 @@ _mm512_maskz_loadu_epi32(__mmask16 __U, void const *__P)
}
static __inline __m512i __DEFAULT_FN_ATTRS512
+_mm512_loadu_epi64 (void const *__P)
+{
+ struct __loadu_epi64 {
+ __m512i __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_epi64*)__P)->__v;
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_loadu_epi64 (__m512i __W, __mmask8 __U, void const *__P)
{
return (__m512i) __builtin_ia32_loaddqudi512_mask ((const long long *) __P,
@@ -4483,6 +4501,15 @@ _mm512_load_epi64 (void const *__P)
/* SIMD store ops */
static __inline void __DEFAULT_FN_ATTRS512
+_mm512_storeu_epi64 (void *__P, __m512i __A)
+{
+ struct __storeu_epi64 {
+ __m512i __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_epi64*)__P)->__v = __A;
+}
+
+static __inline void __DEFAULT_FN_ATTRS512
_mm512_mask_storeu_epi64(void *__P, __mmask8 __U, __m512i __A)
{
__builtin_ia32_storedqudi512_mask ((long long *)__P, (__v8di) __A,
@@ -4499,6 +4526,15 @@ _mm512_storeu_si512 (void *__P, __m512i __A)
}
static __inline void __DEFAULT_FN_ATTRS512
+_mm512_storeu_epi32 (void *__P, __m512i __A)
+{
+ struct __storeu_epi32 {
+ __m512i __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_epi32*)__P)->__v = __A;
+}
+
+static __inline void __DEFAULT_FN_ATTRS512
_mm512_mask_storeu_epi32(void *__P, __mmask16 __U, __m512i __A)
{
__builtin_ia32_storedqusi512_mask ((int *)__P, (__v16si) __A,
diff --git a/lib/Headers/avx512vlbwintrin.h b/lib/Headers/avx512vlbwintrin.h
index 1b038dd04d..87e0023e8b 100644
--- a/lib/Headers/avx512vlbwintrin.h
+++ b/lib/Headers/avx512vlbwintrin.h
@@ -2297,6 +2297,15 @@ _mm256_maskz_set1_epi8 (__mmask32 __M, char __A)
(__v32qi) _mm256_setzero_si256());
}
+static __inline __m128i __DEFAULT_FN_ATTRS128
+_mm_loadu_epi16 (void const *__P)
+{
+ struct __loadu_epi16 {
+ __m128i __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_epi16*)__P)->__v;
+}
+
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_loadu_epi16 (__m128i __W, __mmask8 __U, void const *__P)
{
@@ -2314,6 +2323,15 @@ _mm_maskz_loadu_epi16 (__mmask8 __U, void const *__P)
(__mmask8) __U);
}
+static __inline __m256i __DEFAULT_FN_ATTRS256
+_mm256_loadu_epi16 (void const *__P)
+{
+ struct __loadu_epi16 {
+ __m256i __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_epi16*)__P)->__v;
+}
+
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_loadu_epi16 (__m256i __W, __mmask16 __U, void const *__P)
{
@@ -2331,6 +2349,15 @@ _mm256_maskz_loadu_epi16 (__mmask16 __U, void const *__P)
(__mmask16) __U);
}
+static __inline __m128i __DEFAULT_FN_ATTRS128
+_mm_loadu_epi8 (void const *__P)
+{
+ struct __loadu_epi8 {
+ __m128i __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_epi8*)__P)->__v;
+}
+
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_loadu_epi8 (__m128i __W, __mmask16 __U, void const *__P)
{
@@ -2348,6 +2375,15 @@ _mm_maskz_loadu_epi8 (__mmask16 __U, void const *__P)
(__mmask16) __U);
}
+static __inline __m256i __DEFAULT_FN_ATTRS256
+_mm256_loadu_epi8 (void const *__P)
+{
+ struct __loadu_epi8 {
+ __m256i __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_epi8*)__P)->__v;
+}
+
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_loadu_epi8 (__m256i __W, __mmask32 __U, void const *__P)
{
@@ -2364,7 +2400,17 @@ _mm256_maskz_loadu_epi8 (__mmask32 __U, void const *__P)
_mm256_setzero_si256 (),
(__mmask32) __U);
}
-static __inline__ void __DEFAULT_FN_ATTRS256
+
+static __inline void __DEFAULT_FN_ATTRS128
+_mm_storeu_epi16 (void *__P, __m128i __A)
+{
+ struct __storeu_epi16 {
+ __m128i __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_epi16*)__P)->__v = __A;
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_storeu_epi16 (void *__P, __mmask8 __U, __m128i __A)
{
__builtin_ia32_storedquhi128_mask ((__v8hi *) __P,
@@ -2372,6 +2418,15 @@ _mm_mask_storeu_epi16 (void *__P, __mmask8 __U, __m128i __A)
(__mmask8) __U);
}
+static __inline void __DEFAULT_FN_ATTRS256
+_mm256_storeu_epi16 (void *__P, __m256i __A)
+{
+ struct __storeu_epi16 {
+ __m256i __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_epi16*)__P)->__v = __A;
+}
+
static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_storeu_epi16 (void *__P, __mmask16 __U, __m256i __A)
{
@@ -2380,6 +2435,15 @@ _mm256_mask_storeu_epi16 (void *__P, __mmask16 __U, __m256i __A)
(__mmask16) __U);
}
+static __inline void __DEFAULT_FN_ATTRS128
+_mm_storeu_epi8 (void *__P, __m128i __A)
+{
+ struct __storeu_epi8 {
+ __m128i __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_epi8*)__P)->__v = __A;
+}
+
static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_storeu_epi8 (void *__P, __mmask16 __U, __m128i __A)
{
@@ -2388,6 +2452,15 @@ _mm_mask_storeu_epi8 (void *__P, __mmask16 __U, __m128i __A)
(__mmask16) __U);
}
+static __inline void __DEFAULT_FN_ATTRS256
+_mm256_storeu_epi8 (void *__P, __m256i __A)
+{
+ struct __storeu_epi8 {
+ __m256i __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_epi8*)__P)->__v = __A;
+}
+
static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_storeu_epi8 (void *__P, __mmask32 __U, __m256i __A)
{
diff --git a/lib/Headers/avx512vlintrin.h b/lib/Headers/avx512vlintrin.h
index 0ee1d00ef4..c0ca297837 100644
--- a/lib/Headers/avx512vlintrin.h
+++ b/lib/Headers/avx512vlintrin.h
@@ -462,10 +462,16 @@ _mm_mask_mullo_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_and_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v8su)__a & (__v8su)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_and_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
- (__v8si)_mm256_and_si256(__A, __B),
+ (__v8si)_mm256_and_epi32(__A, __B),
(__v8si)__W);
}
@@ -476,10 +482,16 @@ _mm256_maskz_and_epi32(__mmask8 __U, __m256i __A, __m256i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_and_epi32(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v4su)__a & (__v4su)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_and_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
- (__v4si)_mm_and_si128(__A, __B),
+ (__v4si)_mm_and_epi32(__A, __B),
(__v4si)__W);
}
@@ -490,10 +502,16 @@ _mm_maskz_and_epi32(__mmask8 __U, __m128i __A, __m128i __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_andnot_epi32(__m256i __A, __m256i __B)
+{
+ return (__m256i)(~(__v8su)__A & (__v8su)__B);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_andnot_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
- (__v8si)_mm256_andnot_si256(__A, __B),
+ (__v8si)_mm256_andnot_epi32(__A, __B),
(__v8si)__W);
}
@@ -505,24 +523,36 @@ _mm256_maskz_andnot_epi32(__mmask8 __U, __m256i __A, __m256i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_andnot_epi32(__m128i __A, __m128i __B)
+{
+ return (__m128i)(~(__v4su)__A & (__v4su)__B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_andnot_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
- (__v4si)_mm_andnot_si128(__A, __B),
+ (__v4si)_mm_andnot_epi32(__A, __B),
(__v4si)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_andnot_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+_mm_maskz_andnot_epi32(__mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)_mm_mask_andnot_epi32(_mm_setzero_si128(), __U, __A, __B);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_or_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v8su)__a | (__v8su)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_or_epi32 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
- (__v8si)_mm256_or_si256(__A, __B),
+ (__v8si)_mm256_or_epi32(__A, __B),
(__v8si)__W);
}
@@ -533,10 +563,16 @@ _mm256_maskz_or_epi32(__mmask8 __U, __m256i __A, __m256i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_or_epi32(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v4su)__a | (__v4su)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_or_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
- (__v4si)_mm_or_si128(__A, __B),
+ (__v4si)_mm_or_epi32(__A, __B),
(__v4si)__W);
}
@@ -547,10 +583,16 @@ _mm_maskz_or_epi32(__mmask8 __U, __m128i __A, __m128i __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_xor_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v8su)__a ^ (__v8su)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_xor_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
- (__v8si)_mm256_xor_si256(__A, __B),
+ (__v8si)_mm256_xor_epi32(__A, __B),
(__v8si)__W);
}
@@ -561,11 +603,16 @@ _mm256_maskz_xor_epi32(__mmask8 __U, __m256i __A, __m256i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_xor_epi32(__m128i __W, __mmask8 __U, __m128i __A,
- __m128i __B)
+_mm_xor_epi32(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v4su)__a ^ (__v4su)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_xor_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
- (__v4si)_mm_xor_si128(__A, __B),
+ (__v4si)_mm_xor_epi32(__A, __B),
(__v4si)__W);
}
@@ -576,10 +623,16 @@ _mm_maskz_xor_epi32(__mmask8 __U, __m128i __A, __m128i __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_and_epi64(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v4du)__a & (__v4du)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_and_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
- (__v4di)_mm256_and_si256(__A, __B),
+ (__v4di)_mm256_and_epi64(__A, __B),
(__v4di)__W);
}
@@ -590,10 +643,16 @@ _mm256_maskz_and_epi64(__mmask8 __U, __m256i __A, __m256i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_and_epi64(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v2du)__a & (__v2du)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_and_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
- (__v2di)_mm_and_si128(__A, __B),
+ (__v2di)_mm_and_epi64(__A, __B),
(__v2di)__W);
}
@@ -604,10 +663,16 @@ _mm_maskz_and_epi64(__mmask8 __U, __m128i __A, __m128i __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_andnot_epi64(__m256i __A, __m256i __B)
+{
+ return (__m256i)(~(__v4du)__A & (__v4du)__B);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_andnot_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
- (__v4di)_mm256_andnot_si256(__A, __B),
+ (__v4di)_mm256_andnot_epi64(__A, __B),
(__v4di)__W);
}
@@ -619,10 +684,16 @@ _mm256_maskz_andnot_epi64(__mmask8 __U, __m256i __A, __m256i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_andnot_epi64(__m128i __A, __m128i __B)
+{
+ return (__m128i)(~(__v2du)__A & (__v2du)__B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_andnot_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
- (__v2di)_mm_andnot_si128(__A, __B),
+ (__v2di)_mm_andnot_epi64(__A, __B),
(__v2di)__W);
}
@@ -633,10 +704,16 @@ _mm_maskz_andnot_epi64(__mmask8 __U, __m128i __A, __m128i __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_or_epi64(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v4du)__a | (__v4du)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_or_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
- (__v4di)_mm256_or_si256(__A, __B),
+ (__v4di)_mm256_or_epi64(__A, __B),
(__v4di)__W);
}
@@ -647,10 +724,16 @@ _mm256_maskz_or_epi64(__mmask8 __U, __m256i __A, __m256i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_or_epi64(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v2du)__a | (__v2du)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_or_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
- (__v2di)_mm_or_si128(__A, __B),
+ (__v2di)_mm_or_epi64(__A, __B),
(__v2di)__W);
}
@@ -661,10 +744,16 @@ _mm_maskz_or_epi64(__mmask8 __U, __m128i __A, __m128i __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_xor_epi64(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v4du)__a ^ (__v4du)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_xor_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
- (__v4di)_mm256_xor_si256(__A, __B),
+ (__v4di)_mm256_xor_epi64(__A, __B),
(__v4di)__W);
}
@@ -675,11 +764,17 @@ _mm256_maskz_xor_epi64(__mmask8 __U, __m256i __A, __m256i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_xor_epi64(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v2du)__a ^ (__v2du)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_xor_epi64(__m128i __W, __mmask8 __U, __m128i __A,
__m128i __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
- (__v2di)_mm_xor_si128(__A, __B),
+ (__v2di)_mm_xor_epi64(__A, __B),
(__v2di)__W);
}
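A short note on the new unmasked *_epi32/*_epi64 bitwise forms introduced above: bitwise AND/OR/XOR have no element width, so they compute the same bits as the existing *_si128/*_si256 intrinsics and exist chiefly as natural counterparts to the masked and maskz variants. A tiny sketch (assuming a target built with -mavx512f -mavx512vl):

#include <immintrin.h>

static __m256i and_both_ways(__m256i a, __m256i b) {
  __m256i x = _mm256_and_epi32(a, b);   /* new element-typed spelling   */
  __m256i y = _mm256_and_si256(a, b);   /* existing width-agnostic form */
  return _mm256_xor_si256(x, y);        /* always all zeros             */
}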
@@ -4989,6 +5084,12 @@ _mm256_maskz_mov_epi32 (__mmask8 __U, __m256i __A)
(__v8si) _mm256_setzero_si256 ());
}
+static __inline __m128i __DEFAULT_FN_ATTRS128
+_mm_load_epi32 (void const *__P)
+{
+ return *(__m128i *) __P;
+}
+
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_load_epi32 (__m128i __W, __mmask8 __U, void const *__P)
{
@@ -5008,6 +5109,12 @@ _mm_maskz_load_epi32 (__mmask8 __U, void const *__P)
__U);
}
+static __inline __m256i __DEFAULT_FN_ATTRS256
+_mm256_load_epi32 (void const *__P)
+{
+ return *(__m256i *) __P;
+}
+
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_load_epi32 (__m256i __W, __mmask8 __U, void const *__P)
{
@@ -5027,6 +5134,12 @@ _mm256_maskz_load_epi32 (__mmask8 __U, void const *__P)
__U);
}
+static __inline void __DEFAULT_FN_ATTRS128
+_mm_store_epi32 (void *__P, __m128i __A)
+{
+ *(__m128i *) __P = __A;
+}
+
static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_store_epi32 (void *__P, __mmask8 __U, __m128i __A)
{
@@ -5035,6 +5148,12 @@ _mm_mask_store_epi32 (void *__P, __mmask8 __U, __m128i __A)
(__mmask8) __U);
}
+static __inline void __DEFAULT_FN_ATTRS256
+_mm256_store_epi32 (void *__P, __m256i __A)
+{
+ *(__m256i *) __P = __A;
+}
+
static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_store_epi32 (void *__P, __mmask8 __U, __m256i __A)
{
@@ -5075,6 +5194,12 @@ _mm256_maskz_mov_epi64 (__mmask8 __U, __m256i __A)
(__v4di) _mm256_setzero_si256 ());
}
+static __inline __m128i __DEFAULT_FN_ATTRS128
+_mm_load_epi64 (void const *__P)
+{
+ return *(__m128i *) __P;
+}
+
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_load_epi64 (__m128i __W, __mmask8 __U, void const *__P)
{
@@ -5094,6 +5219,12 @@ _mm_maskz_load_epi64 (__mmask8 __U, void const *__P)
__U);
}
+static __inline __m256i __DEFAULT_FN_ATTRS256
+_mm256_load_epi64 (void const *__P)
+{
+ return *(__m256i *) __P;
+}
+
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_load_epi64 (__m256i __W, __mmask8 __U, void const *__P)
{
@@ -5113,6 +5244,12 @@ _mm256_maskz_load_epi64 (__mmask8 __U, void const *__P)
__U);
}
+static __inline void __DEFAULT_FN_ATTRS128
+_mm_store_epi64 (void *__P, __m128i __A)
+{
+ *(__m128i *) __P = __A;
+}
+
static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_store_epi64 (void *__P, __mmask8 __U, __m128i __A)
{
@@ -5121,6 +5258,12 @@ _mm_mask_store_epi64 (void *__P, __mmask8 __U, __m128i __A)
(__mmask8) __U);
}
+static __inline void __DEFAULT_FN_ATTRS256
+_mm256_store_epi64 (void *__P, __m256i __A)
+{
+ *(__m256i *) __P = __A;
+}
+
static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_store_epi64 (void *__P, __mmask8 __U, __m256i __A)
{
@@ -5366,6 +5509,15 @@ _mm256_maskz_load_ps (__mmask8 __U, void const *__P)
(__mmask8) __U);
}
+static __inline __m128i __DEFAULT_FN_ATTRS128
+_mm_loadu_epi64 (void const *__P)
+{
+ struct __loadu_epi64 {
+ __m128i __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_epi64*)__P)->__v;
+}
+
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_loadu_epi64 (__m128i __W, __mmask8 __U, void const *__P)
{
@@ -5383,6 +5535,15 @@ _mm_maskz_loadu_epi64 (__mmask8 __U, void const *__P)
(__mmask8) __U);
}
+static __inline __m256i __DEFAULT_FN_ATTRS256
+_mm256_loadu_epi64 (void const *__P)
+{
+ struct __loadu_epi64 {
+ __m256i __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_epi64*)__P)->__v;
+}
+
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_loadu_epi64 (__m256i __W, __mmask8 __U, void const *__P)
{
@@ -5400,6 +5561,15 @@ _mm256_maskz_loadu_epi64 (__mmask8 __U, void const *__P)
(__mmask8) __U);
}
+static __inline __m128i __DEFAULT_FN_ATTRS128
+_mm_loadu_epi32 (void const *__P)
+{
+ struct __loadu_epi32 {
+ __m128i __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_epi32*)__P)->__v;
+}
+
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_loadu_epi32 (__m128i __W, __mmask8 __U, void const *__P)
{
@@ -5417,6 +5587,15 @@ _mm_maskz_loadu_epi32 (__mmask8 __U, void const *__P)
(__mmask8) __U);
}
+static __inline __m256i __DEFAULT_FN_ATTRS256
+_mm256_loadu_epi32 (void const *__P)
+{
+ struct __loadu_epi32 {
+ __m256i __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_epi32*)__P)->__v;
+}
+
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_loadu_epi32 (__m256i __W, __mmask8 __U, void const *__P)
{
@@ -5534,6 +5713,15 @@ _mm256_mask_store_ps (void *__P, __mmask8 __U, __m256 __A)
(__mmask8) __U);
}
+static __inline void __DEFAULT_FN_ATTRS128
+_mm_storeu_epi64 (void *__P, __m128i __A)
+{
+ struct __storeu_epi64 {
+ __m128i __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_epi64*)__P)->__v = __A;
+}
+
static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_storeu_epi64 (void *__P, __mmask8 __U, __m128i __A)
{
@@ -5542,6 +5730,15 @@ _mm_mask_storeu_epi64 (void *__P, __mmask8 __U, __m128i __A)
(__mmask8) __U);
}
+static __inline void __DEFAULT_FN_ATTRS256
+_mm256_storeu_epi64 (void *__P, __m256i __A)
+{
+ struct __storeu_epi64 {
+ __m256i __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_epi64*)__P)->__v = __A;
+}
+
static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_storeu_epi64 (void *__P, __mmask8 __U, __m256i __A)
{
@@ -5550,6 +5747,15 @@ _mm256_mask_storeu_epi64 (void *__P, __mmask8 __U, __m256i __A)
(__mmask8) __U);
}
+static __inline void __DEFAULT_FN_ATTRS128
+_mm_storeu_epi32 (void *__P, __m128i __A)
+{
+ struct __storeu_epi32 {
+ __m128i __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_epi32*)__P)->__v = __A;
+}
+
static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_storeu_epi32 (void *__P, __mmask8 __U, __m128i __A)
{
@@ -5558,6 +5764,15 @@ _mm_mask_storeu_epi32 (void *__P, __mmask8 __U, __m128i __A)
(__mmask8) __U);
}
+static __inline void __DEFAULT_FN_ATTRS256
+_mm256_storeu_epi32 (void *__P, __m256i __A)
+{
+ struct __storeu_epi32 {
+ __m256i __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_epi32*)__P)->__v = __A;
+}
+
static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_storeu_epi32 (void *__P, __mmask8 __U, __m256i __A)
{
diff --git a/lib/Headers/cuda_wrappers/new b/lib/Headers/cuda_wrappers/new
index 71b7a52363..f49811c5a5 100644
--- a/lib/Headers/cuda_wrappers/new
+++ b/lib/Headers/cuda_wrappers/new
@@ -73,10 +73,12 @@ __device__ inline void operator delete[](void *ptr,
// Sized delete, C++14 only.
#if __cplusplus >= 201402L
-__device__ void operator delete(void *ptr, __SIZE_TYPE__ size) CUDA_NOEXCEPT {
+__device__ inline void operator delete(void *ptr,
+ __SIZE_TYPE__ size) CUDA_NOEXCEPT {
::operator delete(ptr);
}
-__device__ void operator delete[](void *ptr, __SIZE_TYPE__ size) CUDA_NOEXCEPT {
+__device__ inline void operator delete[](void *ptr,
+ __SIZE_TYPE__ size) CUDA_NOEXCEPT {
::operator delete(ptr);
}
#endif
diff --git a/lib/Headers/emmintrin.h b/lib/Headers/emmintrin.h
index f0ea7cd05c..6d61f97199 100644
--- a/lib/Headers/emmintrin.h
+++ b/lib/Headers/emmintrin.h
@@ -1675,7 +1675,49 @@ _mm_loadu_si64(void const *__a)
long long __v;
} __attribute__((__packed__, __may_alias__));
long long __u = ((struct __loadu_si64*)__a)->__v;
- return __extension__ (__m128i)(__v2di){__u, 0L};
+ return __extension__ (__m128i)(__v2di){__u, 0LL};
+}
+
+/// Loads a 32-bit integer value to the low element of a 128-bit integer
+/// vector and clears the upper elements.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVD / MOVD </c> instruction.
+///
+/// \param __a
+/// A pointer to a 32-bit memory location. The address of the memory
+/// location does not have to be aligned.
+/// \returns A 128-bit vector of [4 x i32] containing the loaded value.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_loadu_si32(void const *__a)
+{
+ struct __loadu_si32 {
+ int __v;
+ } __attribute__((__packed__, __may_alias__));
+ int __u = ((struct __loadu_si32*)__a)->__v;
+ return __extension__ (__m128i)(__v4si){__u, 0, 0, 0};
+}
+
+/// Loads a 16-bit integer value to the low element of a 128-bit integer
+/// vector and clears the upper elements.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic does not correspond to a specific instruction.
+///
+/// \param __a
+/// A pointer to a 16-bit memory location. The address of the memory
+/// location does not have to be aligned.
+/// \returns A 128-bit vector of [8 x i16] containing the loaded value.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_loadu_si16(void const *__a)
+{
+ struct __loadu_si16 {
+ short __v;
+ } __attribute__((__packed__, __may_alias__));
+ short __u = ((struct __loadu_si16*)__a)->__v;
+ return __extension__ (__m128i)(__v8hi){__u, 0, 0, 0, 0, 0, 0, 0};
}
/// Loads a 64-bit double-precision value to the low element of a
@@ -3993,6 +4035,69 @@ _mm_storeu_si128(__m128i *__p, __m128i __b)
((struct __storeu_si128*)__p)->__v = __b;
}
+/// Stores a 64-bit integer value from the low element of a 128-bit integer
+/// vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
+///
+/// \param __p
+/// A pointer to a 64-bit memory location. The address of the memory
+/// location does not have to be aligned.
+/// \param __b
+/// A 128-bit integer vector containing the value to be stored.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storeu_si64(void *__p, __m128i __b)
+{
+ struct __storeu_si64 {
+ long long __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_si64*)__p)->__v = ((__v2di)__b)[0];
+}
+
+/// Stores a 32-bit integer value from the low element of a 128-bit integer
+/// vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVD / MOVD </c> instruction.
+///
+/// \param __p
+/// A pointer to a 32-bit memory location. The address of the memory
+/// location does not have to be aligned.
+/// \param __b
+/// A 128-bit integer vector containing the value to be stored.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storeu_si32(void *__p, __m128i __b)
+{
+ struct __storeu_si32 {
+ int __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_si32*)__p)->__v = ((__v4si)__b)[0];
+}
+
+/// Stores a 16-bit integer value from the low element of a 128-bit integer
+/// vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic does not correspond to a specific instruction.
+///
+/// \param __p
+/// A pointer to a 16-bit memory location. The address of the memory
+/// location does not have to be aligned.
+/// \param __b
+/// A 128-bit integer vector containing the value to be stored.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storeu_si16(void *__p, __m128i __b)
+{
+ struct __storeu_si16 {
+ short __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_si16*)__p)->__v = ((__v8hi)__b)[0];
+}
+
/// Moves bytes selected by the mask from the first operand to the
/// specified unaligned memory location. When a mask bit is 1, the
/// corresponding byte is written, otherwise it is not written.
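A short usage sketch (not part of the commit) for the narrow unaligned load/store intrinsics added above, assuming an x86 target and <emmintrin.h>; the helper name roundtrip is invented. Loading a possibly-unaligned 32-bit value this way and storing 16 bits back avoids alignment and strict-aliasing pitfalls.

#include <emmintrin.h>
#include <stdint.h>

/* Load one 32-bit value into lane 0, operate on it, store 16 bits back. */
static void roundtrip(const uint8_t *src, uint8_t *dst) {
  __m128i v = _mm_loadu_si32(src);          /* upper lanes are zeroed         */
  v = _mm_add_epi16(v, _mm_set1_epi16(1));  /* any 128-bit op works from here */
  _mm_storeu_si16(dst, v);                  /* writes only the low 16 bits    */
}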
diff --git a/lib/Headers/intrin.h b/lib/Headers/intrin.h
index edb947eef6..b2028ff5ba 100644
--- a/lib/Headers/intrin.h
+++ b/lib/Headers/intrin.h
@@ -90,8 +90,6 @@ void __inwordstring(unsigned short, unsigned short *, unsigned long);
void __lidt(void *);
unsigned __int64 __ll_lshift(unsigned __int64, int);
__int64 __ll_rshift(__int64, int);
-unsigned int __lzcnt(unsigned int);
-unsigned short __lzcnt16(unsigned short);
static __inline__
void __movsb(unsigned char *, unsigned char const *, size_t);
static __inline__
@@ -219,7 +217,6 @@ void __incgsbyte(unsigned long);
void __incgsdword(unsigned long);
void __incgsqword(unsigned long);
void __incgsword(unsigned long);
-unsigned __int64 __lzcnt64(unsigned __int64);
static __inline__
void __movsq(unsigned long long *, unsigned long long const *, size_t);
static __inline__
@@ -329,189 +326,63 @@ __int64 _InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask);
|* Interlocked Exchange Add
\*----------------------------------------------------------------------------*/
#if defined(__arm__) || defined(__aarch64__)
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedExchangeAdd8_acq(char volatile *_Addend, char _Value) {
- return __atomic_fetch_add(_Addend, _Value, __ATOMIC_ACQUIRE);
-}
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedExchangeAdd8_nf(char volatile *_Addend, char _Value) {
- return __atomic_fetch_add(_Addend, _Value, __ATOMIC_RELAXED);
-}
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedExchangeAdd8_rel(char volatile *_Addend, char _Value) {
- return __atomic_fetch_add(_Addend, _Value, __ATOMIC_RELAXED);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedExchangeAdd16_acq(short volatile *_Addend, short _Value) {
- return __atomic_fetch_add(_Addend, _Value, __ATOMIC_ACQUIRE);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedExchangeAdd16_nf(short volatile *_Addend, short _Value) {
- return __atomic_fetch_add(_Addend, _Value, __ATOMIC_RELAXED);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedExchangeAdd16_rel(short volatile *_Addend, short _Value) {
- return __atomic_fetch_add(_Addend, _Value, __ATOMIC_RELEASE);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedExchangeAdd_acq(long volatile *_Addend, long _Value) {
- return __atomic_fetch_add(_Addend, _Value, __ATOMIC_ACQUIRE);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedExchangeAdd_nf(long volatile *_Addend, long _Value) {
- return __atomic_fetch_add(_Addend, _Value, __ATOMIC_RELAXED);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedExchangeAdd_rel(long volatile *_Addend, long _Value) {
- return __atomic_fetch_add(_Addend, _Value, __ATOMIC_RELEASE);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedExchangeAdd64_acq(__int64 volatile *_Addend, __int64 _Value) {
- return __atomic_fetch_add(_Addend, _Value, __ATOMIC_ACQUIRE);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedExchangeAdd64_nf(__int64 volatile *_Addend, __int64 _Value) {
- return __atomic_fetch_add(_Addend, _Value, __ATOMIC_RELAXED);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedExchangeAdd64_rel(__int64 volatile *_Addend, __int64 _Value) {
- return __atomic_fetch_add(_Addend, _Value, __ATOMIC_RELEASE);
-}
+char _InterlockedExchangeAdd8_acq(char volatile *_Addend, char _Value);
+char _InterlockedExchangeAdd8_nf(char volatile *_Addend, char _Value);
+char _InterlockedExchangeAdd8_rel(char volatile *_Addend, char _Value);
+short _InterlockedExchangeAdd16_acq(short volatile *_Addend, short _Value);
+short _InterlockedExchangeAdd16_nf(short volatile *_Addend, short _Value);
+short _InterlockedExchangeAdd16_rel(short volatile *_Addend, short _Value);
+long _InterlockedExchangeAdd_acq(long volatile *_Addend, long _Value);
+long _InterlockedExchangeAdd_nf(long volatile *_Addend, long _Value);
+long _InterlockedExchangeAdd_rel(long volatile *_Addend, long _Value);
+__int64 _InterlockedExchangeAdd64_acq(__int64 volatile *_Addend, __int64 _Value);
+__int64 _InterlockedExchangeAdd64_nf(__int64 volatile *_Addend, __int64 _Value);
+__int64 _InterlockedExchangeAdd64_rel(__int64 volatile *_Addend, __int64 _Value);
#endif
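For reference, a typical call site for the declarations above when targeting ARM/AArch64 in MSVC-compatible mode; the refcount variable and retain helper are hypothetical. The _acq suffix requests acquire ordering, and the return value is the value the addend held before the addition.

#include <intrin.h>

static volatile long refcount;

long retain(void) {
  /* Atomically add 1 with acquire ordering; returns the pre-increment value. */
  return _InterlockedExchangeAdd_acq(&refcount, 1);
}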
/*----------------------------------------------------------------------------*\
|* Interlocked Increment
\*----------------------------------------------------------------------------*/
#if defined(__arm__) || defined(__aarch64__)
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedIncrement16_acq(short volatile *_Value) {
- return __atomic_add_fetch(_Value, 1, __ATOMIC_ACQUIRE);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedIncrement16_nf(short volatile *_Value) {
- return __atomic_add_fetch(_Value, 1, __ATOMIC_RELAXED);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedIncrement16_rel(short volatile *_Value) {
- return __atomic_add_fetch(_Value, 1, __ATOMIC_RELEASE);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedIncrement_acq(long volatile *_Value) {
- return __atomic_add_fetch(_Value, 1, __ATOMIC_ACQUIRE);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedIncrement_nf(long volatile *_Value) {
- return __atomic_add_fetch(_Value, 1, __ATOMIC_RELAXED);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedIncrement_rel(long volatile *_Value) {
- return __atomic_add_fetch(_Value, 1, __ATOMIC_RELEASE);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedIncrement64_acq(__int64 volatile *_Value) {
- return __atomic_add_fetch(_Value, 1, __ATOMIC_ACQUIRE);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedIncrement64_nf(__int64 volatile *_Value) {
- return __atomic_add_fetch(_Value, 1, __ATOMIC_RELAXED);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedIncrement64_rel(__int64 volatile *_Value) {
- return __atomic_add_fetch(_Value, 1, __ATOMIC_RELEASE);
-}
+short _InterlockedIncrement16_acq(short volatile *_Value);
+short _InterlockedIncrement16_nf(short volatile *_Value);
+short _InterlockedIncrement16_rel(short volatile *_Value);
+long _InterlockedIncrement_acq(long volatile *_Value);
+long _InterlockedIncrement_nf(long volatile *_Value);
+long _InterlockedIncrement_rel(long volatile *_Value);
+__int64 _InterlockedIncrement64_acq(__int64 volatile *_Value);
+__int64 _InterlockedIncrement64_nf(__int64 volatile *_Value);
+__int64 _InterlockedIncrement64_rel(__int64 volatile *_Value);
#endif
/*----------------------------------------------------------------------------*\
|* Interlocked Decrement
\*----------------------------------------------------------------------------*/
#if defined(__arm__) || defined(__aarch64__)
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedDecrement16_acq(short volatile *_Value) {
- return __atomic_sub_fetch(_Value, 1, __ATOMIC_ACQUIRE);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedDecrement16_nf(short volatile *_Value) {
- return __atomic_sub_fetch(_Value, 1, __ATOMIC_RELAXED);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedDecrement16_rel(short volatile *_Value) {
- return __atomic_sub_fetch(_Value, 1, __ATOMIC_RELEASE);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedDecrement_acq(long volatile *_Value) {
- return __atomic_sub_fetch(_Value, 1, __ATOMIC_ACQUIRE);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedDecrement_nf(long volatile *_Value) {
- return __atomic_sub_fetch(_Value, 1, __ATOMIC_RELAXED);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedDecrement_rel(long volatile *_Value) {
- return __atomic_sub_fetch(_Value, 1, __ATOMIC_RELEASE);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedDecrement64_acq(__int64 volatile *_Value) {
- return __atomic_sub_fetch(_Value, 1, __ATOMIC_ACQUIRE);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedDecrement64_nf(__int64 volatile *_Value) {
- return __atomic_sub_fetch(_Value, 1, __ATOMIC_RELAXED);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedDecrement64_rel(__int64 volatile *_Value) {
- return __atomic_sub_fetch(_Value, 1, __ATOMIC_RELEASE);
-}
+short _InterlockedDecrement16_acq(short volatile *_Value);
+short _InterlockedDecrement16_nf(short volatile *_Value);
+short _InterlockedDecrement16_rel(short volatile *_Value);
+long _InterlockedDecrement_acq(long volatile *_Value);
+long _InterlockedDecrement_nf(long volatile *_Value);
+long _InterlockedDecrement_rel(long volatile *_Value);
+__int64 _InterlockedDecrement64_acq(__int64 volatile *_Value);
+__int64 _InterlockedDecrement64_nf(__int64 volatile *_Value);
+__int64 _InterlockedDecrement64_rel(__int64 volatile *_Value);
#endif
/*----------------------------------------------------------------------------*\
|* Interlocked And
\*----------------------------------------------------------------------------*/
#if defined(__arm__) || defined(__aarch64__)
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedAnd8_acq(char volatile *_Value, char _Mask) {
- return __atomic_fetch_and(_Value, _Mask, __ATOMIC_ACQUIRE);
-}
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedAnd8_nf(char volatile *_Value, char _Mask) {
- return __atomic_fetch_and(_Value, _Mask, __ATOMIC_RELAXED);
-}
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedAnd8_rel(char volatile *_Value, char _Mask) {
- return __atomic_fetch_and(_Value, _Mask, __ATOMIC_RELEASE);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedAnd16_acq(short volatile *_Value, short _Mask) {
- return __atomic_fetch_and(_Value, _Mask, __ATOMIC_ACQUIRE);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedAnd16_nf(short volatile *_Value, short _Mask) {
- return __atomic_fetch_and(_Value, _Mask, __ATOMIC_RELAXED);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedAnd16_rel(short volatile *_Value, short _Mask) {
- return __atomic_fetch_and(_Value, _Mask, __ATOMIC_RELEASE);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedAnd_acq(long volatile *_Value, long _Mask) {
- return __atomic_fetch_and(_Value, _Mask, __ATOMIC_ACQUIRE);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedAnd_nf(long volatile *_Value, long _Mask) {
- return __atomic_fetch_and(_Value, _Mask, __ATOMIC_RELAXED);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedAnd_rel(long volatile *_Value, long _Mask) {
- return __atomic_fetch_and(_Value, _Mask, __ATOMIC_RELEASE);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedAnd64_acq(__int64 volatile *_Value, __int64 _Mask) {
- return __atomic_fetch_and(_Value, _Mask, __ATOMIC_ACQUIRE);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedAnd64_nf(__int64 volatile *_Value, __int64 _Mask) {
- return __atomic_fetch_and(_Value, _Mask, __ATOMIC_RELAXED);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedAnd64_rel(__int64 volatile *_Value, __int64 _Mask) {
- return __atomic_fetch_and(_Value, _Mask, __ATOMIC_RELEASE);
-}
+char _InterlockedAnd8_acq(char volatile *_Value, char _Mask);
+char _InterlockedAnd8_nf(char volatile *_Value, char _Mask);
+char _InterlockedAnd8_rel(char volatile *_Value, char _Mask);
+short _InterlockedAnd16_acq(short volatile *_Value, short _Mask);
+short _InterlockedAnd16_nf(short volatile *_Value, short _Mask);
+short _InterlockedAnd16_rel(short volatile *_Value, short _Mask);
+long _InterlockedAnd_acq(long volatile *_Value, long _Mask);
+long _InterlockedAnd_nf(long volatile *_Value, long _Mask);
+long _InterlockedAnd_rel(long volatile *_Value, long _Mask);
+__int64 _InterlockedAnd64_acq(__int64 volatile *_Value, __int64 _Mask);
+__int64 _InterlockedAnd64_nf(__int64 volatile *_Value, __int64 _Mask);
+__int64 _InterlockedAnd64_rel(__int64 volatile *_Value, __int64 _Mask);
#endif
/*----------------------------------------------------------------------------*\
|* Bit Counting and Testing
@@ -534,261 +405,81 @@ unsigned char _interlockedbittestandreset_rel(long volatile *_BitBase,
|* Interlocked Or
\*----------------------------------------------------------------------------*/
#if defined(__arm__) || defined(__aarch64__)
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedOr8_acq(char volatile *_Value, char _Mask) {
- return __atomic_fetch_or(_Value, _Mask, __ATOMIC_ACQUIRE);
-}
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedOr8_nf(char volatile *_Value, char _Mask) {
- return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELAXED);
-}
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedOr8_rel(char volatile *_Value, char _Mask) {
- return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELEASE);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedOr16_acq(short volatile *_Value, short _Mask) {
- return __atomic_fetch_or(_Value, _Mask, __ATOMIC_ACQUIRE);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedOr16_nf(short volatile *_Value, short _Mask) {
- return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELAXED);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedOr16_rel(short volatile *_Value, short _Mask) {
- return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELEASE);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedOr_acq(long volatile *_Value, long _Mask) {
- return __atomic_fetch_or(_Value, _Mask, __ATOMIC_ACQUIRE);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedOr_nf(long volatile *_Value, long _Mask) {
- return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELAXED);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedOr_rel(long volatile *_Value, long _Mask) {
- return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELEASE);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedOr64_acq(__int64 volatile *_Value, __int64 _Mask) {
- return __atomic_fetch_or(_Value, _Mask, __ATOMIC_ACQUIRE);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedOr64_nf(__int64 volatile *_Value, __int64 _Mask) {
- return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELAXED);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedOr64_rel(__int64 volatile *_Value, __int64 _Mask) {
- return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELEASE);
-}
+char _InterlockedOr8_acq(char volatile *_Value, char _Mask);
+char _InterlockedOr8_nf(char volatile *_Value, char _Mask);
+char _InterlockedOr8_rel(char volatile *_Value, char _Mask);
+short _InterlockedOr16_acq(short volatile *_Value, short _Mask);
+short _InterlockedOr16_nf(short volatile *_Value, short _Mask);
+short _InterlockedOr16_rel(short volatile *_Value, short _Mask);
+long _InterlockedOr_acq(long volatile *_Value, long _Mask);
+long _InterlockedOr_nf(long volatile *_Value, long _Mask);
+long _InterlockedOr_rel(long volatile *_Value, long _Mask);
+__int64 _InterlockedOr64_acq(__int64 volatile *_Value, __int64 _Mask);
+__int64 _InterlockedOr64_nf(__int64 volatile *_Value, __int64 _Mask);
+__int64 _InterlockedOr64_rel(__int64 volatile *_Value, __int64 _Mask);
#endif
/*----------------------------------------------------------------------------*\
|* Interlocked Xor
\*----------------------------------------------------------------------------*/
#if defined(__arm__) || defined(__aarch64__)
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedXor8_acq(char volatile *_Value, char _Mask) {
- return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_ACQUIRE);
-}
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedXor8_nf(char volatile *_Value, char _Mask) {
- return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELAXED);
-}
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedXor8_rel(char volatile *_Value, char _Mask) {
- return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELEASE);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedXor16_acq(short volatile *_Value, short _Mask) {
- return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_ACQUIRE);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedXor16_nf(short volatile *_Value, short _Mask) {
- return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELAXED);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedXor16_rel(short volatile *_Value, short _Mask) {
- return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELEASE);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedXor_acq(long volatile *_Value, long _Mask) {
- return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_ACQUIRE);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedXor_nf(long volatile *_Value, long _Mask) {
- return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELAXED);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedXor_rel(long volatile *_Value, long _Mask) {
- return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELEASE);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedXor64_acq(__int64 volatile *_Value, __int64 _Mask) {
- return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_ACQUIRE);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedXor64_nf(__int64 volatile *_Value, __int64 _Mask) {
- return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELAXED);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedXor64_rel(__int64 volatile *_Value, __int64 _Mask) {
- return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELEASE);
-}
+char _InterlockedXor8_acq(char volatile *_Value, char _Mask);
+char _InterlockedXor8_nf(char volatile *_Value, char _Mask);
+char _InterlockedXor8_rel(char volatile *_Value, char _Mask);
+short _InterlockedXor16_acq(short volatile *_Value, short _Mask);
+short _InterlockedXor16_nf(short volatile *_Value, short _Mask);
+short _InterlockedXor16_rel(short volatile *_Value, short _Mask);
+long _InterlockedXor_acq(long volatile *_Value, long _Mask);
+long _InterlockedXor_nf(long volatile *_Value, long _Mask);
+long _InterlockedXor_rel(long volatile *_Value, long _Mask);
+__int64 _InterlockedXor64_acq(__int64 volatile *_Value, __int64 _Mask);
+__int64 _InterlockedXor64_nf(__int64 volatile *_Value, __int64 _Mask);
+__int64 _InterlockedXor64_rel(__int64 volatile *_Value, __int64 _Mask);
#endif
/*----------------------------------------------------------------------------*\
|* Interlocked Exchange
\*----------------------------------------------------------------------------*/
#if defined(__arm__) || defined(__aarch64__)
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedExchange8_acq(char volatile *_Target, char _Value) {
- __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_ACQUIRE);
- return _Value;
-}
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedExchange8_nf(char volatile *_Target, char _Value) {
- __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_RELAXED);
- return _Value;
-}
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedExchange8_rel(char volatile *_Target, char _Value) {
- __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_RELEASE);
- return _Value;
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedExchange16_acq(short volatile *_Target, short _Value) {
- __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_ACQUIRE);
- return _Value;
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedExchange16_nf(short volatile *_Target, short _Value) {
- __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_RELAXED);
- return _Value;
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedExchange16_rel(short volatile *_Target, short _Value) {
- __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_RELEASE);
- return _Value;
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedExchange_acq(long volatile *_Target, long _Value) {
- __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_ACQUIRE);
- return _Value;
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedExchange_nf(long volatile *_Target, long _Value) {
- __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_RELAXED);
- return _Value;
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedExchange_rel(long volatile *_Target, long _Value) {
- __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_RELEASE);
- return _Value;
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedExchange64_acq(__int64 volatile *_Target, __int64 _Value) {
- __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_ACQUIRE);
- return _Value;
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedExchange64_nf(__int64 volatile *_Target, __int64 _Value) {
- __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_RELAXED);
- return _Value;
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedExchange64_rel(__int64 volatile *_Target, __int64 _Value) {
- __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_RELEASE);
- return _Value;
-}
+char _InterlockedExchange8_acq(char volatile *_Target, char _Value);
+char _InterlockedExchange8_nf(char volatile *_Target, char _Value);
+char _InterlockedExchange8_rel(char volatile *_Target, char _Value);
+short _InterlockedExchange16_acq(short volatile *_Target, short _Value);
+short _InterlockedExchange16_nf(short volatile *_Target, short _Value);
+short _InterlockedExchange16_rel(short volatile *_Target, short _Value);
+long _InterlockedExchange_acq(long volatile *_Target, long _Value);
+long _InterlockedExchange_nf(long volatile *_Target, long _Value);
+long _InterlockedExchange_rel(long volatile *_Target, long _Value);
+__int64 _InterlockedExchange64_acq(__int64 volatile *_Target, __int64 _Value);
+__int64 _InterlockedExchange64_nf(__int64 volatile *_Target, __int64 _Value);
+__int64 _InterlockedExchange64_rel(__int64 volatile *_Target, __int64 _Value);
#endif
/*----------------------------------------------------------------------------*\
|* Interlocked Compare Exchange
\*----------------------------------------------------------------------------*/
#if defined(__arm__) || defined(__aarch64__)
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange8_acq(char volatile *_Destination,
- char _Exchange, char _Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
- return _Comparand;
-}
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange8_nf(char volatile *_Destination,
- char _Exchange, char _Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
- return _Comparand;
-}
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange8_rel(char volatile *_Destination,
- char _Exchange, char _Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_RELEASE);
- return _Comparand;
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange16_acq(short volatile *_Destination,
- short _Exchange, short _Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
- return _Comparand;
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange16_nf(short volatile *_Destination,
- short _Exchange, short _Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
- return _Comparand;
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange16_rel(short volatile *_Destination,
- short _Exchange, short _Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_RELEASE);
- return _Comparand;
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange_acq(long volatile *_Destination,
- long _Exchange, long _Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
- return _Comparand;
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange_nf(long volatile *_Destination,
- long _Exchange, long _Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
- return _Comparand;
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange_rel(long volatile *_Destination,
- long _Exchange, long _Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_RELEASE);
- return _Comparand;
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange64_acq(__int64 volatile *_Destination,
- __int64 _Exchange, __int64 _Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
- return _Comparand;
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange64_nf(__int64 volatile *_Destination,
- __int64 _Exchange, __int64 _Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
- return _Comparand;
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange64_rel(__int64 volatile *_Destination,
- __int64 _Exchange, __int64 _Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_RELEASE);
- return _Comparand;
-}
+char _InterlockedCompareExchange8_acq(char volatile *_Destination,
+ char _Exchange, char _Comparand);
+char _InterlockedCompareExchange8_nf(char volatile *_Destination,
+ char _Exchange, char _Comparand);
+char _InterlockedCompareExchange8_rel(char volatile *_Destination,
+ char _Exchange, char _Comparand);
+short _InterlockedCompareExchange16_acq(short volatile *_Destination,
+ short _Exchange, short _Comparand);
+short _InterlockedCompareExchange16_nf(short volatile *_Destination,
+ short _Exchange, short _Comparand);
+short _InterlockedCompareExchange16_rel(short volatile *_Destination,
+ short _Exchange, short _Comparand);
+long _InterlockedCompareExchange_acq(long volatile *_Destination,
+ long _Exchange, long _Comparand);
+long _InterlockedCompareExchange_nf(long volatile *_Destination,
+ long _Exchange, long _Comparand);
+long _InterlockedCompareExchange_rel(long volatile *_Destination,
+ long _Exchange, long _Comparand);
+__int64 _InterlockedCompareExchange64_acq(__int64 volatile *_Destination,
+ __int64 _Exchange, __int64 _Comparand);
+__int64 _InterlockedCompareExchange64_nf(__int64 volatile *_Destination,
+ __int64 _Exchange, __int64 _Comparand);
+__int64 _InterlockedCompareExchange64_rel(__int64 volatile *_Destination,
+ __int64 _Exchange, __int64 _Comparand);
#endif
/*----------------------------------------------------------------------------*\
@@ -841,7 +532,7 @@ __stosq(unsigned __int64 *__dst, unsigned __int64 __x, size_t __n) {
static __inline__ void __DEFAULT_FN_ATTRS
__cpuid(int __info[4], int __level) {
__asm__ ("cpuid" : "=a"(__info[0]), "=b" (__info[1]), "=c"(__info[2]), "=d"(__info[3])
- : "a"(__level));
+ : "a"(__level), "c"(0));
}
static __inline__ void __DEFAULT_FN_ATTRS
__cpuidex(int __info[4], int __level, int __ecx) {
@@ -865,6 +556,16 @@ __nop(void) {
#endif
/*----------------------------------------------------------------------------*\
+|* MS AArch64 specific
+\*----------------------------------------------------------------------------*/
+#if defined(__aarch64__)
+unsigned __int64 __getReg(int);
+long _InterlockedAdd(long volatile *Addend, long Value);
+int _ReadStatusReg(int);
+void _WriteStatusReg(int, int);
+#endif
+
+/*----------------------------------------------------------------------------*\
|* Privileged intrinsics
\*----------------------------------------------------------------------------*/
#if defined(__i386__) || defined(__x86_64__)
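
The inline bodies removed above encoded the memory-order mapping for the ARM/AArch64 Interlocked variants: the _acq suffix maps to __ATOMIC_ACQUIRE, _rel to __ATOMIC_RELEASE, and _nf ("no fence") to __ATOMIC_RELAXED; after this change the header only declares the functions and leaves the expansion to the compiler's builtin handling. A minimal C sketch of that mapping, purely for illustration (the function name is made up and this is not the header's implementation):

/* Illustrative only: the acquire-ordered exchange that _InterlockedExchange_acq
   performs, expressed with the clang/GCC __atomic builtins. */
static long my_interlocked_exchange_acq(long volatile *Target, long Value) {
  /* Atomically store Value into *Target, return the previous contents,
     and impose acquire ordering on the calling thread. */
  return __atomic_exchange_n(Target, Value, __ATOMIC_ACQUIRE);
}
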
diff --git a/lib/Headers/lzcntintrin.h b/lib/Headers/lzcntintrin.h
index f2674d2a5a..35c1651cc4 100644
--- a/lib/Headers/lzcntintrin.h
+++ b/lib/Headers/lzcntintrin.h
@@ -31,6 +31,7 @@
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("lzcnt")))
+#ifndef _MSC_VER
/// Counts the number of leading zero bits in the operand.
///
/// \headerfile <x86intrin.h>
@@ -41,11 +42,8 @@
/// An unsigned 16-bit integer whose leading zeros are to be counted.
/// \returns An unsigned 16-bit integer containing the number of leading zero
/// bits in the operand.
-static __inline__ unsigned short __DEFAULT_FN_ATTRS
-__lzcnt16(unsigned short __X)
-{
- return __builtin_ia32_lzcnt_u16(__X);
-}
+#define __lzcnt16(X) __builtin_ia32_lzcnt_u16((unsigned short)(X))
+#endif // _MSC_VER
/// Counts the number of leading zero bits in the operand.
///
@@ -82,6 +80,7 @@ _lzcnt_u32(unsigned int __X)
}
#ifdef __x86_64__
+#ifndef _MSC_VER
/// Counts the number of leading zero bits in the operand.
///
/// \headerfile <x86intrin.h>
@@ -93,11 +92,8 @@ _lzcnt_u32(unsigned int __X)
/// \returns An unsigned 64-bit integer containing the number of leading zero
/// bits in the operand.
/// \see _lzcnt_u64
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-__lzcnt64(unsigned long long __X)
-{
- return __builtin_ia32_lzcnt_u64(__X);
-}
+#define __lzcnt64(X) __builtin_ia32_lzcnt_u64((unsigned long long)(X))
+#endif // _MSC_VER
/// Counts the number of leading zero bits in the operand.
///
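
Under MSVC compatibility the 16- and 64-bit wrappers become macros over the lzcnt builtins rather than always-inline functions, presumably so the header does not conflict with the __lzcnt16/__lzcnt64 declarations that MSVC itself provides. A small C usage sketch, assuming the translation unit is built with the lzcnt feature enabled (for example -mlzcnt); the helper name is illustrative:

#include <x86intrin.h>

/* 0x00FF, viewed as a 16-bit value, has 8 leading zero bits. */
static unsigned short leading_zeros_16(void) {
  return __lzcnt16(0x00FFu); /* evaluates to 8 */
}
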
diff --git a/lib/Headers/opencl-c.h b/lib/Headers/opencl-c.h
index e481c792df..dbb846121e 100644
--- a/lib/Headers/opencl-c.h
+++ b/lib/Headers/opencl-c.h
@@ -22,6 +22,14 @@
#endif //cl_khr_3d_image_writes
#endif //__OPENCL_C_VERSION__ < CL_VERSION_2_0
+#if __OPENCL_C_VERSION__ >= CL_VERSION_1_2
+#ifndef cl_intel_planar_yuv
+#define cl_intel_planar_yuv
+#endif // cl_intel_planar_yuv
+#pragma OPENCL EXTENSION cl_intel_planar_yuv : begin
+#pragma OPENCL EXTENSION cl_intel_planar_yuv : end
+#endif // __OPENCL_C_VERSION__ >= CL_VERSION_1_2
+
#define __ovld __attribute__((overloadable))
#define __conv __attribute__((convergent))
@@ -14462,7 +14470,7 @@ half16 __ovld __cnfn shuffle2(half16 x, half16 y, ushort16 mask);
#if __OPENCL_C_VERSION__ >= CL_VERSION_1_2
// OpenCL v1.2 s6.12.13, v2.0 s6.13.13 - printf
-int printf(__constant const char* st, ...);
+int printf(__constant const char* st, ...) __attribute__((format(printf, 1, 2)));
#endif
// OpenCL v1.1 s6.11.3, v1.2 s6.12.14, v2.0 s6.13.14 - Image Read and Write Functions
@@ -15707,7 +15715,6 @@ double __ovld __conv work_group_scan_inclusive_max(double x);
// OpenCL v2.0 s6.13.16 - Pipe Functions
#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
-#define PIPE_RESERVE_ID_VALID_BIT (1U << 30)
#define CLK_NULL_RESERVE_ID (__builtin_astype(((void*)(__SIZE_MAX__)), reserve_id_t))
bool __ovld is_valid_reserve_id(reserve_id_t reserve_id);
#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
@@ -16193,6 +16200,637 @@ void __ovld __conv intel_sub_group_block_write_us4( __global ushort* p, u
void __ovld __conv intel_sub_group_block_write_us8( __global ushort* p, ushort8 data );
#endif // cl_intel_subgroups_short
+#ifdef cl_intel_device_side_avc_motion_estimation
+#pragma OPENCL EXTENSION cl_intel_device_side_avc_motion_estimation : begin
+
+#define CLK_AVC_ME_MAJOR_16x16_INTEL 0x0
+#define CLK_AVC_ME_MAJOR_16x8_INTEL 0x1
+#define CLK_AVC_ME_MAJOR_8x16_INTEL 0x2
+#define CLK_AVC_ME_MAJOR_8x8_INTEL 0x3
+
+#define CLK_AVC_ME_MINOR_8x8_INTEL 0x0
+#define CLK_AVC_ME_MINOR_8x4_INTEL 0x1
+#define CLK_AVC_ME_MINOR_4x8_INTEL 0x2
+#define CLK_AVC_ME_MINOR_4x4_INTEL 0x3
+
+#define CLK_AVC_ME_MAJOR_FORWARD_INTEL 0x0
+#define CLK_AVC_ME_MAJOR_BACKWARD_INTEL 0x1
+#define CLK_AVC_ME_MAJOR_BIDIRECTIONAL_INTEL 0x2
+
+#define CLK_AVC_ME_PARTITION_MASK_ALL_INTEL 0x0
+#define CLK_AVC_ME_PARTITION_MASK_16x16_INTEL 0x7E
+#define CLK_AVC_ME_PARTITION_MASK_16x8_INTEL 0x7D
+#define CLK_AVC_ME_PARTITION_MASK_8x16_INTEL 0x7B
+#define CLK_AVC_ME_PARTITION_MASK_8x8_INTEL 0x77
+#define CLK_AVC_ME_PARTITION_MASK_8x4_INTEL 0x6F
+#define CLK_AVC_ME_PARTITION_MASK_4x8_INTEL 0x5F
+#define CLK_AVC_ME_PARTITION_MASK_4x4_INTEL 0x3F
+
+#define CLK_AVC_ME_SLICE_TYPE_PRED_INTEL 0x0
+#define CLK_AVC_ME_SLICE_TYPE_BPRED_INTEL 0x1
+#define CLK_AVC_ME_SLICE_TYPE_INTRA_INTEL 0x2
+
+#define CLK_AVC_ME_SEARCH_WINDOW_EXHAUSTIVE_INTEL 0x0
+#define CLK_AVC_ME_SEARCH_WINDOW_SMALL_INTEL 0x1
+#define CLK_AVC_ME_SEARCH_WINDOW_TINY_INTEL 0x2
+#define CLK_AVC_ME_SEARCH_WINDOW_EXTRA_TINY_INTEL 0x3
+#define CLK_AVC_ME_SEARCH_WINDOW_DIAMOND_INTEL 0x4
+#define CLK_AVC_ME_SEARCH_WINDOW_LARGE_DIAMOND_INTEL 0x5
+#define CLK_AVC_ME_SEARCH_WINDOW_RESERVED0_INTEL 0x6
+#define CLK_AVC_ME_SEARCH_WINDOW_RESERVED1_INTEL 0x7
+#define CLK_AVC_ME_SEARCH_WINDOW_CUSTOM_INTEL 0x8
+
+#define CLK_AVC_ME_SAD_ADJUST_MODE_NONE_INTEL 0x0
+#define CLK_AVC_ME_SAD_ADJUST_MODE_HAAR_INTEL 0x2
+
+#define CLK_AVC_ME_SUBPIXEL_MODE_INTEGER_INTEL 0x0
+#define CLK_AVC_ME_SUBPIXEL_MODE_HPEL_INTEL 0x1
+#define CLK_AVC_ME_SUBPIXEL_MODE_QPEL_INTEL 0x3
+
+#define CLK_AVC_ME_COST_PRECISION_QPEL_INTEL 0x0
+#define CLK_AVC_ME_COST_PRECISION_HPEL_INTEL 0x1
+#define CLK_AVC_ME_COST_PRECISION_PEL_INTEL 0x2
+#define CLK_AVC_ME_COST_PRECISION_DPEL_INTEL 0x3
+
+#define CLK_AVC_ME_BIDIR_WEIGHT_QUARTER_INTEL 0x10
+#define CLK_AVC_ME_BIDIR_WEIGHT_THIRD_INTEL 0x15
+#define CLK_AVC_ME_BIDIR_WEIGHT_HALF_INTEL 0x20
+#define CLK_AVC_ME_BIDIR_WEIGHT_TWO_THIRD_INTEL 0x2B
+#define CLK_AVC_ME_BIDIR_WEIGHT_THREE_QUARTER_INTEL 0x30
+
+#define CLK_AVC_ME_BORDER_REACHED_LEFT_INTEL 0x0
+#define CLK_AVC_ME_BORDER_REACHED_RIGHT_INTEL 0x2
+#define CLK_AVC_ME_BORDER_REACHED_TOP_INTEL 0x4
+#define CLK_AVC_ME_BORDER_REACHED_BOTTOM_INTEL 0x8
+
+#define CLK_AVC_ME_INTRA_16x16_INTEL 0x0
+#define CLK_AVC_ME_INTRA_8x8_INTEL 0x1
+#define CLK_AVC_ME_INTRA_4x4_INTEL 0x2
+
+#define CLK_AVC_ME_SKIP_BLOCK_PARTITION_16x16_INTEL 0x0
+#define CLK_AVC_ME_SKIP_BLOCK_PARTITION_8x8_INTEL 0x4000
+
+#define CLK_AVC_ME_SKIP_BLOCK_16x16_FORWARD_ENABLE_INTEL (0x1 << 24)
+#define CLK_AVC_ME_SKIP_BLOCK_16x16_BACKWARD_ENABLE_INTEL (0x2 << 24)
+#define CLK_AVC_ME_SKIP_BLOCK_16x16_DUAL_ENABLE_INTEL (0x3 << 24)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_FORWARD_ENABLE_INTEL (0x55 << 24)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_BACKWARD_ENABLE_INTEL (0xAA << 24)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_DUAL_ENABLE_INTEL (0xFF << 24)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_0_FORWARD_ENABLE_INTEL (0x1 << 24)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_0_BACKWARD_ENABLE_INTEL (0x2 << 24)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_1_FORWARD_ENABLE_INTEL (0x1 << 26)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_1_BACKWARD_ENABLE_INTEL (0x2 << 26)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_2_FORWARD_ENABLE_INTEL (0x1 << 28)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_2_BACKWARD_ENABLE_INTEL (0x2 << 28)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_3_FORWARD_ENABLE_INTEL (0x1 << 30)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_3_BACKWARD_ENABLE_INTEL (0x2 << 30)
+
+#define CLK_AVC_ME_BLOCK_BASED_SKIP_4x4_INTEL 0x00
+#define CLK_AVC_ME_BLOCK_BASED_SKIP_8x8_INTEL 0x80
+
+#define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_ALL_INTEL 0x0
+#define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_16x16_INTEL 0x6
+#define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_8x8_INTEL 0x5
+#define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_4x4_INTEL 0x3
+
+#define CLK_AVC_ME_INTRA_NEIGHBOR_LEFT_MASK_ENABLE_INTEL 0x60
+#define CLK_AVC_ME_INTRA_NEIGHBOR_UPPER_MASK_ENABLE_INTEL 0x10
+#define CLK_AVC_ME_INTRA_NEIGHBOR_UPPER_RIGHT_MASK_ENABLE_INTEL 0x8
+#define CLK_AVC_ME_INTRA_NEIGHBOR_UPPER_LEFT_MASK_ENABLE_INTEL 0x4
+
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_VERTICAL_INTEL 0x0
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_INTEL 0x1
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_DC_INTEL 0x2
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_DIAGONAL_DOWN_LEFT_INTEL 0x3
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_DIAGONAL_DOWN_RIGHT_INTEL 0x4
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_PLANE_INTEL 0x4
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_VERTICAL_RIGHT_INTEL 0x5
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_DOWN_INTEL 0x6
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_VERTICAL_LEFT_INTEL 0x7
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_UP_INTEL 0x8
+#define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_DC_INTEL 0x0
+#define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_HORIZONTAL_INTEL 0x1
+#define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_VERTICAL_INTEL 0x2
+#define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_PLANE_INTEL 0x3
+
+#define CLK_AVC_ME_FRAME_FORWARD_INTEL 0x1
+#define CLK_AVC_ME_FRAME_BACKWARD_INTEL 0x2
+#define CLK_AVC_ME_FRAME_DUAL_INTEL 0x3
+
+#define CLK_AVC_ME_INTERLACED_SCAN_TOP_FIELD_INTEL 0x0
+#define CLK_AVC_ME_INTERLACED_SCAN_BOTTOM_FIELD_INTEL 0x1
+
+#define CLK_AVC_ME_INITIALIZE_INTEL 0x0
+
+#define CLK_AVC_IME_PAYLOAD_INITIALIZE_INTEL 0x0
+#define CLK_AVC_REF_PAYLOAD_INITIALIZE_INTEL 0x0
+#define CLK_AVC_SIC_PAYLOAD_INITIALIZE_INTEL 0x0
+
+#define CLK_AVC_IME_RESULT_INITIALIZE_INTEL 0x0
+#define CLK_AVC_REF_RESULT_INITIALIZE_INTEL 0x0
+#define CLK_AVC_SIC_RESULT_INITIALIZE_INTEL 0x0
+
+#define CLK_AVC_IME_RESULT_SINGLE_REFERENCE_STREAMOUT_INITIALIZE_INTEL 0x0
+#define CLK_AVC_IME_RESULT_SINGLE_REFERENCE_STREAMIN_INITIALIZE_INTEL 0x0
+#define CLK_AVC_IME_RESULT_DUAL_REFERENCE_STREAMOUT_INITIALIZE_INTEL 0x0
+#define CLK_AVC_IME_RESULT_DUAL_REFERENCE_STREAMIN_INITIALIZE_INTEL 0x0
+
+// MCE built-in functions
+uchar __ovld
+intel_sub_group_avc_mce_get_default_inter_base_multi_reference_penalty(
+ uchar slice_type, uchar qp);
+ulong __ovld intel_sub_group_avc_mce_get_default_inter_shape_penalty(
+ uchar slice_type, uchar qp);
+uchar __ovld intel_sub_group_avc_mce_get_default_inter_direction_penalty(
+ uchar slice_type, uchar qp);
+uint __ovld intel_sub_group_avc_mce_get_default_intra_luma_shape_penalty(
+ uchar slice_type, uchar qp);
+uint2 __ovld
+intel_sub_group_avc_mce_get_default_inter_motion_vector_cost_table(
+ uchar slice_type, uchar qp);
+uchar __ovld intel_sub_group_avc_mce_get_default_intra_luma_mode_penalty(
+ uchar slice_type, uchar qp);
+
+uint2 __ovld intel_sub_group_avc_mce_get_default_high_penalty_cost_table();
+uint2 __ovld intel_sub_group_avc_mce_get_default_medium_penalty_cost_table();
+uint2 __ovld intel_sub_group_avc_mce_get_default_low_penalty_cost_table();
+uint __ovld intel_sub_group_avc_mce_get_default_non_dc_luma_intra_penalty();
+uchar __ovld
+intel_sub_group_avc_mce_get_default_intra_chroma_mode_base_penalty();
+
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_inter_base_multi_reference_penalty(
+ uchar reference_base_penalty, intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_inter_shape_penalty(
+ ulong packed_shape_penalty, intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_inter_direction_penalty(
+ uchar direction_cost, intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_motion_vector_cost_function(
+ ulong packed_cost_center_delta, uint2 packed_cost_table,
+ uchar cost_precision, intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_ac_only_haar(
+ intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_source_interlaced_field_polarity(
+ uchar src_field_polarity, intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_single_reference_interlaced_field_polarity(
+ uchar ref_field_polarity, intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_dual_reference_interlaced_field_polarities(
+ uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,
+ intel_sub_group_avc_mce_payload_t payload);
+
+ulong __ovld intel_sub_group_avc_mce_get_motion_vectors(
+ intel_sub_group_avc_mce_result_t result);
+ushort __ovld intel_sub_group_avc_mce_get_inter_distortions(
+ intel_sub_group_avc_mce_result_t result);
+ushort __ovld intel_sub_group_avc_mce_get_best_inter_distortion(
+ intel_sub_group_avc_mce_result_t result);
+uchar __ovld intel_sub_group_avc_mce_get_inter_major_shape(
+ intel_sub_group_avc_mce_result_t result);
+uchar __ovld intel_sub_group_avc_mce_get_inter_minor_shapes(
+ intel_sub_group_avc_mce_result_t result);
+uchar __ovld intel_sub_group_avc_mce_get_inter_directions(
+ intel_sub_group_avc_mce_result_t result);
+uchar __ovld intel_sub_group_avc_mce_get_inter_motion_vector_count(
+ intel_sub_group_avc_mce_result_t result);
+uint __ovld intel_sub_group_avc_mce_get_inter_reference_ids(
+ intel_sub_group_avc_mce_result_t result);
+uchar __ovld
+intel_sub_group_avc_mce_get_inter_reference_interlaced_field_polarities(
+ uint packed_reference_ids, uint packed_reference_parameter_field_polarities,
+ intel_sub_group_avc_mce_result_t result);
+
+// IME built-in functions
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_initialize(
+ ushort2 src_coord, uchar partition_mask, uchar sad_adjustment);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_single_reference(
+ short2 ref_offset, uchar search_window_config,
+ intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_dual_reference(
+ short2 fwd_ref_offset, short2 bwd_ref_offset, uchar search_window_config,
+ intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_max_motion_vector_count(
+ uchar max_motion_vector_count, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_unidirectional_mix_disable(
+ intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_early_search_termination_threshold(
+ uchar threshold, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_weighted_sad(
+ uint packed_sad_weights, intel_sub_group_avc_ime_payload_t payload);
+
+__attribute__((deprecated("If you use the latest Intel driver, please use "
+ "intel_sub_group_avc_ime_ref_window_size instead",
+ "intel_sub_group_avc_ime_ref_window_size")))
+ushort2 __ovld
+intel_sub_group_ime_ref_window_size(uchar search_window_config, char dual_ref);
+ushort2 __ovld intel_sub_group_avc_ime_ref_window_size(
+ uchar search_window_config, char dual_ref);
+short2 __ovld intel_sub_group_avc_ime_adjust_ref_offset(
+ short2 ref_offset, ushort2 src_coord, ushort2 ref_window_size,
+ ushort2 image_size);
+
+intel_sub_group_avc_ime_result_t __ovld
+intel_sub_group_avc_ime_evaluate_with_single_reference(
+ read_only image2d_t src_image, read_only image2d_t ref_image,
+ sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_result_t __ovld
+intel_sub_group_avc_ime_evaluate_with_dual_reference(
+ read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
+ read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
+ intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_result_single_reference_streamout_t __ovld
+intel_sub_group_avc_ime_evaluate_with_single_reference_streamout(
+ read_only image2d_t src_image, read_only image2d_t ref_image,
+ sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_result_dual_reference_streamout_t __ovld
+intel_sub_group_avc_ime_evaluate_with_dual_reference_streamout(
+ read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
+ read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
+ intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_result_t __ovld
+intel_sub_group_avc_ime_evaluate_with_single_reference_streamin(
+ read_only image2d_t src_image, read_only image2d_t ref_image,
+ sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload,
+ intel_sub_group_avc_ime_single_reference_streamin_t streamin_components);
+intel_sub_group_avc_ime_result_t __ovld
+intel_sub_group_avc_ime_evaluate_with_dual_reference_streamin(
+ read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
+ read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
+ intel_sub_group_avc_ime_payload_t payload,
+ intel_sub_group_avc_ime_dual_reference_streamin_t streamin_components);
+intel_sub_group_avc_ime_result_single_reference_streamout_t __ovld
+intel_sub_group_avc_ime_evaluate_with_single_reference_streaminout(
+ read_only image2d_t src_image, read_only image2d_t ref_image,
+ sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload,
+ intel_sub_group_avc_ime_single_reference_streamin_t streamin_components);
+intel_sub_group_avc_ime_result_dual_reference_streamout_t __ovld
+intel_sub_group_avc_ime_evaluate_with_dual_reference_streaminout(
+ read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
+ read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
+ intel_sub_group_avc_ime_payload_t payload,
+ intel_sub_group_avc_ime_dual_reference_streamin_t streamin_components);
+
+intel_sub_group_avc_ime_single_reference_streamin_t __ovld
+intel_sub_group_avc_ime_get_single_reference_streamin(
+ intel_sub_group_avc_ime_result_single_reference_streamout_t result);
+intel_sub_group_avc_ime_dual_reference_streamin_t __ovld
+intel_sub_group_avc_ime_get_dual_reference_streamin(
+ intel_sub_group_avc_ime_result_dual_reference_streamout_t result);
+intel_sub_group_avc_ime_result_t __ovld
+intel_sub_group_avc_ime_strip_single_reference_streamout(
+ intel_sub_group_avc_ime_result_single_reference_streamout_t result);
+intel_sub_group_avc_ime_result_t __ovld
+intel_sub_group_avc_ime_strip_dual_reference_streamout(
+ intel_sub_group_avc_ime_result_dual_reference_streamout_t result);
+
+uint __ovld intel_sub_group_avc_ime_get_streamout_major_shape_motion_vectors(
+ intel_sub_group_avc_ime_result_single_reference_streamout_t result,
+ uchar major_shape);
+ushort __ovld intel_sub_group_avc_ime_get_streamout_major_shape_distortions(
+ intel_sub_group_avc_ime_result_single_reference_streamout_t result,
+ uchar major_shape);
+uchar __ovld intel_sub_group_avc_ime_get_streamout_major_shape_reference_ids(
+ intel_sub_group_avc_ime_result_single_reference_streamout_t result,
+ uchar major_shape);
+uint __ovld intel_sub_group_avc_ime_get_streamout_major_shape_motion_vectors(
+ intel_sub_group_avc_ime_result_dual_reference_streamout_t result,
+ uchar major_shape, uchar direction);
+ushort __ovld intel_sub_group_avc_ime_get_streamout_major_shape_distortions(
+ intel_sub_group_avc_ime_result_dual_reference_streamout_t result,
+ uchar major_shape, uchar direction);
+uchar __ovld intel_sub_group_avc_ime_get_streamout_major_shape_reference_ids(
+ intel_sub_group_avc_ime_result_dual_reference_streamout_t result,
+ uchar major_shape, uchar direction);
+
+uchar __ovld intel_sub_group_avc_ime_get_border_reached(
+ uchar image_select, intel_sub_group_avc_ime_result_t result);
+uchar __ovld intel_sub_group_avc_ime_get_truncated_search_indication(
+ intel_sub_group_avc_ime_result_t result);
+uchar __ovld
+intel_sub_group_avc_ime_get_unidirectional_early_search_termination(
+ intel_sub_group_avc_ime_result_t result);
+uint __ovld intel_sub_group_avc_ime_get_weighting_pattern_minimum_motion_vector(
+ intel_sub_group_avc_ime_result_t result);
+ushort __ovld intel_sub_group_avc_ime_get_weighting_pattern_minimum_distortion(
+ intel_sub_group_avc_ime_result_t result);
+
+// REF built-in functions
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_fme_initialize(
+ ushort2 src_coord, ulong motion_vectors, uchar major_shapes,
+ uchar minor_shapes, uchar directions, uchar pixel_resolution,
+ uchar sad_adjustment);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_bme_initialize(
+ ushort2 src_coord, ulong motion_vectors, uchar major_shapes,
+ uchar minor_shapes, uchar directions, uchar pixel_resolution,
+ uchar bidirectional_weight, uchar sad_adjustment);
+
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_bidirectional_mix_disable(
+ intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_bilinear_filter_enable(
+ intel_sub_group_avc_ref_payload_t payload);
+
+intel_sub_group_avc_ref_result_t __ovld
+intel_sub_group_avc_ref_evaluate_with_single_reference(
+ read_only image2d_t src_image, read_only image2d_t ref_image,
+ sampler_t vme_media_sampler, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_ref_result_t __ovld
+intel_sub_group_avc_ref_evaluate_with_dual_reference(
+ read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
+ read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
+ intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_ref_result_t __ovld
+intel_sub_group_avc_ref_evaluate_with_multi_reference(
+ read_only image2d_t src_image, uint packed_reference_ids,
+ sampler_t vme_media_sampler, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_ref_result_t __ovld
+intel_sub_group_avc_ref_evaluate_with_multi_reference(
+ read_only image2d_t src_image, uint packed_reference_ids,
+ uchar packed_reference_field_polarities, sampler_t vme_media_sampler,
+ intel_sub_group_avc_ref_payload_t payload);
+
+// SIC built-in functions
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_initialize(
+ ushort2 src_coord);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_configure_skc(
+ uint skip_block_partition_type, uint skip_motion_vector_mask,
+ ulong motion_vectors, uchar bidirectional_weight, uchar skip_sad_adjustment,
+ intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_configure_ipe(
+ uchar luma_intra_partition_mask, uchar intra_neighbour_availabilty,
+ uchar left_edge_luma_pixels, uchar upper_left_corner_luma_pixel,
+ uchar upper_edge_luma_pixels, uchar upper_right_edge_luma_pixels,
+ uchar intra_sad_adjustment, intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_configure_ipe(
+ uchar luma_intra_partition_mask, uchar intra_neighbour_availabilty,
+ uchar left_edge_luma_pixels, uchar upper_left_corner_luma_pixel,
+ uchar upper_edge_luma_pixels, uchar upper_right_edge_luma_pixels,
+ ushort left_edge_chroma_pixels, ushort upper_left_corner_chroma_pixel,
+ ushort upper_edge_chroma_pixels, uchar intra_sad_adjustment,
+ intel_sub_group_avc_sic_payload_t payload);
+uint __ovld
+intel_sub_group_avc_sic_get_motion_vector_mask(
+ uint skip_block_partition_type, uchar direction);
+
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_intra_luma_shape_penalty(
+ uint packed_shape_cost, intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_intra_luma_mode_cost_function(
+ uchar luma_mode_penalty, uint luma_packed_neighbor_modes,
+ uint luma_packed_non_dc_penalty, intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_intra_chroma_mode_cost_function(
+ uchar chroma_mode_penalty, intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_skc_bilinear_filter_enable(
+ intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_skc_forward_transform_enable(
+ ulong packed_sad_coefficients, intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_block_based_raw_skip_sad(
+ uchar block_based_skip_type,
+ intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_sic_result_t __ovld
+intel_sub_group_avc_sic_evaluate_ipe(
+ read_only image2d_t src_image, sampler_t vme_media_sampler,
+ intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_result_t __ovld
+intel_sub_group_avc_sic_evaluate_with_single_reference(
+ read_only image2d_t src_image, read_only image2d_t ref_image,
+ sampler_t vme_media_sampler, intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_result_t __ovld
+intel_sub_group_avc_sic_evaluate_with_dual_reference(
+ read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
+ read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
+ intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_result_t __ovld
+intel_sub_group_avc_sic_evaluate_with_multi_reference(
+ read_only image2d_t src_image, uint packed_reference_ids,
+ sampler_t vme_media_sampler, intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_result_t __ovld
+intel_sub_group_avc_sic_evaluate_with_multi_reference(
+ read_only image2d_t src_image, uint packed_reference_ids,
+ uchar packed_reference_field_polarities, sampler_t vme_media_sampler,
+ intel_sub_group_avc_sic_payload_t payload);
+
+uchar __ovld intel_sub_group_avc_sic_get_ipe_luma_shape(
+ intel_sub_group_avc_sic_result_t result);
+ushort __ovld intel_sub_group_avc_sic_get_best_ipe_luma_distortion(
+ intel_sub_group_avc_sic_result_t result);
+ushort __ovld intel_sub_group_avc_sic_get_best_ipe_chroma_distortion(
+ intel_sub_group_avc_sic_result_t result);
+ulong __ovld intel_sub_group_avc_sic_get_packed_ipe_luma_modes(
+ intel_sub_group_avc_sic_result_t result);
+uchar __ovld intel_sub_group_avc_sic_get_ipe_chroma_mode(
+ intel_sub_group_avc_sic_result_t result);
+uint __ovld intel_sub_group_avc_sic_get_packed_skc_luma_count_threshold(
+ intel_sub_group_avc_sic_result_t result);
+ulong __ovld intel_sub_group_avc_sic_get_packed_skc_luma_sum_threshold(
+ intel_sub_group_avc_sic_result_t result);
+ushort __ovld intel_sub_group_avc_sic_get_inter_raw_sads(
+ intel_sub_group_avc_sic_result_t result);
+
+// Wrappers
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_inter_base_multi_reference_penalty(
+ uchar reference_base_penalty, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_inter_base_multi_reference_penalty(
+ uchar reference_base_penalty, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_inter_base_multi_reference_penalty(
+ uchar reference_base_penalty, intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_inter_shape_penalty(
+ ulong packed_shape_cost, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_inter_shape_penalty(
+ ulong packed_shape_cost, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_inter_shape_penalty(
+ ulong packed_shape_cost, intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_inter_direction_penalty(
+ uchar direction_cost, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_inter_direction_penalty(
+ uchar direction_cost, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_inter_direction_penalty(
+ uchar direction_cost, intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_motion_vector_cost_function(
+ ulong packed_cost_center_delta, uint2 packed_cost_table,
+ uchar cost_precision, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_motion_vector_cost_function(
+ ulong packed_cost_center_delta, uint2 packed_cost_table,
+ uchar cost_precision, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_motion_vector_cost_function(
+ ulong packed_cost_center_delta, uint2 packed_cost_table,
+ uchar cost_precision, intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_source_interlaced_field_polarity(
+ uchar src_field_polarity, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_source_interlaced_field_polarity(
+ uchar src_field_polarity, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_source_interlaced_field_polarity(
+ uchar src_field_polarity, intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_single_reference_interlaced_field_polarity(
+ uchar ref_field_polarity, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_single_reference_interlaced_field_polarity(
+ uchar ref_field_polarity, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_single_reference_interlaced_field_polarity(
+ uchar ref_field_polarity, intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_dual_reference_interlaced_field_polarities(
+ uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,
+ intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_dual_reference_interlaced_field_polarities(
+ uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,
+ intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_dual_reference_interlaced_field_polarities(
+ uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,
+ intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_ac_only_haar(
+ intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_ac_only_haar(
+ intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_ac_only_haar(
+ intel_sub_group_avc_sic_payload_t payload);
+
+ulong __ovld intel_sub_group_avc_ime_get_motion_vectors(
+ intel_sub_group_avc_ime_result_t result);
+ulong __ovld intel_sub_group_avc_ref_get_motion_vectors(
+ intel_sub_group_avc_ref_result_t result);
+
+ushort __ovld intel_sub_group_avc_ime_get_inter_distortions(
+ intel_sub_group_avc_ime_result_t result);
+ushort __ovld intel_sub_group_avc_ref_get_inter_distortions(
+ intel_sub_group_avc_ref_result_t result);
+ushort __ovld intel_sub_group_avc_sic_get_inter_distortions(
+ intel_sub_group_avc_sic_result_t result);
+
+ushort __ovld intel_sub_group_avc_ime_get_best_inter_distortion(
+ intel_sub_group_avc_ime_result_t result);
+ushort __ovld intel_sub_group_avc_ref_get_best_inter_distortion(
+ intel_sub_group_avc_ref_result_t result);
+
+uchar __ovld intel_sub_group_avc_ime_get_inter_major_shape(
+ intel_sub_group_avc_ime_result_t result);
+uchar __ovld intel_sub_group_avc_ref_get_inter_major_shape(
+ intel_sub_group_avc_ref_result_t result);
+uchar __ovld intel_sub_group_avc_ime_get_inter_minor_shapes(
+ intel_sub_group_avc_ime_result_t result);
+uchar __ovld intel_sub_group_avc_ref_get_inter_minor_shapes(
+ intel_sub_group_avc_ref_result_t result);
+
+uchar __ovld intel_sub_group_avc_ime_get_inter_directions(
+ intel_sub_group_avc_ime_result_t result);
+uchar __ovld intel_sub_group_avc_ref_get_inter_directions(
+ intel_sub_group_avc_ref_result_t result);
+
+uchar __ovld intel_sub_group_avc_ime_get_inter_motion_vector_count(
+ intel_sub_group_avc_ime_result_t result);
+uchar __ovld intel_sub_group_avc_ref_get_inter_motion_vector_count(
+ intel_sub_group_avc_ref_result_t result);
+
+uint __ovld intel_sub_group_avc_ime_get_inter_reference_ids(
+ intel_sub_group_avc_ime_result_t result);
+uint __ovld intel_sub_group_avc_ref_get_inter_reference_ids(
+ intel_sub_group_avc_ref_result_t result);
+
+uchar __ovld
+intel_sub_group_avc_ime_get_inter_reference_interlaced_field_polarities(
+ uint packed_reference_ids, uint packed_reference_parameter_field_polarities,
+ intel_sub_group_avc_ime_result_t result);
+uchar __ovld
+intel_sub_group_avc_ref_get_inter_reference_interlaced_field_polarities(
+ uint packed_reference_ids, uint packed_reference_parameter_field_polarities,
+ intel_sub_group_avc_ref_result_t result);
+
+// Type conversion functions
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_ime_convert_to_mce_payload(
+ intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_mce_convert_to_ime_payload(
+ intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_ref_convert_to_mce_payload(
+ intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_mce_convert_to_ref_payload(
+ intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_sic_convert_to_mce_payload(
+ intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_mce_convert_to_sic_payload(
+ intel_sub_group_avc_mce_payload_t payload);
+
+intel_sub_group_avc_mce_result_t __ovld
+intel_sub_group_avc_ime_convert_to_mce_result(
+ intel_sub_group_avc_ime_result_t result);
+intel_sub_group_avc_ime_result_t __ovld
+intel_sub_group_avc_mce_convert_to_ime_result(
+ intel_sub_group_avc_mce_result_t result);
+intel_sub_group_avc_mce_result_t __ovld
+intel_sub_group_avc_ref_convert_to_mce_result(
+ intel_sub_group_avc_ref_result_t result);
+intel_sub_group_avc_ref_result_t __ovld
+intel_sub_group_avc_mce_convert_to_ref_result(
+ intel_sub_group_avc_mce_result_t result);
+intel_sub_group_avc_mce_result_t __ovld
+intel_sub_group_avc_sic_convert_to_mce_result(
+ intel_sub_group_avc_sic_result_t result);
+intel_sub_group_avc_sic_result_t __ovld
+intel_sub_group_avc_mce_convert_to_sic_result(
+ intel_sub_group_avc_mce_result_t result);
+#pragma OPENCL EXTENSION cl_intel_device_side_avc_motion_estimation : end
+#endif // cl_intel_device_side_avc_motion_estimation
+
#ifdef cl_amd_media_ops
uint __ovld amd_bitalign(uint a, uint b, uint c);
uint2 __ovld amd_bitalign(uint2 a, uint2 b, uint2 c);
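
Most of this opencl-c.h change declares the cl_intel_device_side_avc_motion_estimation built-ins. A rough OpenCL C sketch of how the IME entry points compose, using only names declared above; the kernel, its argument names, and the 16x16 block layout are illustrative, and a device and driver that actually expose the extension (plus a VME media sampler created on the host) are assumed:

#pragma OPENCL EXTENSION cl_intel_device_side_avc_motion_estimation : enable

// One 16x16 macroblock search per work-group; each result is a packed set of
// motion vectors returned by the IME evaluation.
__kernel void ime_search(__read_only image2d_t src, __read_only image2d_t ref,
                         sampler_t vme_sampler, __global ulong *mvs) {
  ushort2 mb = (ushort2)((ushort)(get_group_id(0) * 16),
                         (ushort)(get_group_id(1) * 16));
  intel_sub_group_avc_ime_payload_t p = intel_sub_group_avc_ime_initialize(
      mb, CLK_AVC_ME_PARTITION_MASK_16x16_INTEL,
      CLK_AVC_ME_SAD_ADJUST_MODE_NONE_INTEL);
  p = intel_sub_group_avc_ime_set_single_reference(
      (short2)(0, 0), CLK_AVC_ME_SEARCH_WINDOW_EXHAUSTIVE_INTEL, p);
  intel_sub_group_avc_ime_result_t r =
      intel_sub_group_avc_ime_evaluate_with_single_reference(src, ref,
                                                             vme_sampler, p);
  mvs[get_group_id(1) * get_num_groups(0) + get_group_id(0)] =
      intel_sub_group_avc_ime_get_motion_vectors(r);
}
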
diff --git a/lib/Index/CommentToXML.cpp b/lib/Index/CommentToXML.cpp
index 918068a240..a2659119a2 100644
--- a/lib/Index/CommentToXML.cpp
+++ b/lib/Index/CommentToXML.cpp
@@ -720,6 +720,7 @@ void CommentASTToXMLConverter::visitBlockCommandComment(
case CommandTraits::KCI_version:
case CommandTraits::KCI_warning:
ParagraphKind = C->getCommandName(Traits);
+ break;
default:
break;
}
diff --git a/lib/Index/IndexTypeSourceInfo.cpp b/lib/Index/IndexTypeSourceInfo.cpp
index 8342e93c15..85afc63450 100644
--- a/lib/Index/IndexTypeSourceInfo.cpp
+++ b/lib/Index/IndexTypeSourceInfo.cpp
@@ -100,7 +100,8 @@ public:
bool VisitTagTypeLoc(TagTypeLoc TL) {
TagDecl *D = TL.getDecl();
- if (D->getParentFunctionOrMethod())
+ if (!IndexCtx.shouldIndexFunctionLocalSymbols() &&
+ D->getParentFunctionOrMethod())
return true;
if (TL.isDefinition()) {
diff --git a/lib/Index/SimpleFormatContext.h b/lib/Index/SimpleFormatContext.h
index 9c6d29bec3..24adcac602 100644
--- a/lib/Index/SimpleFormatContext.h
+++ b/lib/Index/SimpleFormatContext.h
@@ -36,12 +36,10 @@ class SimpleFormatContext {
public:
SimpleFormatContext(LangOptions Options)
: DiagOpts(new DiagnosticOptions()),
- Diagnostics(new DiagnosticsEngine(new DiagnosticIDs,
- DiagOpts.get())),
- InMemoryFileSystem(new vfs::InMemoryFileSystem),
+ Diagnostics(new DiagnosticsEngine(new DiagnosticIDs, DiagOpts.get())),
+ InMemoryFileSystem(new llvm::vfs::InMemoryFileSystem),
Files(FileSystemOptions(), InMemoryFileSystem),
- Sources(*Diagnostics, Files),
- Rewrite(Sources, Options) {
+ Sources(*Diagnostics, Files), Rewrite(Sources, Options) {
Diagnostics->setClient(new IgnoringDiagConsumer, true);
}
@@ -63,7 +61,7 @@ public:
IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts;
IntrusiveRefCntPtr<DiagnosticsEngine> Diagnostics;
- IntrusiveRefCntPtr<vfs::InMemoryFileSystem> InMemoryFileSystem;
+ IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem;
FileManager Files;
SourceManager Sources;
Rewriter Rewrite;
diff --git a/lib/Index/USRGeneration.cpp b/lib/Index/USRGeneration.cpp
index 8194c9508f..84ca753bf8 100644
--- a/lib/Index/USRGeneration.cpp
+++ b/lib/Index/USRGeneration.cpp
@@ -97,6 +97,7 @@ public:
void VisitTypedefDecl(const TypedefDecl *D);
void VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *D);
void VisitVarDecl(const VarDecl *D);
+ void VisitBindingDecl(const BindingDecl *D);
void VisitNonTypeTemplateParmDecl(const NonTypeTemplateParmDecl *D);
void VisitTemplateTemplateParmDecl(const TemplateTemplateParmDecl *D);
void VisitUnresolvedUsingValueDecl(const UnresolvedUsingValueDecl *D);
@@ -269,7 +270,8 @@ void USRGenerator::VisitFunctionDecl(const FunctionDecl *D) {
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
if (MD->isStatic())
Out << 'S';
- if (unsigned quals = MD->getTypeQualifiers())
+ // FIXME: OpenCL: Need to consider address spaces
+ if (unsigned quals = MD->getTypeQualifiers().getCVRUQualifiers())
Out << (char)('0' + quals);
switch (MD->getRefQualifier()) {
case RQ_None: break;
@@ -334,6 +336,12 @@ void USRGenerator::VisitVarDecl(const VarDecl *D) {
}
}
+void USRGenerator::VisitBindingDecl(const BindingDecl *D) {
+ if (isLocal(D) && GenLoc(D, /*IncludeOffset=*/true))
+ return;
+ VisitNamedDecl(D);
+}
+
void USRGenerator::VisitNonTypeTemplateParmDecl(
const NonTypeTemplateParmDecl *D) {
GenLoc(D, /*IncludeOffset=*/true);
@@ -704,6 +712,9 @@ void USRGenerator::VisitType(QualType T) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id:
+#include "clang/Basic/OpenCLExtensionTypes.def"
case BuiltinType::OCLEvent:
case BuiltinType::OCLClkEvent:
case BuiltinType::OCLQueue:
@@ -935,7 +946,7 @@ void USRGenerator::VisitTemplateArgument(const TemplateArgument &Arg) {
case TemplateArgument::TemplateExpansion:
Out << 'P'; // pack expansion of...
- // Fall through
+ LLVM_FALLTHROUGH;
case TemplateArgument::Template:
VisitTemplateName(Arg.getAsTemplateOrTemplatePattern());
break;
@@ -1095,6 +1106,17 @@ bool clang::index::generateUSRForMacro(StringRef MacroName, SourceLocation Loc,
return false;
}
+bool clang::index::generateUSRForType(QualType T, ASTContext &Ctx,
+ SmallVectorImpl<char> &Buf) {
+ if (T.isNull())
+ return true;
+ T = T.getCanonicalType();
+
+ USRGenerator UG(&Ctx, Buf);
+ UG.VisitType(T);
+ return UG.ignoreResults();
+}
+
bool clang::index::generateFullUSRForModule(const Module *Mod,
raw_ostream &OS) {
if (!Mod->Parent)
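
The USRGeneration change above adds a type-level entry point, generateUSRForType, which canonicalizes the type and reports failure by returning true. A minimal C++ sketch of how a caller might use it, assuming the matching declaration lands in clang/Index/USRGeneration.h; the helper name and the printing are illustrative, not part of the patch:

#include "clang/AST/ASTContext.h"
#include "clang/Index/USRGeneration.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"

// Print the USR for a type using the new entry point (illustrative caller).
static void printTypeUSR(clang::QualType T, clang::ASTContext &Ctx) {
  llvm::SmallString<128> Buf;
  // generateUSRForType returns true on failure (for example, a null type).
  if (!clang::index::generateUSRForType(T, Ctx, Buf))
    llvm::errs() << Buf << "\n";
}
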
diff --git a/lib/Lex/CMakeLists.txt b/lib/Lex/CMakeLists.txt
index 38df144adf..7888b15cb6 100644
--- a/lib/Lex/CMakeLists.txt
+++ b/lib/Lex/CMakeLists.txt
@@ -17,7 +17,6 @@ add_clang_library(clangLex
PPExpressions.cpp
PPLexerChange.cpp
PPMacroExpansion.cpp
- PTHLexer.cpp
Pragma.cpp
PreprocessingRecord.cpp
Preprocessor.cpp
diff --git a/lib/Lex/HeaderSearch.cpp b/lib/Lex/HeaderSearch.cpp
index c475336ca5..5b827b13c0 100644
--- a/lib/Lex/HeaderSearch.cpp
+++ b/lib/Lex/HeaderSearch.cpp
@@ -17,7 +17,6 @@
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/SourceManager.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Lex/DirectoryLookup.h"
#include "clang/Lex/ExternalPreprocessorSource.h"
#include "clang/Lex/HeaderMap.h"
@@ -35,6 +34,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
@@ -1571,8 +1571,9 @@ void HeaderSearch::collectAllModules(SmallVectorImpl<Module *> &Modules) {
DirNative);
// Search each of the ".framework" directories to load them as modules.
- vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
- for (vfs::directory_iterator Dir = FS.dir_begin(DirNative, EC), DirEnd;
+ llvm::vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
+ for (llvm::vfs::directory_iterator Dir = FS.dir_begin(DirNative, EC),
+ DirEnd;
Dir != DirEnd && !EC; Dir.increment(EC)) {
if (llvm::sys::path::extension(Dir->path()) != ".framework")
continue;
@@ -1637,10 +1638,12 @@ void HeaderSearch::loadSubdirectoryModuleMaps(DirectoryLookup &SearchDir) {
return;
std::error_code EC;
+ SmallString<128> Dir = SearchDir.getDir()->getName();
+ FileMgr.makeAbsolutePath(Dir);
SmallString<128> DirNative;
- llvm::sys::path::native(SearchDir.getDir()->getName(), DirNative);
- vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
- for (vfs::directory_iterator Dir = FS.dir_begin(DirNative, EC), DirEnd;
+ llvm::sys::path::native(Dir, DirNative);
+ llvm::vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
+ for (llvm::vfs::directory_iterator Dir = FS.dir_begin(DirNative, EC), DirEnd;
Dir != DirEnd && !EC; Dir.increment(EC)) {
bool IsFramework = llvm::sys::path::extension(Dir->path()) == ".framework";
if (IsFramework == SearchDir.isFramework())
diff --git a/lib/Lex/Lexer.cpp b/lib/Lex/Lexer.cpp
index 35ba8c11ec..d472309111 100644
--- a/lib/Lex/Lexer.cpp
+++ b/lib/Lex/Lexer.cpp
@@ -1015,7 +1015,7 @@ StringRef Lexer::getImmediateMacroName(SourceLocation Loc,
StringRef Lexer::getImmediateMacroNameForDiagnostics(
SourceLocation Loc, const SourceManager &SM, const LangOptions &LangOpts) {
assert(Loc.isMacroID() && "Only reasonable to call this on macros");
- // Walk past macro argument expanions.
+ // Walk past macro argument expansions.
while (SM.isMacroArgExpansion(Loc))
Loc = SM.getImmediateExpansionRange(Loc).getBegin();
@@ -3844,7 +3844,7 @@ LexNextToken:
case '@':
// Objective C support.
- if (CurPtr[-1] == '@' && LangOpts.ObjC1)
+ if (CurPtr[-1] == '@' && LangOpts.ObjC)
Kind = tok::at;
else
Kind = tok::unknown;
diff --git a/lib/Lex/LiteralSupport.cpp b/lib/Lex/LiteralSupport.cpp
index 3f2af1a74e..fa0815eb9c 100644
--- a/lib/Lex/LiteralSupport.cpp
+++ b/lib/Lex/LiteralSupport.cpp
@@ -693,7 +693,7 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
break;
}
}
- // fall through.
+ LLVM_FALLTHROUGH;
case 'j':
case 'J':
if (isImaginary) break; // Cannot be repeated.
diff --git a/lib/Lex/MacroInfo.cpp b/lib/Lex/MacroInfo.cpp
index 4ed69ecc46..434c120075 100644
--- a/lib/Lex/MacroInfo.cpp
+++ b/lib/Lex/MacroInfo.cpp
@@ -200,7 +200,8 @@ MacroDirective::DefInfo MacroDirective::getDefinition() {
}
const MacroDirective::DefInfo
-MacroDirective::findDirectiveAtLoc(SourceLocation L, SourceManager &SM) const {
+MacroDirective::findDirectiveAtLoc(SourceLocation L,
+ const SourceManager &SM) const {
assert(L.isValid() && "SourceLocation is invalid.");
for (DefInfo Def = getDefinition(); Def; Def = Def.getPreviousDefinition()) {
if (Def.getLocation().isInvalid() || // For macros defined on the command line.
diff --git a/lib/Lex/ModuleMap.cpp b/lib/Lex/ModuleMap.cpp
index b6a6e26d6a..13d2b728f5 100644
--- a/lib/Lex/ModuleMap.cpp
+++ b/lib/Lex/ModuleMap.cpp
@@ -22,7 +22,6 @@
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Lex/LexDiagnostic.h"
@@ -43,6 +42,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
@@ -523,7 +523,7 @@ void ModuleMap::diagnoseHeaderInclusion(Module *RequestingModule,
// At this point, only non-modular includes remain.
- if (LangOpts.ModulesStrictDeclUse) {
+ if (RequestingModule && LangOpts.ModulesStrictDeclUse) {
Diags.Report(FilenameLoc, diag::err_undeclared_use_of_module)
<< RequestingModule->getTopLevelModule()->Name << Filename;
} else if (RequestingModule && RequestingModuleIsModuleInterface &&
@@ -1020,9 +1020,10 @@ Module *ModuleMap::inferFrameworkModule(const DirectoryEntry *FrameworkDir,
= StringRef(FrameworkDir->getName());
llvm::sys::path::append(SubframeworksDirName, "Frameworks");
llvm::sys::path::native(SubframeworksDirName);
- vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
- for (vfs::directory_iterator Dir = FS.dir_begin(SubframeworksDirName, EC),
- DirEnd;
+ llvm::vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
+ for (llvm::vfs::directory_iterator
+ Dir = FS.dir_begin(SubframeworksDirName, EC),
+ DirEnd;
Dir != DirEnd && !EC; Dir.increment(EC)) {
if (!StringRef(Dir->path()).endswith(".framework"))
continue;
@@ -2394,8 +2395,9 @@ void ModuleMapParser::parseUmbrellaDirDecl(SourceLocation UmbrellaLoc) {
// uncommonly used Tcl module on Darwin platforms.
std::error_code EC;
SmallVector<Module::Header, 6> Headers;
- vfs::FileSystem &FS = *SourceMgr.getFileManager().getVirtualFileSystem();
- for (vfs::recursive_directory_iterator I(FS, Dir->getName(), EC), E;
+ llvm::vfs::FileSystem &FS =
+ *SourceMgr.getFileManager().getVirtualFileSystem();
+ for (llvm::vfs::recursive_directory_iterator I(FS, Dir->getName(), EC), E;
I != E && !EC; I.increment(EC)) {
if (const FileEntry *FE = SourceMgr.getFileManager().getFile(I->path())) {
diff --git a/lib/Lex/PPDirectives.cpp b/lib/Lex/PPDirectives.cpp
index 00397e1e1a..15fc086f8a 100644
--- a/lib/Lex/PPDirectives.cpp
+++ b/lib/Lex/PPDirectives.cpp
@@ -31,7 +31,6 @@
#include "clang/Lex/Pragma.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PreprocessorOptions.h"
-#include "clang/Lex/PTHLexer.h"
#include "clang/Lex/Token.h"
#include "clang/Lex/VariadicMacroSupport.h"
#include "llvm/ADT/ArrayRef.h"
@@ -119,7 +118,7 @@ static bool isReservedId(StringRef Text, const LangOptions &Lang) {
// the specified module, meaning clang won't build the specified module. This is
// useful in a number of situations, for instance, when building a library that
// vends a module map, one might want to avoid hitting intermediate build
-// products containig the the module map or avoid finding the system installed
+// products containing the module map or avoid finding the system installed
// modulemap for that library.
static bool isForModuleBuilding(Module *M, StringRef CurrentModule,
StringRef ModuleName) {
@@ -383,11 +382,6 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
CurPPLexer->pushConditionalLevel(IfTokenLoc, /*isSkipping*/ false,
FoundNonSkipPortion, FoundElse);
- if (CurPTHLexer) {
- PTHSkipExcludedConditionalBlock();
- return;
- }
-
// Enter raw mode to disable identifier lookup (and thus macro expansion),
// disabling warnings, etc.
CurPPLexer->LexingRawMode = true;
@@ -405,7 +399,7 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
// If this is the end of the buffer, we have an error.
if (Tok.is(tok::eof)) {
// We don't emit errors for unterminated conditionals here,
- // Lexer::LexEndOfFile can do that propertly.
+ // Lexer::LexEndOfFile can do that properly.
// Just return and let the caller lex after this #include.
if (PreambleConditionalStack.isRecording())
PreambleConditionalStack.SkipInfo.emplace(
@@ -585,83 +579,6 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
Tok.getLocation());
}
-void Preprocessor::PTHSkipExcludedConditionalBlock() {
- while (true) {
- assert(CurPTHLexer);
- assert(CurPTHLexer->LexingRawMode == false);
-
- // Skip to the next '#else', '#elif', or #endif.
- if (CurPTHLexer->SkipBlock()) {
- // We have reached an #endif. Both the '#' and 'endif' tokens
- // have been consumed by the PTHLexer. Just pop off the condition level.
- PPConditionalInfo CondInfo;
- bool InCond = CurPTHLexer->popConditionalLevel(CondInfo);
- (void)InCond; // Silence warning in no-asserts mode.
- assert(!InCond && "Can't be skipping if not in a conditional!");
- break;
- }
-
- // We have reached a '#else' or '#elif'. Lex the next token to get
- // the directive flavor.
- Token Tok;
- LexUnexpandedToken(Tok);
-
- // We can actually look up the IdentifierInfo here since we aren't in
- // raw mode.
- tok::PPKeywordKind K = Tok.getIdentifierInfo()->getPPKeywordID();
-
- if (K == tok::pp_else) {
- // #else: Enter the else condition. We aren't in a nested condition
- // since we skip those. We're always in the one matching the last
- // blocked we skipped.
- PPConditionalInfo &CondInfo = CurPTHLexer->peekConditionalLevel();
- // Note that we've seen a #else in this conditional.
- CondInfo.FoundElse = true;
-
- // If the #if block wasn't entered then enter the #else block now.
- if (!CondInfo.FoundNonSkip) {
- CondInfo.FoundNonSkip = true;
-
- // Scan until the eod token.
- CurPTHLexer->ParsingPreprocessorDirective = true;
- DiscardUntilEndOfDirective();
- CurPTHLexer->ParsingPreprocessorDirective = false;
-
- break;
- }
-
- // Otherwise skip this block.
- continue;
- }
-
- assert(K == tok::pp_elif);
- PPConditionalInfo &CondInfo = CurPTHLexer->peekConditionalLevel();
-
- // If this is a #elif with a #else before it, report the error.
- if (CondInfo.FoundElse)
- Diag(Tok, diag::pp_err_elif_after_else);
-
- // If this is in a skipping block or if we're already handled this #if
- // block, don't bother parsing the condition. We just skip this block.
- if (CondInfo.FoundNonSkip)
- continue;
-
- // Evaluate the condition of the #elif.
- IdentifierInfo *IfNDefMacro = nullptr;
- CurPTHLexer->ParsingPreprocessorDirective = true;
- bool ShouldEnter = EvaluateDirectiveExpression(IfNDefMacro).Conditional;
- CurPTHLexer->ParsingPreprocessorDirective = false;
-
- // If this condition is true, enter it!
- if (ShouldEnter) {
- CondInfo.FoundNonSkip = true;
- break;
- }
-
- // Otherwise, skip this block and go to the next one.
- }
-}
-
Module *Preprocessor::getModuleForLocation(SourceLocation Loc) {
if (!SourceMgr.isInMainFile(Loc)) {
// Try to determine the module of the include directive.
@@ -690,7 +607,7 @@ Preprocessor::getModuleHeaderToIncludeForDiagnostics(SourceLocation IncLoc,
// If we have a module import syntax, we shouldn't include a header to
// make a particular module visible.
- if (getLangOpts().ObjC2)
+ if (getLangOpts().ObjC)
return nullptr;
Module *TopM = M->getTopLevelModule();
@@ -1387,10 +1304,6 @@ void Preprocessor::HandleDigitDirective(Token &DigitTok) {
///
void Preprocessor::HandleUserDiagnosticDirective(Token &Tok,
bool isWarning) {
- // PTH doesn't emit #warning or #error directives.
- if (CurPTHLexer)
- return CurPTHLexer->DiscardToEndOfLine();
-
// Read the rest of the line raw. We do this because we don't want macros
// to be expanded and we don't require that the tokens be valid preprocessing
// tokens. For example, this is allowed: "#warning ` 'foo". GCC does
@@ -1629,7 +1542,7 @@ static void diagnoseAutoModuleImport(
Preprocessor &PP, SourceLocation HashLoc, Token &IncludeTok,
ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> Path,
SourceLocation PathEnd) {
- assert(PP.getLangOpts().ObjC2 && "no import syntax available");
+ assert(PP.getLangOpts().ObjC && "no import syntax available");
SmallString<128> PathString;
for (size_t I = 0, N = Path.size(); I != N; ++I) {
@@ -1794,6 +1707,7 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
// Check that we don't have infinite #include recursion.
if (IncludeMacroStack.size() == MaxAllowedIncludeStackDepth-1) {
Diag(FilenameTok, diag::err_pp_include_too_deep);
+ HasReachedMaxIncludeDepth = true;
return;
}
@@ -1888,28 +1802,35 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
// Check for likely typos due to leading or trailing non-isAlphanumeric
// characters
StringRef OriginalFilename = Filename;
- if (!File) {
- while (!isAlphanumeric(Filename.front())) {
- Filename = Filename.drop_front();
- }
- while (!isAlphanumeric(Filename.back())) {
- Filename = Filename.drop_back();
- }
-
+ if (LangOpts.SpellChecking && !File) {
+ // A heuristic to correct a typo file name by removing leading and
+ // trailing non-isAlphanumeric characters.
+ auto CorrectTypoFilename = [](llvm::StringRef Filename) {
+ Filename = Filename.drop_until(isAlphanumeric);
+ while (!Filename.empty() && !isAlphanumeric(Filename.back())) {
+ Filename = Filename.drop_back();
+ }
+ return Filename;
+ };
+ StringRef TypoCorrectionName = CorrectTypoFilename(Filename);
File = LookupFile(
FilenameLoc,
- LangOpts.MSVCCompat ? NormalizedPath.c_str() : Filename, isAngled,
- LookupFrom, LookupFromFile, CurDir,
+ LangOpts.MSVCCompat ? NormalizedPath.c_str() : TypoCorrectionName,
+ isAngled, LookupFrom, LookupFromFile, CurDir,
Callbacks ? &SearchPath : nullptr,
Callbacks ? &RelativePath : nullptr, &SuggestedModule, &IsMapped);
if (File) {
SourceRange Range(FilenameTok.getLocation(), CharEnd);
- auto Hint = isAngled ? FixItHint::CreateReplacement(
- Range, "<" + Filename.str() + ">")
- : FixItHint::CreateReplacement(
- Range, "\"" + Filename.str() + "\"");
+ auto Hint = isAngled
+ ? FixItHint::CreateReplacement(
+ Range, "<" + TypoCorrectionName.str() + ">")
+ : FixItHint::CreateReplacement(
+ Range, "\"" + TypoCorrectionName.str() + "\"");
Diag(FilenameTok, diag::err_pp_file_not_found_typo_not_fatal)
- << OriginalFilename << Filename << Hint;
+ << OriginalFilename << TypoCorrectionName << Hint;
+ // We found the file, so set the Filename to the name after typo
+ // correction.
+ Filename = TypoCorrectionName;
}
}
@@ -1935,10 +1856,11 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
if (PPOpts->SingleFileParseMode)
ShouldEnter = false;
- // Any diagnostics after the fatal error will not be visible. As the
- // compilation failed already and errors in subsequently included files won't
- // be visible, avoid preprocessing those files.
- if (ShouldEnter && Diags->hasFatalErrorOccurred())
+ // If we've reached the max allowed include depth, it is usually due to an
+ // include cycle. Don't enter already processed files again as it can lead to
+ // reaching the max allowed include depth again.
+ if (ShouldEnter && HasReachedMaxIncludeDepth && File &&
+ HeaderInfo.getFileInfo(File).NumIncludes)
ShouldEnter = false;
// Determine whether we should try to import the module for this #include, if
@@ -1971,7 +1893,7 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
// Warn that we're replacing the include/import with a module import.
// We only do this in Objective-C, where we have a module-import syntax.
- if (getLangOpts().ObjC2)
+ if (getLangOpts().ObjC)
diagnoseAutoModuleImport(*this, HashLoc, IncludeTok, Path, CharEnd);
// Load the module to import its macros. We'll make the declarations
@@ -2000,14 +1922,10 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
if (hadModuleLoaderFatalFailure()) {
// With a fatal failure in the module loader, we abort parsing.
Token &Result = IncludeTok;
- if (CurLexer) {
- Result.startToken();
- CurLexer->FormTokenWithChars(Result, CurLexer->BufferEnd, tok::eof);
- CurLexer->cutOffLexing();
- } else {
- assert(CurPTHLexer && "#include but no current lexer set!");
- CurPTHLexer->getEOF(Result);
- }
+ assert(CurLexer && "#include but no current lexer set!");
+ Result.startToken();
+ CurLexer->FormTokenWithChars(Result, CurLexer->BufferEnd, tok::eof);
+ CurLexer->cutOffLexing();
}
return;
}
@@ -2208,7 +2126,7 @@ void Preprocessor::HandleMicrosoftImportDirective(Token &Tok) {
///
void Preprocessor::HandleImportDirective(SourceLocation HashLoc,
Token &ImportTok) {
- if (!LangOpts.ObjC1) { // #import is standard for ObjC.
+ if (!LangOpts.ObjC) { // #import is standard for ObjC.
if (LangOpts.MSVCCompat)
return HandleMicrosoftImportDirective(ImportTok);
Diag(ImportTok, diag::ext_pp_import_directive);
@@ -2679,7 +2597,7 @@ void Preprocessor::HandleDefineDirective(
II->isStr("__unsafe_unretained") ||
II->isStr("__autoreleasing");
};
- if (getLangOpts().ObjC1 &&
+ if (getLangOpts().ObjC &&
SourceMgr.getFileID(OtherMI->getDefinitionLoc())
== getPredefinesFileID() &&
isObjCProtectedMacro(MacroNameTok.getIdentifierInfo())) {
diff --git a/lib/Lex/PPLexerChange.cpp b/lib/Lex/PPLexerChange.cpp
index 2ec075fa34..e321dd38fe 100644
--- a/lib/Lex/PPLexerChange.cpp
+++ b/lib/Lex/PPLexerChange.cpp
@@ -19,7 +19,6 @@
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/LexDiagnostic.h"
#include "clang/Lex/MacroInfo.h"
-#include "clang/Lex/PTHManager.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
@@ -76,13 +75,6 @@ bool Preprocessor::EnterSourceFile(FileID FID, const DirectoryLookup *CurDir,
if (MaxIncludeStackDepth < IncludeMacroStack.size())
MaxIncludeStackDepth = IncludeMacroStack.size();
- if (PTH) {
- if (PTHLexer *PL = PTH->CreateLexer(FID)) {
- EnterSourceFileWithPTH(PL, CurDir);
- return false;
- }
- }
-
// Get the MemoryBuffer for this FID, if it fails, we fail.
bool Invalid = false;
const llvm::MemoryBuffer *InputFile =
@@ -131,31 +123,6 @@ void Preprocessor::EnterSourceFileWithLexer(Lexer *TheLexer,
}
}
-/// EnterSourceFileWithPTH - Add a source file to the top of the include stack
-/// and start getting tokens from it using the PTH cache.
-void Preprocessor::EnterSourceFileWithPTH(PTHLexer *PL,
- const DirectoryLookup *CurDir) {
-
- if (CurPPLexer || CurTokenLexer)
- PushIncludeMacroStack();
-
- CurDirLookup = CurDir;
- CurPTHLexer.reset(PL);
- CurPPLexer = CurPTHLexer.get();
- CurLexerSubmodule = nullptr;
- if (CurLexerKind != CLK_LexAfterModuleImport)
- CurLexerKind = CLK_PTHLexer;
-
- // Notify the client, if desired, that we are in a new source file.
- if (Callbacks) {
- FileID FID = CurPPLexer->getFileID();
- SourceLocation EnterLoc = SourceMgr.getLocForStartOfFile(FID);
- SrcMgr::CharacteristicKind FileType =
- SourceMgr.getFileCharacteristic(EnterLoc);
- Callbacks->FileChanged(EnterLoc, PPCallbacks::EnterFile, FileType);
- }
-}
-
/// EnterMacro - Add a Macro to the top of the include stack and start lexing
/// tokens from it instead of the current buffer.
void Preprocessor::EnterMacro(Token &Tok, SourceLocation ILEnd,
@@ -304,9 +271,10 @@ void Preprocessor::diagnoseMissingHeaderInUmbrellaDir(const Module &Mod) {
ModuleMap &ModMap = getHeaderSearchInfo().getModuleMap();
const DirectoryEntry *Dir = Mod.getUmbrellaDir().Entry;
- vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
+ llvm::vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
std::error_code EC;
- for (vfs::recursive_directory_iterator Entry(FS, Dir->getName(), EC), End;
+ for (llvm::vfs::recursive_directory_iterator Entry(FS, Dir->getName(), EC),
+ End;
Entry != End && !EC; Entry.increment(EC)) {
using llvm::StringSwitch;
@@ -339,7 +307,6 @@ bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
// If we have an unclosed module region from a pragma at the end of a
// module, complain and close it now.
- // FIXME: This is not correct if we are building a module from PTH.
const bool LeavingSubmodule = CurLexer && CurLexerSubmodule;
if ((LeavingSubmodule || IncludeMacroStack.empty()) &&
!BuildingSubmoduleStack.empty() &&
@@ -436,15 +403,10 @@ bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
if (isCodeCompletionEnabled() && CurPPLexer &&
SourceMgr.getLocForStartOfFile(CurPPLexer->getFileID()) ==
CodeCompletionFileLoc) {
- if (CurLexer) {
- Result.startToken();
- CurLexer->FormTokenWithChars(Result, CurLexer->BufferEnd, tok::eof);
- CurLexer.reset();
- } else {
- assert(CurPTHLexer && "Got EOF but no current lexer set!");
- CurPTHLexer->getEOF(Result);
- CurPTHLexer.reset();
- }
+ assert(CurLexer && "Got EOF but no current lexer set!");
+ Result.startToken();
+ CurLexer->FormTokenWithChars(Result, CurLexer->BufferEnd, tok::eof);
+ CurLexer.reset();
CurPPLexer = nullptr;
recomputeCurLexerKind();
@@ -522,39 +484,34 @@ bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
}
// If this is the end of the main file, form an EOF token.
- if (CurLexer) {
- const char *EndPos = getCurLexerEndPos();
- Result.startToken();
- CurLexer->BufferPtr = EndPos;
- CurLexer->FormTokenWithChars(Result, EndPos, tok::eof);
-
- if (isCodeCompletionEnabled()) {
- // Inserting the code-completion point increases the source buffer by 1,
- // but the main FileID was created before inserting the point.
- // Compensate by reducing the EOF location by 1, otherwise the location
- // will point to the next FileID.
- // FIXME: This is hacky, the code-completion point should probably be
- // inserted before the main FileID is created.
- if (CurLexer->getFileLoc() == CodeCompletionFileLoc)
- Result.setLocation(Result.getLocation().getLocWithOffset(-1));
- }
-
- if (creatingPCHWithThroughHeader() && !LeavingPCHThroughHeader) {
- // Reached the end of the compilation without finding the through header.
- Diag(CurLexer->getFileLoc(), diag::err_pp_through_header_not_seen)
- << PPOpts->PCHThroughHeader << 0;
- }
+ assert(CurLexer && "Got EOF but no current lexer set!");
+ const char *EndPos = getCurLexerEndPos();
+ Result.startToken();
+ CurLexer->BufferPtr = EndPos;
+ CurLexer->FormTokenWithChars(Result, EndPos, tok::eof);
+
+ if (isCodeCompletionEnabled()) {
+ // Inserting the code-completion point increases the source buffer by 1,
+ // but the main FileID was created before inserting the point.
+ // Compensate by reducing the EOF location by 1, otherwise the location
+ // will point to the next FileID.
+ // FIXME: This is hacky, the code-completion point should probably be
+ // inserted before the main FileID is created.
+ if (CurLexer->getFileLoc() == CodeCompletionFileLoc)
+ Result.setLocation(Result.getLocation().getLocWithOffset(-1));
+ }
- if (!isIncrementalProcessingEnabled())
- // We're done with lexing.
- CurLexer.reset();
- } else {
- assert(CurPTHLexer && "Got EOF but no current lexer set!");
- CurPTHLexer->getEOF(Result);
- CurPTHLexer.reset();
+ if (creatingPCHWithThroughHeader() && !LeavingPCHThroughHeader) {
+ // Reached the end of the compilation without finding the through header.
+ Diag(CurLexer->getFileLoc(), diag::err_pp_through_header_not_seen)
+ << PPOpts->PCHThroughHeader << 0;
}
if (!isIncrementalProcessingEnabled())
+ // We're done with lexing.
+ CurLexer.reset();
+
+ if (!isIncrementalProcessingEnabled())
CurPPLexer = nullptr;
if (TUKind == TU_Complete) {
diff --git a/lib/Lex/PPMacroExpansion.cpp b/lib/Lex/PPMacroExpansion.cpp
index ffc2ed8c93..c70ff46ec9 100644
--- a/lib/Lex/PPMacroExpansion.cpp
+++ b/lib/Lex/PPMacroExpansion.cpp
@@ -29,7 +29,6 @@
#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PreprocessorLexer.h"
-#include "clang/Lex/PTHLexer.h"
#include "clang/Lex/Token.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
@@ -429,8 +428,6 @@ bool Preprocessor::isNextPPTokenLParen() {
unsigned Val;
if (CurLexer)
Val = CurLexer->isNextPPTokenLParen();
- else if (CurPTHLexer)
- Val = CurPTHLexer->isNextPPTokenLParen();
else
Val = CurTokenLexer->isNextTokenLParen();
@@ -443,8 +440,6 @@ bool Preprocessor::isNextPPTokenLParen() {
for (const IncludeStackInfo &Entry : llvm::reverse(IncludeMacroStack)) {
if (Entry.TheLexer)
Val = Entry.TheLexer->isNextPPTokenLParen();
- else if (Entry.ThePTHLexer)
- Val = Entry.ThePTHLexer->isNextPPTokenLParen();
else
Val = Entry.TheTokenLexer->isNextTokenLParen();
diff --git a/lib/Lex/PTHLexer.cpp b/lib/Lex/PTHLexer.cpp
deleted file mode 100644
index 45cff56dca..0000000000
--- a/lib/Lex/PTHLexer.cpp
+++ /dev/null
@@ -1,748 +0,0 @@
-//===- PTHLexer.cpp - Lex from a token stream -----------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the PTHLexer interface.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/Lex/PTHLexer.h"
-#include "clang/Basic/Diagnostic.h"
-#include "clang/Basic/FileManager.h"
-#include "clang/Basic/FileSystemStatCache.h"
-#include "clang/Basic/IdentifierTable.h"
-#include "clang/Basic/SourceManager.h"
-#include "clang/Basic/TokenKinds.h"
-#include "clang/Lex/LexDiagnostic.h"
-#include "clang/Lex/PTHManager.h"
-#include "clang/Lex/Preprocessor.h"
-#include "clang/Lex/Token.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/StringRef.h"
-#include "llvm/Support/DJB.h"
-#include "llvm/Support/Endian.h"
-#include "llvm/Support/ErrorOr.h"
-#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/OnDiskHashTable.h"
-#include <cassert>
-#include <cstdint>
-#include <cstdlib>
-#include <cstring>
-#include <ctime>
-#include <memory>
-#include <utility>
-
-using namespace clang;
-
-static const unsigned StoredTokenSize = 1 + 1 + 2 + 4 + 4;
-
-//===----------------------------------------------------------------------===//
-// PTHLexer methods.
-//===----------------------------------------------------------------------===//
-
-PTHLexer::PTHLexer(Preprocessor &PP, FileID FID, const unsigned char *D,
- const unsigned char *ppcond, PTHManager &PM)
- : PreprocessorLexer(&PP, FID), TokBuf(D), CurPtr(D), PPCond(ppcond),
- CurPPCondPtr(ppcond), PTHMgr(PM) {
- FileStartLoc = PP.getSourceManager().getLocForStartOfFile(FID);
-}
-
-bool PTHLexer::Lex(Token& Tok) {
- //===--------------------------------------==//
- // Read the raw token data.
- //===--------------------------------------==//
- using namespace llvm::support;
-
- // Shadow CurPtr into an automatic variable.
- const unsigned char *CurPtrShadow = CurPtr;
-
- // Read in the data for the token.
- unsigned Word0 = endian::readNext<uint32_t, little, aligned>(CurPtrShadow);
- uint32_t IdentifierID =
- endian::readNext<uint32_t, little, aligned>(CurPtrShadow);
- uint32_t FileOffset =
- endian::readNext<uint32_t, little, aligned>(CurPtrShadow);
-
- tok::TokenKind TKind = (tok::TokenKind) (Word0 & 0xFF);
- Token::TokenFlags TFlags = (Token::TokenFlags) ((Word0 >> 8) & 0xFF);
- uint32_t Len = Word0 >> 16;
-
- CurPtr = CurPtrShadow;
-
- //===--------------------------------------==//
- // Construct the token itself.
- //===--------------------------------------==//
-
- Tok.startToken();
- Tok.setKind(TKind);
- Tok.setFlag(TFlags);
- assert(!LexingRawMode);
- Tok.setLocation(FileStartLoc.getLocWithOffset(FileOffset));
- Tok.setLength(Len);
-
- // Handle identifiers.
- if (Tok.isLiteral()) {
- Tok.setLiteralData((const char*) (PTHMgr.SpellingBase + IdentifierID));
- }
- else if (IdentifierID) {
- MIOpt.ReadToken();
- IdentifierInfo *II = PTHMgr.GetIdentifierInfo(IdentifierID-1);
-
- Tok.setIdentifierInfo(II);
-
- // Change the kind of this identifier to the appropriate token kind, e.g.
- // turning "for" into a keyword.
- Tok.setKind(II->getTokenID());
-
- if (II->isHandleIdentifierCase())
- return PP->HandleIdentifier(Tok);
-
- return true;
- }
-
- //===--------------------------------------==//
- // Process the token.
- //===--------------------------------------==//
- if (TKind == tok::eof) {
- // Save the end-of-file token.
- EofToken = Tok;
-
- assert(!ParsingPreprocessorDirective);
- assert(!LexingRawMode);
-
- return LexEndOfFile(Tok);
- }
-
- if (TKind == tok::hash && Tok.isAtStartOfLine()) {
- LastHashTokPtr = CurPtr - StoredTokenSize;
- assert(!LexingRawMode);
- PP->HandleDirective(Tok);
-
- return false;
- }
-
- if (TKind == tok::eod) {
- assert(ParsingPreprocessorDirective);
- ParsingPreprocessorDirective = false;
- return true;
- }
-
- MIOpt.ReadToken();
- return true;
-}
-
-bool PTHLexer::LexEndOfFile(Token &Result) {
- // If we hit the end of the file while parsing a preprocessor directive,
- // end the preprocessor directive first. The next token returned will
- // then be the end of file.
- if (ParsingPreprocessorDirective) {
- ParsingPreprocessorDirective = false; // Done parsing the "line".
- return true; // Have a token.
- }
-
- assert(!LexingRawMode);
-
- // If we are in a #if directive, emit an error.
- while (!ConditionalStack.empty()) {
- if (PP->getCodeCompletionFileLoc() != FileStartLoc)
- PP->Diag(ConditionalStack.back().IfLoc,
- diag::err_pp_unterminated_conditional);
- ConditionalStack.pop_back();
- }
-
- // Finally, let the preprocessor handle this.
- return PP->HandleEndOfFile(Result);
-}
-
-// FIXME: We can just grab the last token instead of storing a copy
-// into EofToken.
-void PTHLexer::getEOF(Token& Tok) {
- assert(EofToken.is(tok::eof));
- Tok = EofToken;
-}
-
-void PTHLexer::DiscardToEndOfLine() {
- assert(ParsingPreprocessorDirective && ParsingFilename == false &&
- "Must be in a preprocessing directive!");
-
- // We assume that if the preprocessor wishes to discard to the end of
- // the line that it also means to end the current preprocessor directive.
- ParsingPreprocessorDirective = false;
-
- // Skip tokens by only peeking at their token kind and the flags.
- // We don't need to actually reconstruct full tokens from the token buffer.
- // This saves some copies and it also reduces IdentifierInfo* lookup.
- const unsigned char* p = CurPtr;
- while (true) {
- // Read the token kind. Are we at the end of the file?
- tok::TokenKind x = (tok::TokenKind) (uint8_t) *p;
- if (x == tok::eof) break;
-
- // Read the token flags. Are we at the start of the next line?
- Token::TokenFlags y = (Token::TokenFlags) (uint8_t) p[1];
- if (y & Token::StartOfLine) break;
-
- // Skip to the next token.
- p += StoredTokenSize;
- }
-
- CurPtr = p;
-}
-
-/// SkipBlock - Used by Preprocessor to skip the current conditional block.
-bool PTHLexer::SkipBlock() {
- using namespace llvm::support;
-
- assert(CurPPCondPtr && "No cached PP conditional information.");
- assert(LastHashTokPtr && "No known '#' token.");
-
- const unsigned char *HashEntryI = nullptr;
- uint32_t TableIdx;
-
- do {
- // Read the token offset from the side-table.
- uint32_t Offset = endian::readNext<uint32_t, little, aligned>(CurPPCondPtr);
-
- // Read the target table index from the side-table.
- TableIdx = endian::readNext<uint32_t, little, aligned>(CurPPCondPtr);
-
- // Compute the actual memory address of the '#' token data for this entry.
- HashEntryI = TokBuf + Offset;
-
- // Optimization: "Sibling jumping". #if...#else...#endif blocks can
- // contain nested blocks. In the side-table we can jump over these
- // nested blocks instead of doing a linear search if the next "sibling"
- // entry is not at a location greater than LastHashTokPtr.
- if (HashEntryI < LastHashTokPtr && TableIdx) {
- // In the side-table we are still at an entry for a '#' token that
- // is earlier than the last one we saw. Check if the location we would
- // stride gets us closer.
- const unsigned char* NextPPCondPtr =
- PPCond + TableIdx*(sizeof(uint32_t)*2);
- assert(NextPPCondPtr >= CurPPCondPtr);
- // Read where we should jump to.
- const unsigned char *HashEntryJ =
- TokBuf + endian::readNext<uint32_t, little, aligned>(NextPPCondPtr);
-
- if (HashEntryJ <= LastHashTokPtr) {
- // Jump directly to the next entry in the side table.
- HashEntryI = HashEntryJ;
- TableIdx = endian::readNext<uint32_t, little, aligned>(NextPPCondPtr);
- CurPPCondPtr = NextPPCondPtr;
- }
- }
- }
- while (HashEntryI < LastHashTokPtr);
- assert(HashEntryI == LastHashTokPtr && "No PP-cond entry found for '#'");
- assert(TableIdx && "No jumping from #endifs.");
-
- // Update our side-table iterator.
- const unsigned char* NextPPCondPtr = PPCond + TableIdx*(sizeof(uint32_t)*2);
- assert(NextPPCondPtr >= CurPPCondPtr);
- CurPPCondPtr = NextPPCondPtr;
-
- // Read where we should jump to.
- HashEntryI =
- TokBuf + endian::readNext<uint32_t, little, aligned>(NextPPCondPtr);
- uint32_t NextIdx = endian::readNext<uint32_t, little, aligned>(NextPPCondPtr);
-
- // By construction NextIdx will be zero if this is a #endif. This is useful
- // to know to obviate lexing another token.
- bool isEndif = NextIdx == 0;
-
- // This case can occur when we see something like this:
- //
- // #if ...
- // /* a comment or nothing */
- // #elif
- //
- // If we are skipping the first #if block it will be the case that CurPtr
-  // already points to 'elif'. Just return.
-
- if (CurPtr > HashEntryI) {
- assert(CurPtr == HashEntryI + StoredTokenSize);
- // Did we reach a #endif? If so, go ahead and consume that token as well.
- if (isEndif)
- CurPtr += StoredTokenSize * 2;
- else
- LastHashTokPtr = HashEntryI;
-
- return isEndif;
- }
-
- // Otherwise, we need to advance. Update CurPtr to point to the '#' token.
- CurPtr = HashEntryI;
-
- // Update the location of the last observed '#'. This is useful if we
- // are skipping multiple blocks.
- LastHashTokPtr = CurPtr;
-
- // Skip the '#' token.
- assert(((tok::TokenKind)*CurPtr) == tok::hash);
- CurPtr += StoredTokenSize;
-
- // Did we reach a #endif? If so, go ahead and consume that token as well.
- if (isEndif) {
- CurPtr += StoredTokenSize * 2;
- }
-
- return isEndif;
-}
-
-SourceLocation PTHLexer::getSourceLocation() {
- // getSourceLocation is not on the hot path. It is used to get the location
- // of the next token when transitioning back to this lexer when done
- // handling a #included file. Just read the necessary data from the token
- // data buffer to construct the SourceLocation object.
- // NOTE: This is a virtual function; hence it is defined out-of-line.
- using namespace llvm::support;
-
- const unsigned char *OffsetPtr = CurPtr + (StoredTokenSize - 4);
- uint32_t Offset = endian::readNext<uint32_t, little, aligned>(OffsetPtr);
- return FileStartLoc.getLocWithOffset(Offset);
-}
-
-//===----------------------------------------------------------------------===//
-// PTH file lookup: map from strings to file data.
-//===----------------------------------------------------------------------===//
-
-/// PTHFileLookup - This internal data structure is used by the PTHManager
-/// to map from FileEntry objects managed by FileManager to offsets within
-/// the PTH file.
-namespace {
-
-class PTHFileData {
- const uint32_t TokenOff;
- const uint32_t PPCondOff;
-
-public:
- PTHFileData(uint32_t tokenOff, uint32_t ppCondOff)
- : TokenOff(tokenOff), PPCondOff(ppCondOff) {}
-
- uint32_t getTokenOffset() const { return TokenOff; }
- uint32_t getPPCondOffset() const { return PPCondOff; }
-};
-
-class PTHFileLookupCommonTrait {
-public:
- using internal_key_type = std::pair<unsigned char, StringRef>;
- using hash_value_type = unsigned;
- using offset_type = unsigned;
-
- static hash_value_type ComputeHash(internal_key_type x) {
- return llvm::djbHash(x.second);
- }
-
- static std::pair<unsigned, unsigned>
- ReadKeyDataLength(const unsigned char*& d) {
- using namespace llvm::support;
-
- unsigned keyLen =
- (unsigned)endian::readNext<uint16_t, little, unaligned>(d);
- unsigned dataLen = (unsigned) *(d++);
- return std::make_pair(keyLen, dataLen);
- }
-
- static internal_key_type ReadKey(const unsigned char* d, unsigned) {
- unsigned char k = *(d++); // Read the entry kind.
- return std::make_pair(k, (const char*) d);
- }
-};
-
-} // namespace
-
-class PTHManager::PTHFileLookupTrait : public PTHFileLookupCommonTrait {
-public:
- using external_key_type = const FileEntry *;
- using data_type = PTHFileData;
-
- static internal_key_type GetInternalKey(const FileEntry* FE) {
- return std::make_pair((unsigned char) 0x1, FE->getName());
- }
-
- static bool EqualKey(internal_key_type a, internal_key_type b) {
- return a.first == b.first && a.second == b.second;
- }
-
- static PTHFileData ReadData(const internal_key_type& k,
- const unsigned char* d, unsigned) {
- using namespace llvm::support;
-
- assert(k.first == 0x1 && "Only file lookups can match!");
- uint32_t x = endian::readNext<uint32_t, little, unaligned>(d);
- uint32_t y = endian::readNext<uint32_t, little, unaligned>(d);
- return PTHFileData(x, y);
- }
-};
-
-class PTHManager::PTHStringLookupTrait {
-public:
- using data_type = uint32_t;
- using external_key_type = const std::pair<const char *, unsigned>;
- using internal_key_type = external_key_type;
- using hash_value_type = uint32_t;
- using offset_type = unsigned;
-
- static bool EqualKey(const internal_key_type& a,
- const internal_key_type& b) {
- return (a.second == b.second) ? memcmp(a.first, b.first, a.second) == 0
- : false;
- }
-
- static hash_value_type ComputeHash(const internal_key_type& a) {
- return llvm::djbHash(StringRef(a.first, a.second));
- }
-
- // This hopefully will just get inlined and removed by the optimizer.
- static const internal_key_type&
- GetInternalKey(const external_key_type& x) { return x; }
-
- static std::pair<unsigned, unsigned>
- ReadKeyDataLength(const unsigned char*& d) {
- using namespace llvm::support;
-
- return std::make_pair(
- (unsigned)endian::readNext<uint16_t, little, unaligned>(d),
- sizeof(uint32_t));
- }
-
- static std::pair<const char*, unsigned>
- ReadKey(const unsigned char* d, unsigned n) {
- assert(n >= 2 && d[n-1] == '\0');
- return std::make_pair((const char*) d, n-1);
- }
-
- static uint32_t ReadData(const internal_key_type& k, const unsigned char* d,
- unsigned) {
- using namespace llvm::support;
-
- return endian::readNext<uint32_t, little, unaligned>(d);
- }
-};
-
-//===----------------------------------------------------------------------===//
-// PTHManager methods.
-//===----------------------------------------------------------------------===//
-
-PTHManager::PTHManager(
- std::unique_ptr<const llvm::MemoryBuffer> buf,
- std::unique_ptr<PTHFileLookup> fileLookup, const unsigned char *idDataTable,
- std::unique_ptr<IdentifierInfo *[], llvm::FreeDeleter> perIDCache,
- std::unique_ptr<PTHStringIdLookup> stringIdLookup, unsigned numIds,
- const unsigned char *spellingBase, const char *originalSourceFile)
- : Buf(std::move(buf)), PerIDCache(std::move(perIDCache)),
- FileLookup(std::move(fileLookup)), IdDataTable(idDataTable),
- StringIdLookup(std::move(stringIdLookup)), NumIds(numIds),
- SpellingBase(spellingBase), OriginalSourceFile(originalSourceFile) {}
-
-PTHManager::~PTHManager() = default;
-
-static void InvalidPTH(DiagnosticsEngine &Diags, const char *Msg) {
- Diags.Report(Diags.getCustomDiagID(DiagnosticsEngine::Error, "%0")) << Msg;
-}
-
-PTHManager *PTHManager::Create(StringRef file, DiagnosticsEngine &Diags) {
- // Memory map the PTH file.
- llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> FileOrErr =
- llvm::MemoryBuffer::getFile(file);
-
- if (!FileOrErr) {
- // FIXME: Add ec.message() to this diag.
- Diags.Report(diag::err_invalid_pth_file) << file;
- return nullptr;
- }
- std::unique_ptr<llvm::MemoryBuffer> File = std::move(FileOrErr.get());
-
- using namespace llvm::support;
-
- // Get the buffer ranges and check if there are at least three 32-bit
- // words at the end of the file.
- const unsigned char *BufBeg = (const unsigned char*)File->getBufferStart();
- const unsigned char *BufEnd = (const unsigned char*)File->getBufferEnd();
-
- // Check the prologue of the file.
- if ((BufEnd - BufBeg) < (signed)(sizeof("cfe-pth") + 4 + 4) ||
- memcmp(BufBeg, "cfe-pth", sizeof("cfe-pth")) != 0) {
- Diags.Report(diag::err_invalid_pth_file) << file;
- return nullptr;
- }
-
- // Read the PTH version.
- const unsigned char *p = BufBeg + (sizeof("cfe-pth"));
- unsigned Version = endian::readNext<uint32_t, little, aligned>(p);
-
- if (Version < PTHManager::Version) {
- InvalidPTH(Diags,
- Version < PTHManager::Version
- ? "PTH file uses an older PTH format that is no longer supported"
- : "PTH file uses a newer PTH format that cannot be read");
- return nullptr;
- }
-
- // Compute the address of the index table at the end of the PTH file.
- const unsigned char *PrologueOffset = p;
-
- if (PrologueOffset >= BufEnd) {
- Diags.Report(diag::err_invalid_pth_file) << file;
- return nullptr;
- }
-
- // Construct the file lookup table. This will be used for mapping from
- // FileEntry*'s to cached tokens.
- const unsigned char* FileTableOffset = PrologueOffset + sizeof(uint32_t)*2;
- const unsigned char *FileTable =
- BufBeg + endian::readNext<uint32_t, little, aligned>(FileTableOffset);
-
- if (!(FileTable > BufBeg && FileTable < BufEnd)) {
- Diags.Report(diag::err_invalid_pth_file) << file;
- return nullptr; // FIXME: Proper error diagnostic?
- }
-
- std::unique_ptr<PTHFileLookup> FL(PTHFileLookup::Create(FileTable, BufBeg));
-
- // Warn if the PTH file is empty. We still want to create a PTHManager
- // as the PTH could be used with -include-pth.
- if (FL->isEmpty())
- InvalidPTH(Diags, "PTH file contains no cached source data");
-
- // Get the location of the table mapping from persistent ids to the
- // data needed to reconstruct identifiers.
- const unsigned char* IDTableOffset = PrologueOffset + sizeof(uint32_t)*0;
- const unsigned char *IData =
- BufBeg + endian::readNext<uint32_t, little, aligned>(IDTableOffset);
-
- if (!(IData >= BufBeg && IData < BufEnd)) {
- Diags.Report(diag::err_invalid_pth_file) << file;
- return nullptr;
- }
-
- // Get the location of the hashtable mapping between strings and
- // persistent IDs.
- const unsigned char* StringIdTableOffset = PrologueOffset + sizeof(uint32_t)*1;
- const unsigned char *StringIdTable =
- BufBeg + endian::readNext<uint32_t, little, aligned>(StringIdTableOffset);
- if (!(StringIdTable >= BufBeg && StringIdTable < BufEnd)) {
- Diags.Report(diag::err_invalid_pth_file) << file;
- return nullptr;
- }
-
- std::unique_ptr<PTHStringIdLookup> SL(
- PTHStringIdLookup::Create(StringIdTable, BufBeg));
-
- // Get the location of the spelling cache.
- const unsigned char* spellingBaseOffset = PrologueOffset + sizeof(uint32_t)*3;
- const unsigned char *spellingBase =
- BufBeg + endian::readNext<uint32_t, little, aligned>(spellingBaseOffset);
- if (!(spellingBase >= BufBeg && spellingBase < BufEnd)) {
- Diags.Report(diag::err_invalid_pth_file) << file;
- return nullptr;
- }
-
- // Get the number of IdentifierInfos and pre-allocate the identifier cache.
- uint32_t NumIds = endian::readNext<uint32_t, little, aligned>(IData);
-
- // Pre-allocate the persistent ID -> IdentifierInfo* cache. We use calloc()
- // so that we in the best case only zero out memory once when the OS returns
- // us new pages.
- std::unique_ptr<IdentifierInfo *[], llvm::FreeDeleter> PerIDCache;
-
- if (NumIds) {
- PerIDCache.reset((IdentifierInfo **)calloc(NumIds, sizeof(PerIDCache[0])));
- if (!PerIDCache) {
- InvalidPTH(Diags, "Could not allocate memory for processing PTH file");
- return nullptr;
- }
- }
-
- // Compute the address of the original source file.
- const unsigned char* originalSourceBase = PrologueOffset + sizeof(uint32_t)*4;
- unsigned len =
- endian::readNext<uint16_t, little, unaligned>(originalSourceBase);
- if (!len) originalSourceBase = nullptr;
-
- // Create the new PTHManager.
- return new PTHManager(std::move(File), std::move(FL), IData,
- std::move(PerIDCache), std::move(SL), NumIds,
- spellingBase, (const char *)originalSourceBase);
-}
-
-IdentifierInfo* PTHManager::LazilyCreateIdentifierInfo(unsigned PersistentID) {
- using namespace llvm::support;
-
- // Look in the PTH file for the string data for the IdentifierInfo object.
- const unsigned char* TableEntry = IdDataTable + sizeof(uint32_t)*PersistentID;
- const unsigned char *IDData =
- (const unsigned char *)Buf->getBufferStart() +
- endian::readNext<uint32_t, little, aligned>(TableEntry);
- assert(IDData < (const unsigned char*)Buf->getBufferEnd());
-
- // Allocate the object.
- std::pair<IdentifierInfo,const unsigned char*> *Mem =
- Alloc.Allocate<std::pair<IdentifierInfo, const unsigned char *>>();
-
- Mem->second = IDData;
- assert(IDData[0] != '\0');
- IdentifierInfo *II = new ((void*) Mem) IdentifierInfo();
-
- // Store the new IdentifierInfo in the cache.
- PerIDCache[PersistentID] = II;
- assert(II->getNameStart() && II->getNameStart()[0] != '\0');
- return II;
-}
-
-IdentifierInfo* PTHManager::get(StringRef Name) {
- // Double check our assumption that the last character isn't '\0'.
- assert(Name.empty() || Name.back() != '\0');
- PTHStringIdLookup::iterator I =
- StringIdLookup->find(std::make_pair(Name.data(), Name.size()));
- if (I == StringIdLookup->end()) // No identifier found?
- return nullptr;
-
- // Match found. Return the identifier!
- assert(*I > 0);
- return GetIdentifierInfo(*I-1);
-}
-
-PTHLexer *PTHManager::CreateLexer(FileID FID) {
- const FileEntry *FE = PP->getSourceManager().getFileEntryForID(FID);
- if (!FE)
- return nullptr;
-
- using namespace llvm::support;
-
- // Lookup the FileEntry object in our file lookup data structure. It will
- // return a variant that indicates whether or not there is an offset within
- // the PTH file that contains cached tokens.
- PTHFileLookup::iterator I = FileLookup->find(FE);
-
- if (I == FileLookup->end()) // No tokens available?
- return nullptr;
-
- const PTHFileData& FileData = *I;
-
- const unsigned char *BufStart = (const unsigned char *)Buf->getBufferStart();
- // Compute the offset of the token data within the buffer.
- const unsigned char* data = BufStart + FileData.getTokenOffset();
-
- // Get the location of pp-conditional table.
- const unsigned char* ppcond = BufStart + FileData.getPPCondOffset();
- uint32_t Len = endian::readNext<uint32_t, little, aligned>(ppcond);
- if (Len == 0) ppcond = nullptr;
-
- assert(PP && "No preprocessor set yet!");
- return new PTHLexer(*PP, FID, data, ppcond, *this);
-}
-
-//===----------------------------------------------------------------------===//
-// 'stat' caching.
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-class PTHStatData {
-public:
- uint64_t Size;
- time_t ModTime;
- llvm::sys::fs::UniqueID UniqueID;
- const bool HasData = false;
- bool IsDirectory;
-
- PTHStatData() = default;
- PTHStatData(uint64_t Size, time_t ModTime, llvm::sys::fs::UniqueID UniqueID,
- bool IsDirectory)
- : Size(Size), ModTime(ModTime), UniqueID(UniqueID), HasData(true),
- IsDirectory(IsDirectory) {}
-};
-
-class PTHStatLookupTrait : public PTHFileLookupCommonTrait {
-public:
- using external_key_type = StringRef; // const char*
- using data_type = PTHStatData;
-
- static internal_key_type GetInternalKey(StringRef path) {
- // The key 'kind' doesn't matter here because it is ignored in EqualKey.
- return std::make_pair((unsigned char) 0x0, path);
- }
-
- static bool EqualKey(internal_key_type a, internal_key_type b) {
- // When doing 'stat' lookups we don't care about the kind of 'a' and 'b',
- // just the paths.
- return a.second == b.second;
- }
-
- static data_type ReadData(const internal_key_type& k, const unsigned char* d,
- unsigned) {
- if (k.first /* File or Directory */) {
- bool IsDirectory = true;
- if (k.first == 0x1 /* File */) {
- IsDirectory = false;
- d += 4 * 2; // Skip the first 2 words.
- }
-
- using namespace llvm::support;
-
- uint64_t File = endian::readNext<uint64_t, little, unaligned>(d);
- uint64_t Device = endian::readNext<uint64_t, little, unaligned>(d);
- llvm::sys::fs::UniqueID UniqueID(Device, File);
- time_t ModTime = endian::readNext<uint64_t, little, unaligned>(d);
- uint64_t Size = endian::readNext<uint64_t, little, unaligned>(d);
- return data_type(Size, ModTime, UniqueID, IsDirectory);
- }
-
- // Negative stat. Don't read anything.
- return data_type();
- }
-};
-
-} // namespace
-
-namespace clang {
-
-class PTHStatCache : public FileSystemStatCache {
- using CacheTy = llvm::OnDiskChainedHashTable<PTHStatLookupTrait>;
-
- CacheTy Cache;
-
-public:
- PTHStatCache(PTHManager::PTHFileLookup &FL)
- : Cache(FL.getNumBuckets(), FL.getNumEntries(), FL.getBuckets(),
- FL.getBase()) {}
-
- LookupResult getStat(StringRef Path, FileData &Data, bool isFile,
- std::unique_ptr<vfs::File> *F,
- vfs::FileSystem &FS) override {
- // Do the lookup for the file's data in the PTH file.
- CacheTy::iterator I = Cache.find(Path);
-
- // If we don't get a hit in the PTH file just forward to 'stat'.
- if (I == Cache.end())
- return statChained(Path, Data, isFile, F, FS);
-
- const PTHStatData &D = *I;
-
- if (!D.HasData)
- return CacheMissing;
-
- Data.Name = Path;
- Data.Size = D.Size;
- Data.ModTime = D.ModTime;
- Data.UniqueID = D.UniqueID;
- Data.IsDirectory = D.IsDirectory;
- Data.IsNamedPipe = false;
- Data.InPCH = true;
-
- return CacheExists;
- }
-};
-
-} // namespace clang
-
-std::unique_ptr<FileSystemStatCache> PTHManager::createStatCache() {
- return llvm::make_unique<PTHStatCache>(*FileLookup);
-}
diff --git a/lib/Lex/Pragma.cpp b/lib/Lex/Pragma.cpp
index 0a63ed724c..575935119f 100644
--- a/lib/Lex/Pragma.cpp
+++ b/lib/Lex/Pragma.cpp
@@ -31,7 +31,6 @@
#include "clang/Lex/PPCallbacks.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PreprocessorLexer.h"
-#include "clang/Lex/PTHLexer.h"
#include "clang/Lex/Token.h"
#include "clang/Lex/TokenLexer.h"
#include "llvm/ADT/ArrayRef.h"
@@ -404,10 +403,7 @@ void Preprocessor::HandlePragmaOnce(Token &OnceTok) {
void Preprocessor::HandlePragmaMark() {
assert(CurPPLexer && "No current lexer?");
- if (CurLexer)
- CurLexer->ReadToEndOfLine();
- else
- CurPTHLexer->DiscardToEndOfLine();
+ CurLexer->ReadToEndOfLine();
}
/// HandlePragmaPoison - Handle \#pragma GCC poison. PoisonTok is the 'poison'.
@@ -810,12 +806,6 @@ void Preprocessor::HandlePragmaModuleBuild(Token &Tok) {
DiscardUntilEndOfDirective();
}
- if (CurPTHLexer) {
- // FIXME: Support this somehow?
- Diag(Loc, diag::err_pp_module_build_pth);
- return;
- }
-
CurLexer->LexingRawMode = true;
auto TryConsumeIdentifier = [&](StringRef Ident) -> bool {
@@ -1130,10 +1120,6 @@ struct PragmaDebugHandler : public PragmaHandler {
}
void HandleCaptured(Preprocessor &PP) {
- // Skip if emitting preprocessed output.
- if (PP.isPreprocessedOutput())
- return;
-
Token Tok;
PP.LexUnexpandedToken(Tok);
diff --git a/lib/Lex/Preprocessor.cpp b/lib/Lex/Preprocessor.cpp
index 48b3571ab2..047a4caaca 100644
--- a/lib/Lex/Preprocessor.cpp
+++ b/lib/Lex/Preprocessor.cpp
@@ -44,8 +44,6 @@
#include "clang/Lex/MacroArgs.h"
#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/ModuleLoader.h"
-#include "clang/Lex/PTHLexer.h"
-#include "clang/Lex/PTHManager.h"
#include "clang/Lex/Pragma.h"
#include "clang/Lex/PreprocessingRecord.h"
#include "clang/Lex/PreprocessorLexer.h"
@@ -224,11 +222,6 @@ void Preprocessor::FinalizeForModelFile() {
PragmaHandlers = std::move(PragmaHandlersBackup);
}
-void Preprocessor::setPTHManager(PTHManager* pm) {
- PTH.reset(pm);
- FileMgr.addStatCache(PTH->createStatCache());
-}
-
void Preprocessor::DumpToken(const Token &Tok, bool DumpFlags) const {
llvm::errs() << tok::getTokenName(Tok.getKind()) << " '"
<< getSpelling(Tok) << "'";
@@ -379,8 +372,6 @@ StringRef Preprocessor::getLastMacroWithSpelling(
void Preprocessor::recomputeCurLexerKind() {
if (CurLexer)
CurLexerKind = CLK_Lexer;
- else if (CurPTHLexer)
- CurLexerKind = CLK_PTHLexer;
else if (CurTokenLexer)
CurLexerKind = CLK_TokenLexer;
else
@@ -877,9 +868,6 @@ void Preprocessor::Lex(Token &Result) {
case CLK_Lexer:
ReturnedToken = CurLexer->Lex(Result);
break;
- case CLK_PTHLexer:
- ReturnedToken = CurPTHLexer->Lex(Result);
- break;
case CLK_TokenLexer:
ReturnedToken = CurTokenLexer->Lex(Result);
break;
@@ -943,7 +931,7 @@ void Preprocessor::LexAfterModuleImport(Token &Result) {
// If we have a non-empty module path, load the named module.
if (!ModuleImportPath.empty()) {
// Under the Modules TS, the dot is just part of the module name, and not
- // a real hierarachy separator. Flatten such module names now.
+ // a real hierarchy separator. Flatten such module names now.
//
// FIXME: Is this the right level to be performing this transformation?
std::string FlatModuleName;
diff --git a/lib/Parse/ParseCXXInlineMethods.cpp b/lib/Parse/ParseCXXInlineMethods.cpp
index 5898120cab..fde3ce00f8 100644
--- a/lib/Parse/ParseCXXInlineMethods.cpp
+++ b/lib/Parse/ParseCXXInlineMethods.cpp
@@ -595,7 +595,7 @@ void Parser::ParseLexedMemberInitializers(ParsingClass &Class) {
// to X" within the optional brace-or-equal-initializer. It shall not
// appear elsewhere in the member-declarator.
Sema::CXXThisScopeRAII ThisScope(Actions, Class.TagOrTemplate,
- /*TypeQuals=*/(unsigned)0);
+ Qualifiers());
for (size_t i = 0; i < Class.LateParsedDeclarations.size(); ++i) {
Class.LateParsedDeclarations[i]->ParseLexedMemberInitializers();
@@ -728,7 +728,7 @@ bool Parser::ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
case tok::semi:
if (StopAtSemi)
return false;
- // FALL THROUGH.
+ LLVM_FALLTHROUGH;
default:
// consume this token.
Toks.push_back(Tok);
@@ -1108,13 +1108,13 @@ bool Parser::ConsumeAndStoreInitializer(CachedTokens &Toks,
goto consume_token;
if (AngleCount) --AngleCount;
if (KnownTemplateCount) --KnownTemplateCount;
- // Fall through.
+ LLVM_FALLTHROUGH;
case tok::greatergreater:
if (!getLangOpts().CPlusPlus11)
goto consume_token;
if (AngleCount) --AngleCount;
if (KnownTemplateCount) --KnownTemplateCount;
- // Fall through.
+ LLVM_FALLTHROUGH;
case tok::greater:
if (AngleCount) --AngleCount;
if (KnownTemplateCount) --KnownTemplateCount;
@@ -1219,7 +1219,7 @@ bool Parser::ConsumeAndStoreInitializer(CachedTokens &Toks,
case tok::semi:
if (CIK == CIK_DefaultInitializer)
return true; // End of the default initializer.
- // FALL THROUGH.
+ LLVM_FALLTHROUGH;
default:
consume_token:
Toks.push_back(Tok);
diff --git a/lib/Parse/ParseDecl.cpp b/lib/Parse/ParseDecl.cpp
index bbcc860bc2..7538b635f0 100644
--- a/lib/Parse/ParseDecl.cpp
+++ b/lib/Parse/ParseDecl.cpp
@@ -24,7 +24,6 @@
#include "clang/Sema/Lookup.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
-#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
@@ -754,7 +753,7 @@ void Parser::ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs) {
case tok::kw__Null_unspecified: {
IdentifierInfo *AttrName = Tok.getIdentifierInfo();
SourceLocation AttrNameLoc = ConsumeToken();
- if (!getLangOpts().ObjC1)
+ if (!getLangOpts().ObjC)
Diag(AttrNameLoc, diag::ext_nullability)
<< AttrName;
attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
@@ -1001,6 +1000,21 @@ void Parser::ParseAvailabilityAttribute(IdentifierInfo &Availability,
continue;
}
+ if (Keyword == Ident_deprecated && Platform->Ident &&
+ Platform->Ident->isStr("swift")) {
+ // For swift, we deprecate for all versions.
+ if (Changes[Deprecated].KeywordLoc.isValid()) {
+ Diag(KeywordLoc, diag::err_availability_redundant)
+ << Keyword
+ << SourceRange(Changes[Deprecated].KeywordLoc);
+ }
+
+ Changes[Deprecated].KeywordLoc = KeywordLoc;
+ // Use a fake version here.
+ Changes[Deprecated].Version = VersionTuple(1);
+ continue;
+ }
+
if (Tok.isNot(tok::equal)) {
Diag(Tok, diag::err_expected_after) << Keyword << tok::equal;
SkipUntil(tok::r_paren, StopAtSemi);
@@ -1412,7 +1426,7 @@ void Parser::ParseLexedAttribute(LateParsedAttribute &LA,
RecordDecl *RD = dyn_cast_or_null<RecordDecl>(D->getDeclContext());
// Allow 'this' within late-parsed attributes.
- Sema::CXXThisScopeRAII ThisScope(Actions, RD, /*TypeQuals=*/0,
+ Sema::CXXThisScopeRAII ThisScope(Actions, RD, Qualifiers(),
ND && ND->isCXXInstanceMember());
if (LA.Decls.size() == 1) {
@@ -3290,7 +3304,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// Objective-C supports type arguments and protocol references
// following an Objective-C object or object pointer
// type. Handle either one of them.
- if (Tok.is(tok::less) && getLangOpts().ObjC1) {
+ if (Tok.is(tok::less) && getLangOpts().ObjC) {
SourceLocation NewEndLoc;
TypeResult NewTypeRep = parseObjCTypeArgsAndProtocolQualifiers(
Loc, TypeRep, /*consumeLastToken=*/true,
@@ -3812,7 +3826,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// GCC ObjC supports types like "<SomeProtocol>" as a synonym for
// "id<SomeProtocol>". This is hopelessly old fashioned and dangerous,
// but we support it.
- if (DS.hasTypeSpecifier() || !getLangOpts().ObjC1)
+ if (DS.hasTypeSpecifier() || !getLangOpts().ObjC)
goto DoneWithDeclSpec;
SourceLocation StartLoc = Tok.getLocation();
@@ -3838,7 +3852,8 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
assert(PrevSpec && "Method did not return previous specifier!");
assert(DiagID);
- if (DiagID == diag::ext_duplicate_declspec)
+ if (DiagID == diag::ext_duplicate_declspec ||
+ DiagID == diag::ext_warn_duplicate_declspec)
Diag(Tok, DiagID)
<< PrevSpec << FixItHint::CreateRemoval(Tok.getLocation());
else if (DiagID == diag::err_opencl_unknown_type_specifier) {
@@ -4272,7 +4287,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
SourceRange Range;
BaseType = ParseTypeName(&Range);
- if (!getLangOpts().ObjC2) {
+ if (!getLangOpts().ObjC) {
if (getLangOpts().CPlusPlus11)
Diag(StartLoc, diag::warn_cxx98_compat_enum_fixed_underlying_type);
else if (getLangOpts().CPlusPlus)
@@ -4660,7 +4675,7 @@ bool Parser::isTypeSpecifierQualifier() {
case tok::identifier: // foo::bar
if (TryAltiVecVectorToken())
return true;
- // Fall through.
+ LLVM_FALLTHROUGH;
case tok::kw_typename: // typename T::type
// Annotate typenames and C++ scope specifiers. If we get one, just
// recurse to handle whatever we get.
@@ -4739,7 +4754,7 @@ bool Parser::isTypeSpecifierQualifier() {
// GNU ObjC bizarre protocol extension: <proto1,proto2> with implicit 'id'.
case tok::less:
- return getLangOpts().ObjC1;
+ return getLangOpts().ObjC;
case tok::kw___cdecl:
case tok::kw___stdcall:
@@ -4790,11 +4805,11 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
case tok::identifier: // foo::bar
// Unfortunate hack to support "Class.factoryMethod" notation.
- if (getLangOpts().ObjC1 && NextToken().is(tok::period))
+ if (getLangOpts().ObjC && NextToken().is(tok::period))
return false;
if (TryAltiVecVectorToken())
return true;
- // Fall through.
+ LLVM_FALLTHROUGH;
case tok::kw_decltype: // decltype(T())::type
case tok::kw_typename: // typename T::type
// Annotate typenames and C++ scope specifiers. If we get one, just
@@ -4920,7 +4935,7 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
// GNU ObjC bizarre protocol extension: <proto1,proto2> with implicit 'id'.
case tok::less:
- return getLangOpts().ObjC1;
+ return getLangOpts().ObjC;
// typedef-name
case tok::annot_typename:
@@ -5761,7 +5776,7 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
if (D.getContext() == DeclaratorContext::MemberContext) {
// Objective-C++: Detect C++ keywords and try to prevent further errors by
// treating these keyword as valid member names.
- if (getLangOpts().ObjC1 && getLangOpts().CPlusPlus &&
+ if (getLangOpts().ObjC && getLangOpts().CPlusPlus &&
Tok.getIdentifierInfo() &&
Tok.getIdentifierInfo()->isCPlusPlusKeyword(getLangOpts())) {
Diag(getMissingDeclaratorIdLoc(D, Tok.getLocation()),
@@ -6146,13 +6161,14 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
: D.getContext() == DeclaratorContext::FileContext &&
D.getCXXScopeSpec().isValid() &&
Actions.CurContext->isRecord());
- Sema::CXXThisScopeRAII ThisScope(Actions,
- dyn_cast<CXXRecordDecl>(Actions.CurContext),
- DS.getTypeQualifiers() |
- (D.getDeclSpec().isConstexprSpecified() &&
- !getLangOpts().CPlusPlus14
- ? Qualifiers::Const : 0),
- IsCXX11MemberFunction);
+
+ Qualifiers Q = Qualifiers::fromCVRUMask(DS.getTypeQualifiers());
+ if (D.getDeclSpec().isConstexprSpecified() && !getLangOpts().CPlusPlus14)
+ Q.addConst();
+
+ Sema::CXXThisScopeRAII ThisScope(
+ Actions, dyn_cast<CXXRecordDecl>(Actions.CurContext), Q,
+ IsCXX11MemberFunction);
// Parse exception-specification[opt].
bool Delayed = D.isFirstDeclarationOfMember() &&
diff --git a/lib/Parse/ParseDeclCXX.cpp b/lib/Parse/ParseDeclCXX.cpp
index a86f7ed864..02c73979ba 100644
--- a/lib/Parse/ParseDeclCXX.cpp
+++ b/lib/Parse/ParseDeclCXX.cpp
@@ -24,7 +24,6 @@
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
-#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/ADT/SmallString.h"
using namespace clang;
@@ -33,24 +32,25 @@ using namespace clang;
/// may either be a top level namespace or a block-level namespace alias. If
/// there was an inline keyword, it has already been parsed.
///
-/// namespace-definition: [C++ 7.3: basic.namespace]
+/// namespace-definition: [C++: namespace.def]
/// named-namespace-definition
/// unnamed-namespace-definition
+/// nested-namespace-definition
+///
+/// named-namespace-definition:
+/// 'inline'[opt] 'namespace' attributes[opt] identifier '{'
+/// namespace-body '}'
///
/// unnamed-namespace-definition:
/// 'inline'[opt] 'namespace' attributes[opt] '{' namespace-body '}'
///
-/// named-namespace-definition:
-/// original-namespace-definition
-/// extension-namespace-definition
+/// nested-namespace-definition:
+/// 'namespace' enclosing-namespace-specifier '::' 'inline'[opt]
+/// identifier '{' namespace-body '}'
///
-/// original-namespace-definition:
-/// 'inline'[opt] 'namespace' identifier attributes[opt]
-/// '{' namespace-body '}'
-///
-/// extension-namespace-definition:
-/// 'inline'[opt] 'namespace' original-namespace-name
-/// '{' namespace-body '}'
+/// enclosing-namespace-specifier:
+/// identifier
+/// enclosing-namespace-specifier '::' 'inline'[opt] identifier
///
/// namespace-alias-definition: [C++ 7.3.2: namespace.alias]
/// 'namespace' identifier '=' qualified-namespace-specifier ';'
@@ -70,9 +70,8 @@ Parser::DeclGroupPtrTy Parser::ParseNamespace(DeclaratorContext Context,
SourceLocation IdentLoc;
IdentifierInfo *Ident = nullptr;
- std::vector<SourceLocation> ExtraIdentLoc;
- std::vector<IdentifierInfo*> ExtraIdent;
- std::vector<SourceLocation> ExtraNamespaceLoc;
+ InnerNamespaceInfoList ExtraNSs;
+ SourceLocation FirstNestedInlineLoc;
ParsedAttributesWithRange attrs(AttrFactory);
SourceLocation attrLoc;
@@ -88,15 +87,29 @@ Parser::DeclGroupPtrTy Parser::ParseNamespace(DeclaratorContext Context,
if (Tok.is(tok::identifier)) {
Ident = Tok.getIdentifierInfo();
IdentLoc = ConsumeToken(); // eat the identifier.
- while (Tok.is(tok::coloncolon) && NextToken().is(tok::identifier)) {
- ExtraNamespaceLoc.push_back(ConsumeToken());
- ExtraIdent.push_back(Tok.getIdentifierInfo());
- ExtraIdentLoc.push_back(ConsumeToken());
+ while (Tok.is(tok::coloncolon) &&
+ (NextToken().is(tok::identifier) ||
+ (NextToken().is(tok::kw_inline) &&
+ GetLookAheadToken(2).is(tok::identifier)))) {
+
+ InnerNamespaceInfo Info;
+ Info.NamespaceLoc = ConsumeToken();
+
+ if (Tok.is(tok::kw_inline)) {
+ Info.InlineLoc = ConsumeToken();
+ if (FirstNestedInlineLoc.isInvalid())
+ FirstNestedInlineLoc = Info.InlineLoc;
+ }
+
+ Info.Ident = Tok.getIdentifierInfo();
+ Info.IdentLoc = ConsumeToken();
+
+ ExtraNSs.push_back(Info);
}
}
// A nested namespace definition cannot have attributes.
- if (!ExtraNamespaceLoc.empty() && attrLoc.isValid())
+ if (!ExtraNSs.empty() && attrLoc.isValid())
Diag(attrLoc, diag::err_unexpected_nested_namespace_attribute);
// Read label attributes, if present.
@@ -138,13 +151,21 @@ Parser::DeclGroupPtrTy Parser::ParseNamespace(DeclaratorContext Context,
return nullptr;
}
- if (ExtraIdent.empty()) {
+ if (ExtraNSs.empty()) {
// Normal namespace definition, not a nested-namespace-definition.
} else if (InlineLoc.isValid()) {
Diag(InlineLoc, diag::err_inline_nested_namespace_definition);
+ } else if (getLangOpts().CPlusPlus2a) {
+ Diag(ExtraNSs[0].NamespaceLoc,
+ diag::warn_cxx14_compat_nested_namespace_definition);
+ if (FirstNestedInlineLoc.isValid())
+ Diag(FirstNestedInlineLoc,
+ diag::warn_cxx17_compat_inline_nested_namespace_definition);
} else if (getLangOpts().CPlusPlus17) {
- Diag(ExtraNamespaceLoc[0],
+ Diag(ExtraNSs[0].NamespaceLoc,
diag::warn_cxx14_compat_nested_namespace_definition);
+ if (FirstNestedInlineLoc.isValid())
+ Diag(FirstNestedInlineLoc, diag::ext_inline_nested_namespace_definition);
} else {
TentativeParsingAction TPA(*this);
SkipUntil(tok::r_brace, StopBeforeMatch);
@@ -152,26 +173,34 @@ Parser::DeclGroupPtrTy Parser::ParseNamespace(DeclaratorContext Context,
TPA.Revert();
if (!rBraceToken.is(tok::r_brace)) {
- Diag(ExtraNamespaceLoc[0], diag::ext_nested_namespace_definition)
- << SourceRange(ExtraNamespaceLoc.front(), ExtraIdentLoc.back());
+ Diag(ExtraNSs[0].NamespaceLoc, diag::ext_nested_namespace_definition)
+ << SourceRange(ExtraNSs.front().NamespaceLoc,
+ ExtraNSs.back().IdentLoc);
} else {
std::string NamespaceFix;
- for (std::vector<IdentifierInfo*>::iterator I = ExtraIdent.begin(),
- E = ExtraIdent.end(); I != E; ++I) {
- NamespaceFix += " { namespace ";
- NamespaceFix += (*I)->getName();
+ for (const auto &ExtraNS : ExtraNSs) {
+ NamespaceFix += " { ";
+ if (ExtraNS.InlineLoc.isValid())
+ NamespaceFix += "inline ";
+ NamespaceFix += "namespace ";
+ NamespaceFix += ExtraNS.Ident->getName();
}
std::string RBraces;
- for (unsigned i = 0, e = ExtraIdent.size(); i != e; ++i)
+ for (unsigned i = 0, e = ExtraNSs.size(); i != e; ++i)
RBraces += "} ";
- Diag(ExtraNamespaceLoc[0], diag::ext_nested_namespace_definition)
- << FixItHint::CreateReplacement(SourceRange(ExtraNamespaceLoc.front(),
- ExtraIdentLoc.back()),
- NamespaceFix)
+ Diag(ExtraNSs[0].NamespaceLoc, diag::ext_nested_namespace_definition)
+ << FixItHint::CreateReplacement(
+ SourceRange(ExtraNSs.front().NamespaceLoc,
+ ExtraNSs.back().IdentLoc),
+ NamespaceFix)
<< FixItHint::CreateInsertion(rBraceToken.getLocation(), RBraces);
}
+
+ // Warn about nested inline namespaces.
+ if (FirstNestedInlineLoc.isValid())
+ Diag(FirstNestedInlineLoc, diag::ext_inline_nested_namespace_definition);
}
// If we're still good, complain about inline namespaces in non-C++0x now.
@@ -192,8 +221,7 @@ Parser::DeclGroupPtrTy Parser::ParseNamespace(DeclaratorContext Context,
// Parse the contents of the namespace. This includes parsing recovery on
// any improperly nested namespaces.
- ParseInnerNamespace(ExtraIdentLoc, ExtraIdent, ExtraNamespaceLoc, 0,
- InlineLoc, attrs, T);
+ ParseInnerNamespace(ExtraNSs, 0, InlineLoc, attrs, T);
// Leave the namespace scope.
NamespaceScope.Exit();
@@ -206,13 +234,11 @@ Parser::DeclGroupPtrTy Parser::ParseNamespace(DeclaratorContext Context,
}
/// ParseInnerNamespace - Parse the contents of a namespace.
-void Parser::ParseInnerNamespace(std::vector<SourceLocation> &IdentLoc,
- std::vector<IdentifierInfo *> &Ident,
- std::vector<SourceLocation> &NamespaceLoc,
+void Parser::ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs,
unsigned int index, SourceLocation &InlineLoc,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker) {
- if (index == Ident.size()) {
+ if (index == InnerNSs.size()) {
while (!tryParseMisplacedModuleImport() && Tok.isNot(tok::r_brace) &&
Tok.isNot(tok::eof)) {
ParsedAttributesWithRange attrs(AttrFactory);
@@ -233,14 +259,13 @@ void Parser::ParseInnerNamespace(std::vector<SourceLocation> &IdentLoc,
ParseScope NamespaceScope(this, Scope::DeclScope);
UsingDirectiveDecl *ImplicitUsingDirectiveDecl = nullptr;
Decl *NamespcDecl = Actions.ActOnStartNamespaceDef(
- getCurScope(), SourceLocation(), NamespaceLoc[index], IdentLoc[index],
- Ident[index], Tracker.getOpenLocation(), attrs,
- ImplicitUsingDirectiveDecl);
+ getCurScope(), InnerNSs[index].InlineLoc, InnerNSs[index].NamespaceLoc,
+ InnerNSs[index].IdentLoc, InnerNSs[index].Ident,
+ Tracker.getOpenLocation(), attrs, ImplicitUsingDirectiveDecl);
assert(!ImplicitUsingDirectiveDecl &&
"nested namespace definition cannot define anonymous namespace");
- ParseInnerNamespace(IdentLoc, Ident, NamespaceLoc, ++index, InlineLoc,
- attrs, Tracker);
+ ParseInnerNamespace(InnerNSs, ++index, InlineLoc, attrs, Tracker);
NamespaceScope.Exit();
Actions.ActOnFinishNamespaceDef(NamespcDecl, Tracker.getCloseLocation());
@@ -365,7 +390,7 @@ Decl *Parser::ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context) {
case tok::r_brace:
if (!NestedModules)
break;
- // Fall through.
+ LLVM_FALLTHROUGH;
default:
ParsedAttributesWithRange attrs(AttrFactory);
MaybeParseCXX11Attributes(attrs);
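
The "// Fall through." comments replaced throughout this patch become LLVM_FALLTHROUGH, LLVM's portability macro from llvm/Support/Compiler.h that expands to [[fallthrough]] (or [[clang::fallthrough]]) where available. A minimal sketch of the idiom, not taken from the patch:

  #include "llvm/Support/Compiler.h"

  int classify(int K) {
    switch (K) {
    case 0:
      // Intentional fall-through, made explicit so implicit-fallthrough warnings stay quiet.
      LLVM_FALLTHROUGH;
    case 1:
      return 1;
    default:
      return 2;
    }
  }
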
@@ -2410,7 +2435,7 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject *TemplateDiags) {
if (Tok.is(tok::at)) {
- if (getLangOpts().ObjC1 && NextToken().isObjCAtKeyword(tok::objc_defs))
+ if (getLangOpts().ObjC && NextToken().isObjCAtKeyword(tok::objc_defs))
Diag(Tok, diag::err_at_defs_cxx);
else
Diag(Tok, diag::err_at_in_class);
@@ -3790,6 +3815,28 @@ IdentifierInfo *Parser::TryParseCXX11AttributeIdentifier(SourceLocation &Loc) {
}
return nullptr;
+ case tok::numeric_constant: {
+ // If we got a numeric constant, check to see if it comes from a macro that
+ // corresponds to the predefined __clang__ macro. If it does, warn the user
+ // and recover by pretending they said _Clang instead.
+ if (Tok.getLocation().isMacroID()) {
+ SmallString<8> ExpansionBuf;
+ SourceLocation ExpansionLoc =
+ PP.getSourceManager().getExpansionLoc(Tok.getLocation());
+ StringRef Spelling = PP.getSpelling(ExpansionLoc, ExpansionBuf);
+ if (Spelling == "__clang__") {
+ SourceRange TokRange(
+ ExpansionLoc,
+ PP.getSourceManager().getExpansionLoc(Tok.getEndLoc()));
+ Diag(Tok, diag::warn_wrong_clang_attr_namespace)
+ << FixItHint::CreateReplacement(TokRange, "_Clang");
+ Loc = ConsumeToken();
+ return &PP.getIdentifierTable().get("_Clang");
+ }
+ }
+ return nullptr;
+ }
+
case tok::ampamp: // 'and'
case tok::pipe: // 'bitor'
case tok::pipepipe: // 'or'
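
Illustrative input for the recovery added above: because __clang__ is a predefined macro expanding to 1, spelling it as an attribute scope reaches the parser as a numeric constant, and the new case diagnoses it and continues as if _Clang had been written (the attribute name below is chosen only for illustration):

  [[__clang__::annotate("example")]]  // warning with a fix-it suggesting [[_Clang::annotate("example")]]
  void g();
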
@@ -3868,7 +3915,7 @@ bool Parser::ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
return false;
}
- if (ScopeName && ScopeName->getName() == "gnu") {
+ if (ScopeName && (ScopeName->isStr("gnu") || ScopeName->isStr("__gnu__"))) {
// GNU-scoped attributes have some special cases to handle GNU-specific
// behaviors.
ParseGNUAttributeArgs(AttrName, AttrNameLoc, Attrs, EndLoc, ScopeName,
@@ -3878,10 +3925,9 @@ bool Parser::ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
unsigned NumArgs;
// Some Clang-scoped attributes have some special parsing behavior.
- if (ScopeName && ScopeName->getName() == "clang")
- NumArgs =
- ParseClangAttributeArgs(AttrName, AttrNameLoc, Attrs, EndLoc, ScopeName,
- ScopeLoc, Syntax);
+ if (ScopeName && (ScopeName->isStr("clang") || ScopeName->isStr("_Clang")))
+ NumArgs = ParseClangAttributeArgs(AttrName, AttrNameLoc, Attrs, EndLoc,
+ ScopeName, ScopeLoc, Syntax);
else
NumArgs =
ParseAttributeArgsCommon(AttrName, AttrNameLoc, Attrs, EndLoc,
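
With the two checks above, the macro-safe spellings of the scope names are accepted alongside the plain ones, for example (attribute choices are illustrative):

  [[__gnu__::aligned(8)]] int Buffer[4];      // handled like [[gnu::aligned(8)]]
  [[_Clang::annotate("tag")]] void Tagged();  // handled like [[clang::annotate("tag")]]
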
diff --git a/lib/Parse/ParseExpr.cpp b/lib/Parse/ParseExpr.cpp
index b9028bffa1..194b07df46 100644
--- a/lib/Parse/ParseExpr.cpp
+++ b/lib/Parse/ParseExpr.cpp
@@ -320,7 +320,7 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
// as an identifier in ParseObjCMessageExpressionBody. i.e., we support:
// [foo meth:0 and:0];
// [foo not_eq];
- if (getLangOpts().ObjC1 && getLangOpts().CPlusPlus &&
+ if (getLangOpts().ObjC && getLangOpts().CPlusPlus &&
Tok.isOneOf(tok::colon, tok::r_square) &&
OpToken.getIdentifierInfo() != nullptr) {
PP.EnterToken(Tok);
@@ -393,10 +393,11 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
}
}
- // Code completion for the right-hand side of an assignment expression
- // goes through a special hook that takes the left-hand side into account.
- if (Tok.is(tok::code_completion) && NextTokPrec == prec::Assignment) {
- Actions.CodeCompleteAssignmentRHS(getCurScope(), LHS.get());
+ // Code completion for the right-hand side of a binary expression goes
+ // through a special hook that takes the left-hand side into account.
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteBinaryRHS(getCurScope(), LHS.get(),
+ OpToken.getKind());
cutOffParsing();
return ExprError();
}
@@ -957,7 +958,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
SourceLocation ILoc = ConsumeToken();
// Support 'Class.property' and 'super.property' notation.
- if (getLangOpts().ObjC1 && Tok.is(tok::period) &&
+ if (getLangOpts().ObjC && Tok.is(tok::period) &&
(Actions.getTypeName(II, ILoc, getCurScope()) ||
// Allow the base to be 'super' if in an objc-method.
(&II == Ident_super && getCurScope()->isInObjcMethodScope()))) {
@@ -987,7 +988,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
// the token sequence is ill-formed. However, if there's a ':' or ']' after
// that identifier, this is probably a message send with a missing open
// bracket. Treat it as such.
- if (getLangOpts().ObjC1 && &II == Ident_super && !InMessageExpression &&
+ if (getLangOpts().ObjC && &II == Ident_super && !InMessageExpression &&
getCurScope()->isInObjcMethodScope() &&
((Tok.is(tok::identifier) &&
(NextToken().is(tok::colon) || NextToken().is(tok::r_square))) ||
@@ -1002,7 +1003,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
// send that's missing the opening '['. Recovery
// appropriately. Also take this path if we're performing code
// completion after an Objective-C class name.
- if (getLangOpts().ObjC1 &&
+ if (getLangOpts().ObjC &&
((Tok.is(tok::identifier) && !InMessageExpression) ||
Tok.is(tok::code_completion))) {
const Token& Next = NextToken();
@@ -1175,7 +1176,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
case tok::kw__Alignof: // unary-expression: '_Alignof' '(' type-name ')'
if (!getLangOpts().C11)
Diag(Tok, diag::ext_c11_alignment) << Tok.getName();
- // fallthrough
+ LLVM_FALLTHROUGH;
case tok::kw_alignof: // unary-expression: 'alignof' '(' type-id ')'
case tok::kw___alignof: // unary-expression: '__alignof' unary-expression
// unary-expression: '__alignof' '(' type-name ')'
@@ -1241,7 +1242,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
Ty.get(), nullptr);
break;
}
- // Fall through
+ LLVM_FALLTHROUGH;
case tok::annot_decltype:
case tok::kw_char:
@@ -1429,7 +1430,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
}
case tok::l_square:
if (getLangOpts().CPlusPlus11) {
- if (getLangOpts().ObjC1) {
+ if (getLangOpts().ObjC) {
// C++11 lambda expressions and Objective-C message sends both start with a
// square bracket. There are three possibilities here:
// we have a valid lambda expression, we have an invalid lambda
@@ -1443,11 +1444,11 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
Res = ParseLambdaExpression();
break;
}
- if (getLangOpts().ObjC1) {
+ if (getLangOpts().ObjC) {
Res = ParseObjCMessageExpression();
break;
}
- // FALL THROUGH.
+ LLVM_FALLTHROUGH;
default:
NotCastExpr = true;
return ExprError();
@@ -1511,7 +1512,7 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
// If we see identifier: after an expression, and we're not already in a
// message send, then this is probably a message send with a missing
// opening bracket '['.
- if (getLangOpts().ObjC1 && !InMessageExpression &&
+ if (getLangOpts().ObjC && !InMessageExpression &&
(NextToken().is(tok::colon) || NextToken().is(tok::r_square))) {
LHS = ParseObjCMessageExpressionBody(SourceLocation(), SourceLocation(),
nullptr, LHS.get());
@@ -1529,7 +1530,7 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
// actually another message send. In this case, do some look-ahead to see
// if the contents of the square brackets are obviously not a valid
// expression and recover by pretending there is no suffix.
- if (getLangOpts().ObjC1 && Tok.isAtStartOfLine() &&
+ if (getLangOpts().ObjC && Tok.isAtStartOfLine() &&
isSimpleObjCMessageExpression())
return LHS;
@@ -1793,7 +1794,7 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
// FIXME: Add support for explicit call of template constructor.
SourceLocation TemplateKWLoc;
UnqualifiedId Name;
- if (getLangOpts().ObjC2 && OpKind == tok::period &&
+ if (getLangOpts().ObjC && OpKind == tok::period &&
Tok.is(tok::kw_class)) {
// Objective-C++:
// After a '.' in a member access expression, treat the keyword
@@ -1809,7 +1810,8 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
/*EnteringContext=*/false,
/*AllowDestructorName=*/true,
/*AllowConstructorName=*/
- getLangOpts().MicrosoftExt,
+ getLangOpts().MicrosoftExt &&
+ SS.isNotEmpty(),
/*AllowDeductionGuide=*/false,
ObjectType, &TemplateKWLoc, Name)) {
(void)Actions.CorrectDelayedTyposInExpr(LHS);
@@ -2022,8 +2024,10 @@ ExprResult Parser::ParseUnaryExprOrTypeTraitExpression() {
CastRange);
UnaryExprOrTypeTrait ExprKind = UETT_SizeOf;
- if (OpTok.isOneOf(tok::kw_alignof, tok::kw___alignof, tok::kw__Alignof))
+ if (OpTok.isOneOf(tok::kw_alignof, tok::kw__Alignof))
ExprKind = UETT_AlignOf;
+ else if (OpTok.is(tok::kw___alignof))
+ ExprKind = UETT_PreferredAlignOf;
else if (OpTok.is(tok::kw_vec_step))
ExprKind = UETT_VecStep;
else if (OpTok.is(tok::kw___builtin_omp_required_simd_align))
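
After this change, alignof and _Alignof map to UETT_AlignOf (the ABI-required alignment) while the __alignof extension maps to the new UETT_PreferredAlignOf, which may report a larger, preferred alignment on some targets. A small sketch of the distinction:

  struct S { long double Member; };
  constexpr auto AbiAlign  = alignof(S);    // ABI alignment (UETT_AlignOf)
  constexpr auto PrefAlign = __alignof(S);  // preferred alignment (UETT_PreferredAlignOf);
                                            // equal to AbiAlign on many targets, larger on some
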
@@ -2336,7 +2340,7 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
}
// Diagnose use of bridge casts in non-arc mode.
- bool BridgeCast = (getLangOpts().ObjC2 &&
+ bool BridgeCast = (getLangOpts().ObjC &&
Tok.isOneOf(tok::kw___bridge,
tok::kw___bridge_transfer,
tok::kw___bridge_retained,
@@ -2446,7 +2450,7 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
// this is probably an Objective-C message send where the leading '[' is
// missing. Recover as if that were the case.
if (!DeclaratorInfo.isInvalidType() && Tok.is(tok::identifier) &&
- !InMessageExpression && getLangOpts().ObjC1 &&
+ !InMessageExpression && getLangOpts().ObjC &&
(NextToken().is(tok::colon) || NextToken().is(tok::r_square))) {
TypeResult Ty;
{
@@ -2532,7 +2536,7 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
}
// Reject the cast of super idiom in ObjC.
- if (Tok.is(tok::identifier) && getLangOpts().ObjC1 &&
+ if (Tok.is(tok::identifier) && getLangOpts().ObjC &&
Tok.getIdentifierInfo() == Ident_super &&
getCurScope()->isInObjcMethodScope() &&
GetLookAheadToken(1).isNot(tok::period)) {
diff --git a/lib/Parse/ParseExprCXX.cpp b/lib/Parse/ParseExprCXX.cpp
index 8a8cd818e2..359bcf9e71 100644
--- a/lib/Parse/ParseExprCXX.cpp
+++ b/lib/Parse/ParseExprCXX.cpp
@@ -235,22 +235,11 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
while (true) {
if (HasScopeSpecifier) {
- // C++ [basic.lookup.classref]p5:
- // If the qualified-id has the form
- //
- // ::class-name-or-namespace-name::...
- //
- // the class-name-or-namespace-name is looked up in global scope as a
- // class-name or namespace-name.
- //
- // To implement this, we clear out the object type as soon as we've
- // seen a leading '::' or part of a nested-name-specifier.
- ObjectType = nullptr;
-
if (Tok.is(tok::code_completion)) {
// Code completion for a nested-name-specifier, where the code
// completion token follows the '::'.
- Actions.CodeCompleteQualifiedId(getCurScope(), SS, EnteringContext);
+ Actions.CodeCompleteQualifiedId(getCurScope(), SS, EnteringContext,
+ ObjectType.get());
// Include code completion token into the range of the scope otherwise
// when we try to annotate the scope tokens the dangling code completion
// token will cause assertion in
@@ -259,6 +248,18 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
cutOffParsing();
return true;
}
+
+ // C++ [basic.lookup.classref]p5:
+ // If the qualified-id has the form
+ //
+ // ::class-name-or-namespace-name::...
+ //
+ // the class-name-or-namespace-name is looked up in global scope as a
+ // class-name or namespace-name.
+ //
+ // To implement this, we clear out the object type as soon as we've
+ // seen a leading '::' or part of a nested-name-specifier.
+ ObjectType = nullptr;
}
// nested-name-specifier:
@@ -774,7 +775,7 @@ Optional<unsigned> Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro,
// send. In that case, fail here and let the ObjC message
// expression parser perform the completion.
if (Tok.is(tok::code_completion) &&
- !(getLangOpts().ObjC1 && Intro.Default == LCD_None &&
+ !(getLangOpts().ObjC && Intro.Default == LCD_None &&
!Intro.Captures.empty())) {
Actions.CodeCompleteLambdaIntroducer(getCurScope(), Intro,
/*AfterAmpersand=*/false);
@@ -790,7 +791,7 @@ Optional<unsigned> Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro,
if (Tok.is(tok::code_completion)) {
// If we're in Objective-C++ and we have a bare '[', then this is more
// likely to be a message receiver.
- if (getLangOpts().ObjC1 && first)
+ if (getLangOpts().ObjC && first)
Actions.CodeCompleteObjCMessageReceiver(getCurScope());
else
Actions.CodeCompleteLambdaIntroducer(getCurScope(), Intro,
@@ -1773,7 +1774,13 @@ Sema::ConditionResult Parser::ParseCXXCondition(StmtResult *InitStmt,
// if (; true);
if (InitStmt && Tok.is(tok::semi)) {
WarnOnInit();
- SourceLocation SemiLoc = ConsumeToken();
+ SourceLocation SemiLoc = Tok.getLocation();
+ if (!Tok.hasLeadingEmptyMacro() && !SemiLoc.isMacroID()) {
+ Diag(SemiLoc, diag::warn_empty_init_statement)
+ << (CK == Sema::ConditionKind::Switch)
+ << FixItHint::CreateRemoval(SemiLoc);
+ }
+ ConsumeToken();
*InitStmt = Actions.ActOnNullStmt(SemiLoc);
return ParseCXXCondition(nullptr, Loc, CK);
}
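
Example inputs that trigger the new warn_empty_init_statement diagnostic added here; the selector picks the if/switch wording, and the fix-it removes the stray ';':

  int X = 1;
  if (; X > 0) {}   // C++17 if with an empty init-statement
  switch (; X) {}   // switch form of the same diagnostic
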
diff --git a/lib/Parse/ParseInit.cpp b/lib/Parse/ParseInit.cpp
index 0cd550bc6a..7742a5087c 100644
--- a/lib/Parse/ParseInit.cpp
+++ b/lib/Parse/ParseInit.cpp
@@ -209,7 +209,7 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator() {
// send) or send to 'super', parse this as a message send
// expression. We handle C++ and C separately, since C++ requires
// much more complicated parsing.
- if (getLangOpts().ObjC1 && getLangOpts().CPlusPlus) {
+ if (getLangOpts().ObjC && getLangOpts().CPlusPlus) {
// Send to 'super'.
if (Tok.is(tok::identifier) && Tok.getIdentifierInfo() == Ident_super &&
NextToken().isNot(tok::period) &&
@@ -242,7 +242,7 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator() {
// adopt the expression for further analysis below.
// FIXME: potentially-potentially evaluated expression above?
Idx = ExprResult(static_cast<Expr*>(TypeOrExpr));
- } else if (getLangOpts().ObjC1 && Tok.is(tok::identifier)) {
+ } else if (getLangOpts().ObjC && Tok.is(tok::identifier)) {
IdentifierInfo *II = Tok.getIdentifierInfo();
SourceLocation IILoc = Tok.getLocation();
ParsedType ReceiverType;
@@ -312,7 +312,7 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator() {
// tokens are '...' or ']' or an objc message send. If this is an objc
// message send, handle it now. An objc-message send is the start of
// an assignment-expression production.
- if (getLangOpts().ObjC1 && Tok.isNot(tok::ellipsis) &&
+ if (getLangOpts().ObjC && Tok.isNot(tok::ellipsis) &&
Tok.isNot(tok::r_square)) {
CheckArrayDesignatorSyntax(*this, Tok.getLocation(), Desig);
return ParseAssignmentExprWithObjCMessageExprStart(
diff --git a/lib/Parse/ParseObjc.cpp b/lib/Parse/ParseObjc.cpp
index d2e37e5ebc..c8d7bda3d6 100644
--- a/lib/Parse/ParseObjc.cpp
+++ b/lib/Parse/ParseObjc.cpp
@@ -263,7 +263,7 @@ Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
categoryId = Tok.getIdentifierInfo();
categoryLoc = ConsumeToken();
}
- else if (!getLangOpts().ObjC2) {
+ else if (!getLangOpts().ObjC) {
Diag(Tok, diag::err_expected)
<< tok::identifier; // missing category name.
return nullptr;
@@ -705,7 +705,7 @@ void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
break;
case tok::objc_property:
- if (!getLangOpts().ObjC2)
+ if (!getLangOpts().ObjC)
Diag(AtLoc, diag::err_objc_properties_require_objc2);
ObjCDeclSpec OCDS;
@@ -1103,7 +1103,7 @@ bool Parser::isTokIdentifier_in() const {
// FIXME: May have to do additional look-ahead to only allow for
// valid tokens following an 'in'; such as an identifier, unary operators,
// '[' etc.
- return (getLangOpts().ObjC2 && Tok.is(tok::identifier) &&
+ return (getLangOpts().ObjC && Tok.is(tok::identifier) &&
Tok.getIdentifierInfo() == ObjCTypeQuals[objc_in]);
}
@@ -1337,7 +1337,7 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
// If attributes exist before the method, parse them.
ParsedAttributes methodAttrs(AttrFactory);
- if (getLangOpts().ObjC2)
+ if (getLangOpts().ObjC)
MaybeParseGNUAttributes(methodAttrs);
MaybeParseCXX11Attributes(methodAttrs);
@@ -1364,7 +1364,7 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
SmallVector<DeclaratorChunk::ParamInfo, 8> CParamInfo;
if (Tok.isNot(tok::colon)) {
// If attributes exist after the method, parse them.
- if (getLangOpts().ObjC2)
+ if (getLangOpts().ObjC)
MaybeParseGNUAttributes(methodAttrs);
MaybeParseCXX11Attributes(methodAttrs);
@@ -1400,7 +1400,7 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
// If attributes exist before the argument name, parse them.
// Regardless, collect all the attributes we've parsed so far.
- if (getLangOpts().ObjC2)
+ if (getLangOpts().ObjC)
MaybeParseGNUAttributes(paramAttrs);
MaybeParseCXX11Attributes(paramAttrs);
ArgInfo.ArgAttrs = paramAttrs;
@@ -1484,7 +1484,7 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
// FIXME: Add support for optional parameter list...
// If attributes exist after the method, parse them.
- if (getLangOpts().ObjC2)
+ if (getLangOpts().ObjC)
MaybeParseGNUAttributes(methodAttrs);
MaybeParseCXX11Attributes(methodAttrs);
@@ -1550,7 +1550,7 @@ ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &Protocols,
TypeResult Parser::parseObjCProtocolQualifierType(SourceLocation &rAngleLoc) {
assert(Tok.is(tok::less) && "Protocol qualifiers start with '<'");
- assert(getLangOpts().ObjC1 && "Protocol qualifiers only exist in Objective-C");
+ assert(getLangOpts().ObjC && "Protocol qualifiers only exist in Objective-C");
SourceLocation lAngleLoc;
SmallVector<Decl *, 8> protocols;
@@ -2945,14 +2945,14 @@ bool Parser::ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr) {
/// This routine will only return true for a subset of valid message-send
/// expressions.
bool Parser::isSimpleObjCMessageExpression() {
- assert(Tok.is(tok::l_square) && getLangOpts().ObjC1 &&
+ assert(Tok.is(tok::l_square) && getLangOpts().ObjC &&
"Incorrect start for isSimpleObjCMessageExpression");
return GetLookAheadToken(1).is(tok::identifier) &&
GetLookAheadToken(2).is(tok::identifier);
}
bool Parser::isStartOfObjCClassMessageMissingOpenBracket() {
- if (!getLangOpts().ObjC1 || !NextToken().is(tok::identifier) ||
+ if (!getLangOpts().ObjC || !NextToken().is(tok::identifier) ||
InMessageExpression)
return false;
diff --git a/lib/Parse/ParseOpenMP.cpp b/lib/Parse/ParseOpenMP.cpp
index e8a17b5e12..1fb52801a9 100644
--- a/lib/Parse/ParseOpenMP.cpp
+++ b/lib/Parse/ParseOpenMP.cpp
@@ -495,7 +495,7 @@ public:
Sema &Actions = P.getActions();
// Allow 'this' within late-parsed attributes.
- ThisScope = new Sema::CXXThisScopeRAII(Actions, RD, /*TypeQuals=*/0,
+ ThisScope = new Sema::CXXThisScopeRAII(Actions, RD, Qualifiers(),
ND && ND->isCXXInstanceMember());
// If the Decl is templatized, add template parameters to scope.
@@ -644,6 +644,60 @@ Parser::ParseOMPDeclareSimdClauses(Parser::DeclGroupPtrTy Ptr,
LinModifiers, Steps, SourceRange(Loc, EndLoc));
}
+Parser::DeclGroupPtrTy Parser::ParseOMPDeclareTargetClauses() {
+ // OpenMP 4.5 syntax with list of entities.
+ Sema::NamedDeclSetType SameDirectiveDecls;
+ while (Tok.isNot(tok::annot_pragma_openmp_end)) {
+ OMPDeclareTargetDeclAttr::MapTypeTy MT = OMPDeclareTargetDeclAttr::MT_To;
+ if (Tok.is(tok::identifier)) {
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ StringRef ClauseName = II->getName();
+ // Parse 'to|link' clauses.
+ if (!OMPDeclareTargetDeclAttr::ConvertStrToMapTypeTy(ClauseName, MT)) {
+ Diag(Tok, diag::err_omp_declare_target_unexpected_clause) << ClauseName;
+ break;
+ }
+ ConsumeToken();
+ }
+ auto &&Callback = [this, MT, &SameDirectiveDecls](
+ CXXScopeSpec &SS, DeclarationNameInfo NameInfo) {
+ Actions.ActOnOpenMPDeclareTargetName(getCurScope(), SS, NameInfo, MT,
+ SameDirectiveDecls);
+ };
+ if (ParseOpenMPSimpleVarList(OMPD_declare_target, Callback,
+ /*AllowScopeSpecifier=*/true))
+ break;
+
+ // Consume optional ','.
+ if (Tok.is(tok::comma))
+ ConsumeToken();
+ }
+ SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch);
+ ConsumeAnyToken();
+ SmallVector<Decl *, 4> Decls(SameDirectiveDecls.begin(),
+ SameDirectiveDecls.end());
+ if (Decls.empty())
+ return DeclGroupPtrTy();
+ return Actions.BuildDeclaratorGroup(Decls);
+}
+
+void Parser::ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind,
+ SourceLocation DTLoc) {
+ if (DKind != OMPD_end_declare_target) {
+ Diag(Tok, diag::err_expected_end_declare_target);
+ Diag(DTLoc, diag::note_matching) << "'#pragma omp declare target'";
+ return;
+ }
+ ConsumeAnyToken();
+ if (Tok.isNot(tok::annot_pragma_openmp_end)) {
+ Diag(Tok, diag::warn_omp_extra_tokens_at_eol)
+ << getOpenMPDirectiveName(OMPD_end_declare_target);
+ SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch);
+ }
+ // Skip the last annot_pragma_openmp_end.
+ ConsumeAnyToken();
+}
+
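
The two helpers factored out here correspond to the two surface forms of the directive; roughly, in OpenMP 4.5 syntax:

  int GlobalCounter;
  extern int LookupTable[256];

  // Clause-list form, handled by ParseOMPDeclareTargetClauses():
  #pragma omp declare target to(GlobalCounter) link(LookupTable)

  // Paired form, whose terminator is checked by ParseOMPEndDeclareTargetDirective():
  #pragma omp declare target
  void DeviceHelper();
  #pragma omp end declare target
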
/// Parsing of declarative OpenMP directives.
///
/// threadprivate-directive:
@@ -785,43 +839,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
case OMPD_declare_target: {
SourceLocation DTLoc = ConsumeAnyToken();
if (Tok.isNot(tok::annot_pragma_openmp_end)) {
- // OpenMP 4.5 syntax with list of entities.
- Sema::NamedDeclSetType SameDirectiveDecls;
- while (Tok.isNot(tok::annot_pragma_openmp_end)) {
- OMPDeclareTargetDeclAttr::MapTypeTy MT =
- OMPDeclareTargetDeclAttr::MT_To;
- if (Tok.is(tok::identifier)) {
- IdentifierInfo *II = Tok.getIdentifierInfo();
- StringRef ClauseName = II->getName();
- // Parse 'to|link' clauses.
- if (!OMPDeclareTargetDeclAttr::ConvertStrToMapTypeTy(ClauseName,
- MT)) {
- Diag(Tok, diag::err_omp_declare_target_unexpected_clause)
- << ClauseName;
- break;
- }
- ConsumeToken();
- }
- auto &&Callback = [this, MT, &SameDirectiveDecls](
- CXXScopeSpec &SS, DeclarationNameInfo NameInfo) {
- Actions.ActOnOpenMPDeclareTargetName(getCurScope(), SS, NameInfo, MT,
- SameDirectiveDecls);
- };
- if (ParseOpenMPSimpleVarList(OMPD_declare_target, Callback,
- /*AllowScopeSpecifier=*/true))
- break;
-
- // Consume optional ','.
- if (Tok.is(tok::comma))
- ConsumeToken();
- }
- SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch);
- ConsumeAnyToken();
- SmallVector<Decl *, 4> Decls(SameDirectiveDecls.begin(),
- SameDirectiveDecls.end());
- if (Decls.empty())
- return DeclGroupPtrTy();
- return Actions.BuildDeclaratorGroup(Decls);
+ return ParseOMPDeclareTargetClauses();
}
// Skip the last annot_pragma_openmp_end.
@@ -860,19 +878,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
}
}
- if (DKind == OMPD_end_declare_target) {
- ConsumeAnyToken();
- if (Tok.isNot(tok::annot_pragma_openmp_end)) {
- Diag(Tok, diag::warn_omp_extra_tokens_at_eol)
- << getOpenMPDirectiveName(OMPD_end_declare_target);
- SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch);
- }
- // Skip the last annot_pragma_openmp_end.
- ConsumeAnyToken();
- } else {
- Diag(Tok, diag::err_expected_end_declare_target);
- Diag(DTLoc, diag::note_matching) << "'#pragma omp declare target'";
- }
+ ParseOMPEndDeclareTargetDirective(DKind, DTLoc);
Actions.ActOnFinishOpenMPDeclareTargetDirective();
return Actions.BuildDeclaratorGroup(Decls);
}
@@ -1337,11 +1343,15 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
break;
case OMPC_default:
case OMPC_proc_bind:
+ case OMPC_atomic_default_mem_order:
// OpenMP [2.14.3.1, Restrictions]
// Only a single default clause may be specified on a parallel, task or
// teams directive.
// OpenMP [2.5, parallel Construct, Restrictions]
// At most one proc_bind clause can appear on the directive.
+ // OpenMP [5.0, Requires directive, Restrictions]
+ // At most one atomic_default_mem_order clause can appear
+ // on the directive
if (!FirstClause) {
Diag(Tok, diag::err_omp_more_one_clause)
<< getOpenMPDirectiveName(DKind) << getOpenMPClauseName(CKind) << 0;
@@ -1379,6 +1389,9 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
case OMPC_simd:
case OMPC_nogroup:
case OMPC_unified_address:
+ case OMPC_unified_shared_memory:
+ case OMPC_reverse_offload:
+ case OMPC_dynamic_allocators:
// OpenMP [2.7.1, Restrictions, p. 9]
// Only one ordered clause can appear on a loop directive.
// OpenMP [2.7.1, Restrictions, C/C++, p. 4]
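
The new clause kinds above belong to the OpenMP 5.0 'requires' directive; illustrative uses, assuming the rest of that directive's support in this revision:

  #pragma omp requires atomic_default_mem_order(seq_cst)  // at most one per directive
  #pragma omp requires unified_shared_memory
  #pragma omp requires reverse_offload dynamic_allocators
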
@@ -1768,7 +1781,6 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
OpenMPVarListDataTy &Data) {
UnqualifiedId UnqualifiedReductionId;
bool InvalidReductionId = false;
- bool MapTypeModifierSpecified = false;
// Parse '('.
BalancedDelimiterTracker T(*this, tok::l_paren, tok::annot_pragma_openmp_end);
@@ -1855,7 +1867,6 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
getOpenMPSimpleClauseType(Kind, PP.getSpelling(Tok)))
: OMPC_MAP_unknown;
Data.DepLinMapLoc = Tok.getLocation();
- bool ColonExpected = false;
if (IsMapClauseModifierToken(Tok)) {
if (PP.LookAhead(0).is(tok::colon)) {
@@ -1871,8 +1882,6 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
if (Data.MapTypeModifier != OMPC_MAP_always) {
Diag(Tok, diag::err_omp_unknown_map_type_modifier);
Data.MapTypeModifier = OMPC_MAP_unknown;
- } else {
- MapTypeModifierSpecified = true;
}
ConsumeToken();
@@ -1897,8 +1906,6 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
if (Data.MapTypeModifier != OMPC_MAP_always) {
Diag(Tok, diag::err_omp_unknown_map_type_modifier);
Data.MapTypeModifier = OMPC_MAP_unknown;
- } else {
- MapTypeModifierSpecified = true;
}
ConsumeToken();
@@ -1927,17 +1934,13 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
if (Tok.is(tok::colon))
Data.ColonLoc = ConsumeToken();
- else if (ColonExpected)
- Diag(Tok, diag::warn_pragma_expected_colon) << "map type";
}
bool IsComma =
(Kind != OMPC_reduction && Kind != OMPC_task_reduction &&
Kind != OMPC_in_reduction && Kind != OMPC_depend && Kind != OMPC_map) ||
(Kind == OMPC_reduction && !InvalidReductionId) ||
- (Kind == OMPC_map && Data.MapType != OMPC_MAP_unknown &&
- (!MapTypeModifierSpecified ||
- Data.MapTypeModifier == OMPC_MAP_always)) ||
+ (Kind == OMPC_map && Data.MapType != OMPC_MAP_unknown) ||
(Kind == OMPC_depend && Data.DepKind != OMPC_DEPEND_unknown);
const bool MayHaveTail = (Kind == OMPC_linear || Kind == OMPC_aligned);
while (IsComma || (Tok.isNot(tok::r_paren) && Tok.isNot(tok::colon) &&
diff --git a/lib/Parse/ParsePragma.cpp b/lib/Parse/ParsePragma.cpp
index 26b363f280..e476c9b0f9 100644
--- a/lib/Parse/ParsePragma.cpp
+++ b/lib/Parse/ParsePragma.cpp
@@ -15,10 +15,10 @@
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Preprocessor.h"
+#include "clang/Parse/LoopHint.h"
#include "clang/Parse/ParseDiagnostic.h"
#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
-#include "clang/Sema/LoopHint.h"
#include "clang/Sema/Scope.h"
#include "llvm/ADT/StringSwitch.h"
using namespace clang;
@@ -261,119 +261,122 @@ struct PragmaAttributeHandler : public PragmaHandler {
} // end namespace
void Parser::initializePragmaHandlers() {
- AlignHandler.reset(new PragmaAlignHandler());
+ AlignHandler = llvm::make_unique<PragmaAlignHandler>();
PP.AddPragmaHandler(AlignHandler.get());
- GCCVisibilityHandler.reset(new PragmaGCCVisibilityHandler());
+ GCCVisibilityHandler = llvm::make_unique<PragmaGCCVisibilityHandler>();
PP.AddPragmaHandler("GCC", GCCVisibilityHandler.get());
- OptionsHandler.reset(new PragmaOptionsHandler());
+ OptionsHandler = llvm::make_unique<PragmaOptionsHandler>();
PP.AddPragmaHandler(OptionsHandler.get());
- PackHandler.reset(new PragmaPackHandler());
+ PackHandler = llvm::make_unique<PragmaPackHandler>();
PP.AddPragmaHandler(PackHandler.get());
- MSStructHandler.reset(new PragmaMSStructHandler());
+ MSStructHandler = llvm::make_unique<PragmaMSStructHandler>();
PP.AddPragmaHandler(MSStructHandler.get());
- UnusedHandler.reset(new PragmaUnusedHandler());
+ UnusedHandler = llvm::make_unique<PragmaUnusedHandler>();
PP.AddPragmaHandler(UnusedHandler.get());
- WeakHandler.reset(new PragmaWeakHandler());
+ WeakHandler = llvm::make_unique<PragmaWeakHandler>();
PP.AddPragmaHandler(WeakHandler.get());
- RedefineExtnameHandler.reset(new PragmaRedefineExtnameHandler());
+ RedefineExtnameHandler = llvm::make_unique<PragmaRedefineExtnameHandler>();
PP.AddPragmaHandler(RedefineExtnameHandler.get());
- FPContractHandler.reset(new PragmaFPContractHandler());
+ FPContractHandler = llvm::make_unique<PragmaFPContractHandler>();
PP.AddPragmaHandler("STDC", FPContractHandler.get());
- STDCFENVHandler.reset(new PragmaSTDC_FENV_ACCESSHandler());
+ STDCFENVHandler = llvm::make_unique<PragmaSTDC_FENV_ACCESSHandler>();
PP.AddPragmaHandler("STDC", STDCFENVHandler.get());
- STDCCXLIMITHandler.reset(new PragmaSTDC_CX_LIMITED_RANGEHandler());
+ STDCCXLIMITHandler = llvm::make_unique<PragmaSTDC_CX_LIMITED_RANGEHandler>();
PP.AddPragmaHandler("STDC", STDCCXLIMITHandler.get());
- STDCUnknownHandler.reset(new PragmaSTDC_UnknownHandler());
+ STDCUnknownHandler = llvm::make_unique<PragmaSTDC_UnknownHandler>();
PP.AddPragmaHandler("STDC", STDCUnknownHandler.get());
- PCSectionHandler.reset(new PragmaClangSectionHandler(Actions));
+ PCSectionHandler = llvm::make_unique<PragmaClangSectionHandler>(Actions);
PP.AddPragmaHandler("clang", PCSectionHandler.get());
if (getLangOpts().OpenCL) {
- OpenCLExtensionHandler.reset(new PragmaOpenCLExtensionHandler());
+ OpenCLExtensionHandler = llvm::make_unique<PragmaOpenCLExtensionHandler>();
PP.AddPragmaHandler("OPENCL", OpenCLExtensionHandler.get());
PP.AddPragmaHandler("OPENCL", FPContractHandler.get());
}
if (getLangOpts().OpenMP)
- OpenMPHandler.reset(new PragmaOpenMPHandler());
+ OpenMPHandler = llvm::make_unique<PragmaOpenMPHandler>();
else
- OpenMPHandler.reset(new PragmaNoOpenMPHandler());
+ OpenMPHandler = llvm::make_unique<PragmaNoOpenMPHandler>();
PP.AddPragmaHandler(OpenMPHandler.get());
if (getLangOpts().MicrosoftExt ||
getTargetInfo().getTriple().isOSBinFormatELF()) {
- MSCommentHandler.reset(new PragmaCommentHandler(Actions));
+ MSCommentHandler = llvm::make_unique<PragmaCommentHandler>(Actions);
PP.AddPragmaHandler(MSCommentHandler.get());
}
if (getLangOpts().MicrosoftExt) {
- MSDetectMismatchHandler.reset(new PragmaDetectMismatchHandler(Actions));
+ MSDetectMismatchHandler =
+ llvm::make_unique<PragmaDetectMismatchHandler>(Actions);
PP.AddPragmaHandler(MSDetectMismatchHandler.get());
- MSPointersToMembers.reset(new PragmaMSPointersToMembers());
+ MSPointersToMembers = llvm::make_unique<PragmaMSPointersToMembers>();
PP.AddPragmaHandler(MSPointersToMembers.get());
- MSVtorDisp.reset(new PragmaMSVtorDisp());
+ MSVtorDisp = llvm::make_unique<PragmaMSVtorDisp>();
PP.AddPragmaHandler(MSVtorDisp.get());
- MSInitSeg.reset(new PragmaMSPragma("init_seg"));
+ MSInitSeg = llvm::make_unique<PragmaMSPragma>("init_seg");
PP.AddPragmaHandler(MSInitSeg.get());
- MSDataSeg.reset(new PragmaMSPragma("data_seg"));
+ MSDataSeg = llvm::make_unique<PragmaMSPragma>("data_seg");
PP.AddPragmaHandler(MSDataSeg.get());
- MSBSSSeg.reset(new PragmaMSPragma("bss_seg"));
+ MSBSSSeg = llvm::make_unique<PragmaMSPragma>("bss_seg");
PP.AddPragmaHandler(MSBSSSeg.get());
- MSConstSeg.reset(new PragmaMSPragma("const_seg"));
+ MSConstSeg = llvm::make_unique<PragmaMSPragma>("const_seg");
PP.AddPragmaHandler(MSConstSeg.get());
- MSCodeSeg.reset(new PragmaMSPragma("code_seg"));
+ MSCodeSeg = llvm::make_unique<PragmaMSPragma>("code_seg");
PP.AddPragmaHandler(MSCodeSeg.get());
- MSSection.reset(new PragmaMSPragma("section"));
+ MSSection = llvm::make_unique<PragmaMSPragma>("section");
PP.AddPragmaHandler(MSSection.get());
- MSRuntimeChecks.reset(new PragmaMSRuntimeChecksHandler());
+ MSRuntimeChecks = llvm::make_unique<PragmaMSRuntimeChecksHandler>();
PP.AddPragmaHandler(MSRuntimeChecks.get());
- MSIntrinsic.reset(new PragmaMSIntrinsicHandler());
+ MSIntrinsic = llvm::make_unique<PragmaMSIntrinsicHandler>();
PP.AddPragmaHandler(MSIntrinsic.get());
- MSOptimize.reset(new PragmaMSOptimizeHandler());
+ MSOptimize = llvm::make_unique<PragmaMSOptimizeHandler>();
PP.AddPragmaHandler(MSOptimize.get());
}
if (getLangOpts().CUDA) {
- CUDAForceHostDeviceHandler.reset(
- new PragmaForceCUDAHostDeviceHandler(Actions));
+ CUDAForceHostDeviceHandler =
+ llvm::make_unique<PragmaForceCUDAHostDeviceHandler>(Actions);
PP.AddPragmaHandler("clang", CUDAForceHostDeviceHandler.get());
}
- OptimizeHandler.reset(new PragmaOptimizeHandler(Actions));
+ OptimizeHandler = llvm::make_unique<PragmaOptimizeHandler>(Actions);
PP.AddPragmaHandler("clang", OptimizeHandler.get());
- LoopHintHandler.reset(new PragmaLoopHintHandler());
+ LoopHintHandler = llvm::make_unique<PragmaLoopHintHandler>();
PP.AddPragmaHandler("clang", LoopHintHandler.get());
- UnrollHintHandler.reset(new PragmaUnrollHintHandler("unroll"));
+ UnrollHintHandler = llvm::make_unique<PragmaUnrollHintHandler>("unroll");
PP.AddPragmaHandler(UnrollHintHandler.get());
- NoUnrollHintHandler.reset(new PragmaUnrollHintHandler("nounroll"));
+ NoUnrollHintHandler = llvm::make_unique<PragmaUnrollHintHandler>("nounroll");
PP.AddPragmaHandler(NoUnrollHintHandler.get());
- UnrollAndJamHintHandler.reset(new PragmaUnrollHintHandler("unroll_and_jam"));
+ UnrollAndJamHintHandler =
+ llvm::make_unique<PragmaUnrollHintHandler>("unroll_and_jam");
PP.AddPragmaHandler(UnrollAndJamHintHandler.get());
- NoUnrollAndJamHintHandler.reset(
- new PragmaUnrollHintHandler("nounroll_and_jam"));
+ NoUnrollAndJamHintHandler =
+ llvm::make_unique<PragmaUnrollHintHandler>("nounroll_and_jam");
PP.AddPragmaHandler(NoUnrollAndJamHintHandler.get());
- FPHandler.reset(new PragmaFPHandler());
+ FPHandler = llvm::make_unique<PragmaFPHandler>();
PP.AddPragmaHandler("clang", FPHandler.get());
- AttributePragmaHandler.reset(new PragmaAttributeHandler(AttrFactory));
+ AttributePragmaHandler =
+ llvm::make_unique<PragmaAttributeHandler>(AttrFactory);
PP.AddPragmaHandler("clang", AttributePragmaHandler.get());
}
@@ -1133,7 +1136,7 @@ bool Parser::HandlePragmaLoopHint(LoopHint &Hint) {
namespace {
struct PragmaAttributeInfo {
- enum ActionType { Push, Pop };
+ enum ActionType { Push, Pop, Attribute };
ParsedAttributes &Attributes;
ActionType Action;
ArrayRef<Token> Tokens;
@@ -1394,8 +1397,16 @@ void Parser::HandlePragmaAttribute() {
return;
}
// Parse the actual attribute with its arguments.
- assert(Info->Action == PragmaAttributeInfo::Push &&
+ assert((Info->Action == PragmaAttributeInfo::Push ||
+ Info->Action == PragmaAttributeInfo::Attribute) &&
"Unexpected #pragma attribute command");
+
+ if (Info->Action == PragmaAttributeInfo::Push && Info->Tokens.empty()) {
+ ConsumeAnnotationToken();
+ Actions.ActOnPragmaAttributeEmptyPush(PragmaLoc);
+ return;
+ }
+
PP.EnterTokenStream(Info->Tokens, /*DisableMacroExpansion=*/false);
ConsumeAnnotationToken();
@@ -1542,8 +1553,12 @@ void Parser::HandlePragmaAttribute() {
// Consume the eof terminator token.
ConsumeToken();
- Actions.ActOnPragmaAttributePush(Attribute, PragmaLoc,
- std::move(SubjectMatchRules));
+  // Handle a mixed push/attribute by desugaring to a push, then an attribute.
+ if (Info->Action == PragmaAttributeInfo::Push)
+ Actions.ActOnPragmaAttributeEmptyPush(PragmaLoc);
+
+ Actions.ActOnPragmaAttributeAttribute(Attribute, PragmaLoc,
+ std::move(SubjectMatchRules));
}
// #pragma GCC visibility comes in two variants:
@@ -3104,6 +3119,8 @@ void PragmaForceCUDAHostDeviceHandler::HandlePragma(
/// The syntax is:
/// \code
/// #pragma clang attribute push(attribute, subject-set)
+/// #pragma clang attribute push
+/// #pragma clang attribute (attribute, subject-set)
/// #pragma clang attribute pop
/// \endcode
///
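
A worked example of the two newly documented forms, combining a bare push with a standalone attribute line (the attribute and subject set are chosen for illustration):

  #pragma clang attribute push
  #pragma clang attribute (__attribute__((annotate("kernel"))), apply_to = function)
  void f();  // receives annotate("kernel")
  void g();  // receives annotate("kernel")
  #pragma clang attribute pop
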
@@ -3122,25 +3139,33 @@ void PragmaAttributeHandler::HandlePragma(Preprocessor &PP,
auto *Info = new (PP.getPreprocessorAllocator())
PragmaAttributeInfo(AttributesForPragmaAttribute);
- // Parse the 'push' or 'pop'.
- if (Tok.isNot(tok::identifier)) {
- PP.Diag(Tok.getLocation(), diag::err_pragma_attribute_expected_push_pop);
+ if (!Tok.isOneOf(tok::identifier, tok::l_paren)) {
+ PP.Diag(Tok.getLocation(),
+ diag::err_pragma_attribute_expected_push_pop_paren);
return;
}
- const auto *II = Tok.getIdentifierInfo();
- if (II->isStr("push"))
- Info->Action = PragmaAttributeInfo::Push;
- else if (II->isStr("pop"))
- Info->Action = PragmaAttributeInfo::Pop;
+
+ // Determine what action this pragma clang attribute represents.
+ if (Tok.is(tok::l_paren))
+ Info->Action = PragmaAttributeInfo::Attribute;
else {
- PP.Diag(Tok.getLocation(), diag::err_pragma_attribute_invalid_argument)
- << PP.getSpelling(Tok);
- return;
+ const IdentifierInfo *II = Tok.getIdentifierInfo();
+ if (II->isStr("push"))
+ Info->Action = PragmaAttributeInfo::Push;
+ else if (II->isStr("pop"))
+ Info->Action = PragmaAttributeInfo::Pop;
+ else {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_attribute_invalid_argument)
+ << PP.getSpelling(Tok);
+ return;
+ }
+
+ PP.Lex(Tok);
}
- PP.Lex(Tok);
// Parse the actual attribute.
- if (Info->Action == PragmaAttributeInfo::Push) {
+ if ((Info->Action == PragmaAttributeInfo::Push && Tok.isNot(tok::eod)) ||
+ Info->Action == PragmaAttributeInfo::Attribute) {
if (Tok.isNot(tok::l_paren)) {
PP.Diag(Tok.getLocation(), diag::err_expected) << tok::l_paren;
return;
diff --git a/lib/Parse/ParseStmt.cpp b/lib/Parse/ParseStmt.cpp
index ba7c808838..313793c3e8 100644
--- a/lib/Parse/ParseStmt.cpp
+++ b/lib/Parse/ParseStmt.cpp
@@ -15,10 +15,10 @@
#include "clang/AST/PrettyDeclStackTrace.h"
#include "clang/Basic/Attributes.h"
#include "clang/Basic/PrettyStackTrace.h"
+#include "clang/Parse/LoopHint.h"
#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/DeclSpec.h"
-#include "clang/Sema/LoopHint.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/TypoCorrection.h"
using namespace clang;
@@ -930,6 +930,34 @@ void Parser::ParseCompoundStatementLeadingPragmas() {
}
+/// Consume any extra semi-colons resulting in null statements,
+/// returning true if any tok::semi were consumed.
+bool Parser::ConsumeNullStmt(StmtVector &Stmts) {
+ if (!Tok.is(tok::semi))
+ return false;
+
+ SourceLocation StartLoc = Tok.getLocation();
+ SourceLocation EndLoc;
+
+ while (Tok.is(tok::semi) && !Tok.hasLeadingEmptyMacro() &&
+ Tok.getLocation().isValid() && !Tok.getLocation().isMacroID()) {
+ EndLoc = Tok.getLocation();
+
+    // Don't just ConsumeToken() this tok::semi, do store it in the AST.
+ StmtResult R = ParseStatementOrDeclaration(Stmts, ACK_Any);
+ if (R.isUsable())
+ Stmts.push_back(R.get());
+ }
+
+ // Did not consume any extra semi.
+ if (EndLoc.isInvalid())
+ return false;
+
+ Diag(StartLoc, diag::warn_null_statement)
+ << FixItHint::CreateRemoval(SourceRange(StartLoc, EndLoc));
+ return true;
+}
+
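
Inputs the new ConsumeNullStmt() path diagnoses: redundant semicolons that form null statements outside macros, each reported via warn_null_statement with a removal fix-it:

  void f(int X) {
    X = 1;;   // the extra ';' forms a null statement and is warned about
    if (X) {}
    ;         // likewise
  }
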
/// ParseCompoundStatementBody - Parse a sequence of statements and invoke the
/// ActOnCompoundStmt action. This expects the '{' to be the current token, and
/// consume the '}' at the end of the block. It does not manipulate the scope
@@ -992,6 +1020,9 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
continue;
}
+ if (ConsumeNullStmt(Stmts))
+ continue;
+
StmtResult R;
if (Tok.isNot(tok::kw___extension__)) {
R = ParseStatementOrDeclaration(Stmts, ACK_Any);
@@ -1542,7 +1573,7 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
}
bool C99orCXXorObjC = getLangOpts().C99 || getLangOpts().CPlusPlus ||
- getLangOpts().ObjC1;
+ getLangOpts().ObjC;
// C99 6.8.5p5 - In C99, the for statement is a block. This is not
// the case for C90. Start the loop scope.
@@ -1588,10 +1619,15 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
ParsedAttributesWithRange attrs(AttrFactory);
MaybeParseCXX11Attributes(attrs);
+ SourceLocation EmptyInitStmtSemiLoc;
+
// Parse the first part of the for specifier.
if (Tok.is(tok::semi)) { // for (;
ProhibitAttributes(attrs);
// no first part, eat the ';'.
+ SourceLocation SemiLoc = Tok.getLocation();
+ if (!Tok.hasLeadingEmptyMacro() && !SemiLoc.isMacroID())
+ EmptyInitStmtSemiLoc = SemiLoc;
ConsumeToken();
} else if (getLangOpts().CPlusPlus && Tok.is(tok::identifier) &&
isForRangeIdentifier()) {
@@ -1723,6 +1759,11 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
: diag::ext_for_range_init_stmt)
<< (FirstPart.get() ? FirstPart.get()->getSourceRange()
: SourceRange());
+ if (EmptyInitStmtSemiLoc.isValid()) {
+ Diag(EmptyInitStmtSemiLoc, diag::warn_empty_init_statement)
+ << /*for-loop*/ 2
+ << FixItHint::CreateRemoval(EmptyInitStmtSemiLoc);
+ }
}
} else {
ExprResult SecondExpr = ParseExpression();
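
With the tracking added above, an empty init-statement in a range-based for also gets the warn_empty_init_statement diagnostic (the /*for-loop*/ 2 selector); an illustrative C++2a-style input:

  #include <vector>
  void Walk(const std::vector<int> &V) {
    for (; int X : V)  // empty init-statement before the range declaration; fix-it removes ';'
      (void)X;
  }
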
diff --git a/lib/Parse/ParseTemplate.cpp b/lib/Parse/ParseTemplate.cpp
index 26709a5aaa..e0a7cc6e85 100644
--- a/lib/Parse/ParseTemplate.cpp
+++ b/lib/Parse/ParseTemplate.cpp
@@ -1381,26 +1381,37 @@ void Parser::ParseLateTemplatedFuncDef(LateParsedTemplate &LPT) {
SmallVector<ParseScope*, 4> TemplateParamScopeStack;
- // Get the list of DeclContexts to reenter.
- SmallVector<DeclContext*, 4> DeclContextsToReenter;
+ // Get the list of DeclContexts to reenter. For inline methods, we only want
+ // to push the DeclContext of the outermost class. This matches the way the
+ // parser normally parses bodies of inline methods when the outermost class is
+ // complete.
+ struct ContainingDC {
+ ContainingDC(DeclContext *DC, bool ShouldPush) : Pair(DC, ShouldPush) {}
+ llvm::PointerIntPair<DeclContext *, 1, bool> Pair;
+ DeclContext *getDC() { return Pair.getPointer(); }
+ bool shouldPushDC() { return Pair.getInt(); }
+ };
+ SmallVector<ContainingDC, 4> DeclContextsToReenter;
DeclContext *DD = FunD;
+ DeclContext *NextContaining = Actions.getContainingDC(DD);
while (DD && !DD->isTranslationUnit()) {
- DeclContextsToReenter.push_back(DD);
+ bool ShouldPush = DD == NextContaining;
+ DeclContextsToReenter.push_back({DD, ShouldPush});
+ if (ShouldPush)
+ NextContaining = Actions.getContainingDC(DD);
DD = DD->getLexicalParent();
}
// Reenter template scopes from outermost to innermost.
- SmallVectorImpl<DeclContext *>::reverse_iterator II =
- DeclContextsToReenter.rbegin();
- for (; II != DeclContextsToReenter.rend(); ++II) {
- TemplateParamScopeStack.push_back(new ParseScope(this,
- Scope::TemplateParamScope));
- unsigned NumParamLists =
- Actions.ActOnReenterTemplateScope(getCurScope(), cast<Decl>(*II));
+ for (ContainingDC CDC : reverse(DeclContextsToReenter)) {
+ TemplateParamScopeStack.push_back(
+ new ParseScope(this, Scope::TemplateParamScope));
+ unsigned NumParamLists = Actions.ActOnReenterTemplateScope(
+ getCurScope(), cast<Decl>(CDC.getDC()));
CurTemplateDepthTracker.addDepth(NumParamLists);
- if (*II != FunD) {
+ if (CDC.shouldPushDC()) {
TemplateParamScopeStack.push_back(new ParseScope(this, Scope::DeclScope));
- Actions.PushDeclContext(Actions.getCurScope(), *II);
+ Actions.PushDeclContext(Actions.getCurScope(), CDC.getDC());
}
}
diff --git a/lib/Parse/ParseTentative.cpp b/lib/Parse/ParseTentative.cpp
index 2b5e266104..de39e0675f 100644
--- a/lib/Parse/ParseTentative.cpp
+++ b/lib/Parse/ParseTentative.cpp
@@ -159,7 +159,7 @@ Parser::TPResult Parser::TryConsumeDeclarationSpecifier() {
ConsumeToken();
break;
}
- // Fall through.
+ LLVM_FALLTHROUGH;
case tok::kw_typeof:
case tok::kw___attribute:
case tok::kw___underlying_type: {
@@ -219,11 +219,11 @@ Parser::TPResult Parser::TryConsumeDeclarationSpecifier() {
case tok::annot_cxxscope:
ConsumeAnnotationToken();
- // Fall through.
+ LLVM_FALLTHROUGH;
default:
ConsumeAnyToken();
- if (getLangOpts().ObjC1 && Tok.is(tok::less))
+ if (getLangOpts().ObjC && Tok.is(tok::less))
return TryParseProtocolQualifiers();
break;
}
@@ -649,7 +649,7 @@ Parser::isCXX11AttributeSpecifier(bool Disambiguate,
return CAK_NotAttributeSpecifier;
// No tentative parsing if we don't need to look for ']]' or a lambda.
- if (!Disambiguate && !getLangOpts().ObjC1)
+ if (!Disambiguate && !getLangOpts().ObjC)
return CAK_AttributeSpecifier;
RevertingTentativeParsingAction PA(*this);
@@ -658,7 +658,7 @@ Parser::isCXX11AttributeSpecifier(bool Disambiguate,
ConsumeBracket();
// Outside Obj-C++11, treat anything with a matching ']]' as an attribute.
- if (!getLangOpts().ObjC1) {
+ if (!getLangOpts().ObjC) {
ConsumeBracket();
bool IsAttribute = SkipUntil(tok::r_square);
@@ -1160,8 +1160,8 @@ public:
// Reject any candidate that only resolves to instance members since they
// aren't viable as standalone identifiers instead of member references.
if (Candidate.isResolved() && !Candidate.isKeyword() &&
- std::all_of(Candidate.begin(), Candidate.end(),
- [](NamedDecl *ND) { return ND->isCXXInstanceMember(); }))
+ llvm::all_of(Candidate,
+ [](NamedDecl *ND) { return ND->isCXXInstanceMember(); }))
return false;
return CorrectionCandidateCallback::ValidateCandidate(Candidate);
@@ -1286,7 +1286,7 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
const Token &Next = NextToken();
// In 'foo bar', 'foo' is always a type name outside of Objective-C.
- if (!getLangOpts().ObjC1 && Next.is(tok::identifier))
+ if (!getLangOpts().ObjC && Next.is(tok::identifier))
return TPResult::True;
if (Next.isNot(tok::coloncolon) && Next.isNot(tok::less)) {
@@ -1352,8 +1352,8 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
if (Next.isOneOf(tok::kw_new, // ::new
tok::kw_delete)) // ::delete
return TPResult::False;
+ LLVM_FALLTHROUGH;
}
- // Fall through.
case tok::kw___super:
case tok::kw_decltype:
// Annotate typenames and C++ scope specifiers. If we get one, just
@@ -1559,7 +1559,7 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
case tok::annot_typename:
case_typename:
// In Objective-C, we might have a protocol-qualified type.
- if (getLangOpts().ObjC1 && NextToken().is(tok::less)) {
+ if (getLangOpts().ObjC && NextToken().is(tok::less)) {
// Tentatively parse the protocol qualifiers.
RevertingTentativeParsingAction PA(*this);
ConsumeAnyToken(); // The type token
diff --git a/lib/Parse/Parser.cpp b/lib/Parse/Parser.cpp
index 34ed96fb7a..a93db799f8 100644
--- a/lib/Parse/Parser.cpp
+++ b/lib/Parse/Parser.cpp
@@ -341,7 +341,7 @@ bool Parser::SkipUntil(ArrayRef<tok::TokenKind> Toks, SkipUntilFlags Flags) {
case tok::semi:
if (HasFlagsSet(Flags, StopAtSemi))
return false;
- // FALL THROUGH.
+ LLVM_FALLTHROUGH;
default:
// Skip this token.
ConsumeAnyToken();
@@ -443,7 +443,7 @@ void Parser::Initialize() {
// Initialization for Objective-C context sensitive keywords recognition.
// Referenced in Parser::ParseObjCTypeQualifierList.
- if (getLangOpts().ObjC1) {
+ if (getLangOpts().ObjC) {
ObjCTypeQuals[objc_in] = &PP.getIdentifierTable().get("in");
ObjCTypeQuals[objc_out] = &PP.getIdentifierTable().get("out");
ObjCTypeQuals[objc_inout] = &PP.getIdentifierTable().get("inout");
@@ -747,7 +747,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
return ParseObjCAtDirectives(attrs);
case tok::minus:
case tok::plus:
- if (!getLangOpts().ObjC1) {
+ if (!getLangOpts().ObjC) {
Diag(Tok, diag::err_expected_external_declaration);
ConsumeToken();
return nullptr;
@@ -978,7 +978,7 @@ Parser::ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
// ObjC2 allows prefix attributes on class interfaces and protocols.
// FIXME: This still needs better diagnostics. We should only accept
// attributes here, no types, etc.
- if (getLangOpts().ObjC2 && Tok.is(tok::at)) {
+ if (getLangOpts().ObjC && Tok.is(tok::at)) {
SourceLocation AtLoc = ConsumeToken(); // the "@"
if (!Tok.isObjCAtKeyword(tok::objc_interface) &&
!Tok.isObjCAtKeyword(tok::objc_protocol)) {
@@ -1522,7 +1522,7 @@ Parser::TryAnnotateName(bool IsAddressOfOperand,
// Look up and classify the identifier. We don't perform any typo-correction
// after a scope specifier, because in general we can't recover from typos
- // there (eg, after correcting 'A::tempalte B<X>::C' [sic], we would need to
+ // there (eg, after correcting 'A::template B<X>::C' [sic], we would need to
// jump back into scope specifier parsing).
Sema::NameClassification Classification = Actions.ClassifyName(
getCurScope(), SS, Name, NameLoc, Next, IsAddressOfOperand,
@@ -1554,7 +1554,7 @@ Parser::TryAnnotateName(bool IsAddressOfOperand,
/// An Objective-C object type followed by '<' is a specialization of
/// a parameterized class type or a protocol-qualified type.
ParsedType Ty = Classification.getType();
- if (getLangOpts().ObjC1 && NextToken().is(tok::less) &&
+ if (getLangOpts().ObjC && NextToken().is(tok::less) &&
(Ty.get()->isObjCObjectType() ||
Ty.get()->isObjCObjectPointerType())) {
// Consume the name.
@@ -1594,7 +1594,7 @@ Parser::TryAnnotateName(bool IsAddressOfOperand,
AnnotateScopeToken(SS, !WasScopeAnnotation);
return ANK_TemplateName;
}
- // Fall through.
+ LLVM_FALLTHROUGH;
case Sema::NC_VarTemplate:
case Sema::NC_FunctionTemplate: {
// We have a type, variable or function template followed by '<'.
@@ -1781,7 +1781,7 @@ bool Parser::TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
/// An Objective-C object type followed by '<' is a specialization of
/// a parameterized class type or a protocol-qualified type.
- if (getLangOpts().ObjC1 && NextToken().is(tok::less) &&
+ if (getLangOpts().ObjC && NextToken().is(tok::less) &&
(Ty.get()->isObjCObjectType() ||
Ty.get()->isObjCObjectPointerType())) {
// Consume the name.
diff --git a/lib/Rewrite/HTMLRewrite.cpp b/lib/Rewrite/HTMLRewrite.cpp
index 12d7a16a2f..2088d4571a 100644
--- a/lib/Rewrite/HTMLRewrite.cpp
+++ b/lib/Rewrite/HTMLRewrite.cpp
@@ -477,7 +477,7 @@ void html::SyntaxHighlight(Rewriter &R, FileID FID, const Preprocessor &PP) {
// Chop off the L, u, U or 8 prefix
++TokOffs;
--TokLen;
- // FALL THROUGH.
+ LLVM_FALLTHROUGH;
case tok::string_literal:
// FIXME: Exclude the optional ud-suffix from the highlighted range.
HighlightRange(RB, TokOffs, TokOffs+TokLen, BufferStart,
diff --git a/lib/Rewrite/RewriteRope.cpp b/lib/Rewrite/RewriteRope.cpp
index 5bc79f3edd..e3b47a1c52 100644
--- a/lib/Rewrite/RewriteRope.cpp
+++ b/lib/Rewrite/RewriteRope.cpp
@@ -59,7 +59,7 @@ using namespace clang;
///
/// RopePieceBTreeLeaf - Directly manages up to '2*WidthFactor' RopePiece
/// nodes. This directly represents a chunk of the string with those
-/// RopePieces contatenated.
+/// RopePieces concatenated.
/// RopePieceBTreeInterior - An interior node in the B+ Tree, which manages
/// up to '2*WidthFactor' other nodes in the tree.
@@ -128,7 +128,7 @@ namespace {
/// RopePieceBTreeLeaf - Directly manages up to '2*WidthFactor' RopePiece
/// nodes. This directly represents a chunk of the string with those
- /// RopePieces contatenated. Since this is a B+Tree, all values (in this case
+ /// RopePieces concatenated. Since this is a B+Tree, all values (in this case
/// instances of RopePiece) are stored in leaves like this. To make iteration
/// over the leaves efficient, they maintain a singly linked list through the
/// NextLeaf field. This allows the B+Tree forward iterator to be constant
diff --git a/lib/Sema/AnalysisBasedWarnings.cpp b/lib/Sema/AnalysisBasedWarnings.cpp
index ab46554485..3b6cbe9469 100644
--- a/lib/Sema/AnalysisBasedWarnings.cpp
+++ b/lib/Sema/AnalysisBasedWarnings.cpp
@@ -1309,11 +1309,10 @@ static bool isInLoop(const ASTContext &Ctx, const ParentMap &PM,
case Stmt::ObjCForCollectionStmtClass:
return true;
case Stmt::DoStmtClass: {
- const Expr *Cond = cast<DoStmt>(S)->getCond();
- llvm::APSInt Val;
- if (!Cond->EvaluateAsInt(Val, Ctx))
+ Expr::EvalResult Result;
+ if (!cast<DoStmt>(S)->getCond()->EvaluateAsInt(Result, Ctx))
return true;
- return Val.getBoolValue();
+ return Result.Val.getInt().getBoolValue();
}
default:
break;
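
The change reflects the updated Expr::EvaluateAsInt interface, which now reports through Expr::EvalResult rather than filling an llvm::APSInt directly; condensed, the new call pattern is:

  // Old: llvm::APSInt Val; if (Cond->EvaluateAsInt(Val, Ctx)) use(Val);
  Expr::EvalResult Result;
  if (Cond->EvaluateAsInt(Result, Ctx)) {
    llvm::APSInt Val = Result.Val.getInt();
    // e.g. Val.getBoolValue()
  }
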
diff --git a/lib/Sema/CMakeLists.txt b/lib/Sema/CMakeLists.txt
index 3d21d79f2b..5f20af01fb 100644
--- a/lib/Sema/CMakeLists.txt
+++ b/lib/Sema/CMakeLists.txt
@@ -5,6 +5,7 @@ set(LLVM_LINK_COMPONENTS
if (MSVC)
set_source_files_properties(SemaDeclAttr.cpp PROPERTIES COMPILE_FLAGS /bigobj)
set_source_files_properties(SemaExpr.cpp PROPERTIES COMPILE_FLAGS /bigobj)
+ set_source_files_properties(SemaExprCXX.cpp PROPERTIES COMPILE_FLAGS /bigobj)
set_source_files_properties(SemaTemplate.cpp PROPERTIES COMPILE_FLAGS /bigobj)
endif()
diff --git a/lib/Sema/CodeCompleteConsumer.cpp b/lib/Sema/CodeCompleteConsumer.cpp
index 754169fe5f..92e65c4b81 100644
--- a/lib/Sema/CodeCompleteConsumer.cpp
+++ b/lib/Sema/CodeCompleteConsumer.cpp
@@ -49,6 +49,8 @@ bool CodeCompletionContext::wantConstructorResults() const {
case CCC_Expression:
case CCC_ObjCMessageReceiver:
case CCC_ParenthesizedExpression:
+ case CCC_Symbol:
+ case CCC_SymbolOrNewName:
return true;
case CCC_TopLevel:
@@ -65,8 +67,7 @@ bool CodeCompletionContext::wantConstructorResults() const {
case CCC_ObjCProtocolName:
case CCC_Namespace:
case CCC_Type:
- case CCC_Name:
- case CCC_PotentiallyQualifiedName:
+ case CCC_NewName:
case CCC_MacroName:
case CCC_MacroNameUse:
case CCC_PreprocessorExpression:
@@ -128,10 +129,12 @@ StringRef clang::getCompletionKindString(CodeCompletionContext::Kind Kind) {
return "Namespace";
case CCKind::CCC_Type:
return "Type";
- case CCKind::CCC_Name:
- return "Name";
- case CCKind::CCC_PotentiallyQualifiedName:
- return "PotentiallyQualifiedName";
+ case CCKind::CCC_NewName:
+ return "NewName";
+ case CCKind::CCC_Symbol:
+ return "Symbol";
+ case CCKind::CCC_SymbolOrNewName:
+ return "SymbolOrNewName";
case CCKind::CCC_MacroName:
return "MacroName";
case CCKind::CCC_MacroNameUse:
@@ -269,23 +272,18 @@ CodeCompletionString::Chunk::CreateResultType(const char *ResultType) {
return Chunk(CK_ResultType, ResultType);
}
-CodeCompletionString::Chunk
-CodeCompletionString::Chunk::CreateCurrentParameter(
- const char *CurrentParameter) {
+CodeCompletionString::Chunk CodeCompletionString::Chunk::CreateCurrentParameter(
+ const char *CurrentParameter) {
return Chunk(CK_CurrentParameter, CurrentParameter);
}
-CodeCompletionString::CodeCompletionString(const Chunk *Chunks,
- unsigned NumChunks,
- unsigned Priority,
- CXAvailabilityKind Availability,
- const char **Annotations,
- unsigned NumAnnotations,
- StringRef ParentName,
- const char *BriefComment)
- : NumChunks(NumChunks), NumAnnotations(NumAnnotations),
- Priority(Priority), Availability(Availability),
- ParentName(ParentName), BriefComment(BriefComment) {
+CodeCompletionString::CodeCompletionString(
+ const Chunk *Chunks, unsigned NumChunks, unsigned Priority,
+ CXAvailabilityKind Availability, const char **Annotations,
+ unsigned NumAnnotations, StringRef ParentName, const char *BriefComment)
+ : NumChunks(NumChunks), NumAnnotations(NumAnnotations), Priority(Priority),
+ Availability(Availability), ParentName(ParentName),
+ BriefComment(BriefComment) {
assert(NumChunks <= 0xffff);
assert(NumAnnotations <= 0xffff);
@@ -293,7 +291,8 @@ CodeCompletionString::CodeCompletionString(const Chunk *Chunks,
for (unsigned I = 0; I != NumChunks; ++I)
StoredChunks[I] = Chunks[I];
- const char **StoredAnnotations = reinterpret_cast<const char **>(StoredChunks + NumChunks);
+ const char **StoredAnnotations =
+ reinterpret_cast<const char **>(StoredChunks + NumChunks);
for (unsigned I = 0; I != NumAnnotations; ++I)
StoredAnnotations[I] = Annotations[I];
}
@@ -304,7 +303,7 @@ unsigned CodeCompletionString::getAnnotationCount() const {
const char *CodeCompletionString::getAnnotation(unsigned AnnotationNr) const {
if (AnnotationNr < NumAnnotations)
- return reinterpret_cast<const char * const*>(end())[AnnotationNr];
+ return reinterpret_cast<const char *const *>(end())[AnnotationNr];
else
return nullptr;
}
@@ -313,27 +312,33 @@ std::string CodeCompletionString::getAsString() const {
std::string Result;
llvm::raw_string_ostream OS(Result);
- for (iterator C = begin(), CEnd = end(); C != CEnd; ++C) {
- switch (C->Kind) {
- case CK_Optional: OS << "{#" << C->Optional->getAsString() << "#}"; break;
- case CK_Placeholder: OS << "<#" << C->Text << "#>"; break;
-
+ for (const Chunk &C : *this) {
+ switch (C.Kind) {
+ case CK_Optional:
+ OS << "{#" << C.Optional->getAsString() << "#}";
+ break;
+ case CK_Placeholder:
+ OS << "<#" << C.Text << "#>";
+ break;
case CK_Informative:
case CK_ResultType:
- OS << "[#" << C->Text << "#]";
+ OS << "[#" << C.Text << "#]";
+ break;
+ case CK_CurrentParameter:
+ OS << "<#" << C.Text << "#>";
+ break;
+ default:
+ OS << C.Text;
break;
-
- case CK_CurrentParameter: OS << "<#" << C->Text << "#>"; break;
- default: OS << C->Text; break;
}
}
return OS.str();
}
const char *CodeCompletionString::getTypedText() const {
- for (iterator C = begin(), CEnd = end(); C != CEnd; ++C)
- if (C->Kind == CK_TypedText)
- return C->Text;
+ for (const Chunk &C : *this)
+ if (C.Kind == CK_TypedText)
+ return C.Text;
return nullptr;
}
@@ -368,7 +373,7 @@ StringRef CodeCompletionTUInfo::getParentName(const DeclContext *DC) {
// Find the interesting names.
SmallVector<const DeclContext *, 2> Contexts;
while (DC && !DC->isFunctionOrMethod()) {
- if (const NamedDecl *ND = dyn_cast<NamedDecl>(DC)) {
+ if (const auto *ND = dyn_cast<NamedDecl>(DC)) {
if (ND->getIdentifier())
Contexts.push_back(DC);
}
@@ -387,11 +392,11 @@ StringRef CodeCompletionTUInfo::getParentName(const DeclContext *DC) {
OS << "::";
}
- const DeclContext *CurDC = Contexts[I-1];
- if (const ObjCCategoryImplDecl *CatImpl = dyn_cast<ObjCCategoryImplDecl>(CurDC))
+ const DeclContext *CurDC = Contexts[I - 1];
+ if (const auto *CatImpl = dyn_cast<ObjCCategoryImplDecl>(CurDC))
CurDC = CatImpl->getCategoryDecl();
- if (const ObjCCategoryDecl *Cat = dyn_cast<ObjCCategoryDecl>(CurDC)) {
+ if (const auto *Cat = dyn_cast<ObjCCategoryDecl>(CurDC)) {
const ObjCInterfaceDecl *Interface = Cat->getClassInterface();
if (!Interface) {
// Assign an empty StringRef but with non-null data to distinguish
@@ -417,11 +422,9 @@ CodeCompletionString *CodeCompletionBuilder::TakeString() {
sizeof(CodeCompletionString) + sizeof(Chunk) * Chunks.size() +
sizeof(const char *) * Annotations.size(),
alignof(CodeCompletionString));
- CodeCompletionString *Result
- = new (Mem) CodeCompletionString(Chunks.data(), Chunks.size(),
- Priority, Availability,
- Annotations.data(), Annotations.size(),
- ParentName, BriefComment);
+ CodeCompletionString *Result = new (Mem) CodeCompletionString(
+ Chunks.data(), Chunks.size(), Priority, Availability, Annotations.data(),
+ Annotations.size(), ParentName, BriefComment);
Chunks.clear();
return Result;
}
@@ -450,8 +453,8 @@ void CodeCompletionBuilder::AddResultTypeChunk(const char *ResultType) {
Chunks.push_back(Chunk::CreateResultType(ResultType));
}
-void
-CodeCompletionBuilder::AddCurrentParameterChunk(const char *CurrentParameter) {
+void CodeCompletionBuilder::AddCurrentParameterChunk(
+ const char *CurrentParameter) {
Chunks.push_back(Chunk::CreateCurrentParameter(CurrentParameter));
}
@@ -481,8 +484,7 @@ void CodeCompletionBuilder::addBriefComment(StringRef Comment) {
//===----------------------------------------------------------------------===//
// Code completion overload candidate implementation
//===----------------------------------------------------------------------===//
-FunctionDecl *
-CodeCompleteConsumer::OverloadCandidate::getFunction() const {
+FunctionDecl *CodeCompleteConsumer::OverloadCandidate::getFunction() const {
if (getKind() == CK_Function)
return Function;
else if (getKind() == CK_FunctionTemplate)
@@ -498,8 +500,9 @@ CodeCompleteConsumer::OverloadCandidate::getFunctionType() const {
return Function->getType()->getAs<FunctionType>();
case CK_FunctionTemplate:
- return FunctionTemplate->getTemplatedDecl()->getType()
- ->getAs<FunctionType>();
+ return FunctionTemplate->getTemplatedDecl()
+ ->getType()
+ ->getAs<FunctionType>();
case CK_FunctionType:
return Type;
@@ -514,12 +517,12 @@ CodeCompleteConsumer::OverloadCandidate::getFunctionType() const {
CodeCompleteConsumer::~CodeCompleteConsumer() = default;
-bool PrintingCodeCompleteConsumer::isResultFilteredOut(StringRef Filter,
- CodeCompletionResult Result) {
+bool PrintingCodeCompleteConsumer::isResultFilteredOut(
+ StringRef Filter, CodeCompletionResult Result) {
switch (Result.Kind) {
case CodeCompletionResult::RK_Declaration:
return !(Result.Declaration->getIdentifier() &&
- Result.Declaration->getIdentifier()->getName().startswith(Filter));
+ Result.Declaration->getIdentifier()->getName().startswith(Filter));
case CodeCompletionResult::RK_Keyword:
return !StringRef(Result.Keyword).startswith(Filter);
case CodeCompletionResult::RK_Macro:
@@ -531,30 +534,39 @@ bool PrintingCodeCompleteConsumer::isResultFilteredOut(StringRef Filter,
llvm_unreachable("Unknown code completion result Kind.");
}
-void
-PrintingCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &SemaRef,
- CodeCompletionContext Context,
- CodeCompletionResult *Results,
- unsigned NumResults) {
+void PrintingCodeCompleteConsumer::ProcessCodeCompleteResults(
+ Sema &SemaRef, CodeCompletionContext Context, CodeCompletionResult *Results,
+ unsigned NumResults) {
std::stable_sort(Results, Results + NumResults);
- StringRef Filter = SemaRef.getPreprocessor().getCodeCompletionFilter();
+ if (!Context.getPreferredType().isNull())
+ OS << "PREFERRED-TYPE: " << Context.getPreferredType().getAsString()
+ << "\n";
- // Print the results.
+ StringRef Filter = SemaRef.getPreprocessor().getCodeCompletionFilter();
+ // Print the completions.
for (unsigned I = 0; I != NumResults; ++I) {
- if(!Filter.empty() && isResultFilteredOut(Filter, Results[I]))
+ if (!Filter.empty() && isResultFilteredOut(Filter, Results[I]))
continue;
OS << "COMPLETION: ";
switch (Results[I].Kind) {
case CodeCompletionResult::RK_Declaration:
OS << *Results[I].Declaration;
- if (Results[I].Hidden)
- OS << " (Hidden)";
- if (CodeCompletionString *CCS
- = Results[I].CreateCodeCompletionString(SemaRef, Context,
- getAllocator(),
- CCTUInfo,
- includeBriefComments())) {
+ {
+ std::vector<std::string> Tags;
+ if (Results[I].Hidden)
+ Tags.push_back("Hidden");
+ if (Results[I].InBaseClass)
+ Tags.push_back("InBase");
+ if (Results[I].Availability ==
+ CXAvailabilityKind::CXAvailability_NotAccessible)
+ Tags.push_back("Inaccessible");
+ if (!Tags.empty())
+ OS << " (" << llvm::join(Tags, ",") << ")";
+ }
+ if (CodeCompletionString *CCS = Results[I].CreateCodeCompletionString(
+ SemaRef, Context, getAllocator(), CCTUInfo,
+ includeBriefComments())) {
OS << " : " << CCS->getAsString();
if (const char *BriefComment = CCS->getBriefComment())
OS << " : " << BriefComment;
@@ -586,19 +598,16 @@ PrintingCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &SemaRef,
case CodeCompletionResult::RK_Macro:
OS << Results[I].Macro->getName();
- if (CodeCompletionString *CCS
- = Results[I].CreateCodeCompletionString(SemaRef, Context,
- getAllocator(),
- CCTUInfo,
- includeBriefComments())) {
+ if (CodeCompletionString *CCS = Results[I].CreateCodeCompletionString(
+ SemaRef, Context, getAllocator(), CCTUInfo,
+ includeBriefComments())) {
OS << " : " << CCS->getAsString();
}
OS << '\n';
break;
case CodeCompletionResult::RK_Pattern:
- OS << "Pattern : "
- << Results[I].Pattern->getAsString() << '\n';
+ OS << "Pattern : " << Results[I].Pattern->getAsString() << '\n';
break;
}
}
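
For reference, a hypothetical dump (format inferred from the printing code above, not actual tool output) showing the new PREFERRED-TYPE line and the joined result tags added in this hunk:

PREFERRED-TYPE: int
COMPLETION: field (InBase) : [#int#]field
COMPLETION: shadowed (Hidden) : [#int#]shadowed
COMPLETION: secret (Hidden,Inaccessible) : [#int#]secret
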
@@ -627,7 +636,9 @@ static std::string getOverloadAsString(const CodeCompletionString &CCS) {
case CodeCompletionString::CK_Optional:
break;
- default: OS << C.Text; break;
+ default:
+ OS << C.Text;
+ break;
}
}
return OS.str();
@@ -683,7 +694,7 @@ void CodeCompletionResult::computeCursorKindAndAvailability(bool Accessible) {
break;
}
- if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(Declaration))
+ if (const auto *Function = dyn_cast<FunctionDecl>(Declaration))
if (Function->isDeleted())
Availability = CXAvailability_NotAvailable;
@@ -717,15 +728,15 @@ void CodeCompletionResult::computeCursorKindAndAvailability(bool Accessible) {
/// saved into Saved and the returned StringRef will refer to it.
StringRef CodeCompletionResult::getOrderedName(std::string &Saved) const {
switch (Kind) {
- case RK_Keyword:
- return Keyword;
- case RK_Pattern:
- return Pattern->getTypedText();
- case RK_Macro:
- return Macro->getName();
- case RK_Declaration:
- // Handle declarations below.
- break;
+ case RK_Keyword:
+ return Keyword;
+ case RK_Pattern:
+ return Pattern->getTypedText();
+ case RK_Macro:
+ return Macro->getName();
+ case RK_Declaration:
+ // Handle declarations below.
+ break;
}
DeclarationName Name = Declaration->getDeclName();
@@ -735,8 +746,7 @@ StringRef CodeCompletionResult::getOrderedName(std::string &Saved) const {
if (IdentifierInfo *Id = Name.getAsIdentifierInfo())
return Id->getName();
if (Name.isObjCZeroArgSelector())
- if (IdentifierInfo *Id
- = Name.getObjCSelector().getIdentifierInfoForSlot(0))
+ if (IdentifierInfo *Id = Name.getObjCSelector().getIdentifierInfoForSlot(0))
return Id->getName();
Saved = Name.getAsString();
@@ -753,9 +763,5 @@ bool clang::operator<(const CodeCompletionResult &X,
return cmp < 0;
// If case-insensitive comparison fails, try case-sensitive comparison.
- cmp = XStr.compare(YStr);
- if (cmp)
- return cmp < 0;
-
- return false;
+ return XStr.compare(YStr) < 0;
}
diff --git a/lib/Sema/DeclSpec.cpp b/lib/Sema/DeclSpec.cpp
index b22eea2b36..2efa0a7fd1 100644
--- a/lib/Sema/DeclSpec.cpp
+++ b/lib/Sema/DeclSpec.cpp
@@ -438,7 +438,7 @@ template <class T> static bool BadSpecifier(T TNew, T TPrev,
if (TNew != TPrev)
DiagID = diag::err_invalid_decl_spec_combination;
else
- DiagID = IsExtension ? diag::ext_duplicate_declspec :
+ DiagID = IsExtension ? diag::ext_warn_duplicate_declspec :
diag::warn_duplicate_declspec;
return true;
}
@@ -566,14 +566,16 @@ bool DeclSpec::SetStorageClassSpec(Sema &S, SCS SC, SourceLocation Loc,
// these storage-class specifiers.
// OpenCL v1.2 s6.8 changes this to "The auto and register storage-class
// specifiers are not supported."
+ // OpenCL C++ v1.0 s2.9 restricts register.
if (S.getLangOpts().OpenCL &&
!S.getOpenCLOptions().isEnabled("cl_clang_storage_class_specifiers")) {
switch (SC) {
case SCS_extern:
case SCS_private_extern:
case SCS_static:
- if (S.getLangOpts().OpenCLVersion < 120) {
- DiagID = diag::err_opencl_unknown_type_specifier;
+ if (S.getLangOpts().OpenCLVersion < 120 &&
+ !S.getLangOpts().OpenCLCPlusPlus) {
+ DiagID = diag::err_opencl_unknown_type_specifier;
PrevSpec = getSpecifierName(SC);
return true;
}
@@ -967,7 +969,7 @@ bool DeclSpec::setModulePrivateSpec(SourceLocation Loc, const char *&PrevSpec,
unsigned &DiagID) {
if (isModulePrivateSpecified()) {
PrevSpec = "__module_private__";
- DiagID = diag::ext_duplicate_declspec;
+ DiagID = diag::ext_warn_duplicate_declspec;
return true;
}
diff --git a/lib/Sema/ParsedAttr.cpp b/lib/Sema/ParsedAttr.cpp
index 3dff0ad63e..59e5aab677 100644
--- a/lib/Sema/ParsedAttr.cpp
+++ b/lib/Sema/ParsedAttr.cpp
@@ -103,15 +103,31 @@ void AttributePool::takePool(AttributePool &pool) {
#include "clang/Sema/AttrParsedAttrKinds.inc"
-static StringRef normalizeAttrName(StringRef AttrName, StringRef ScopeName,
+static StringRef normalizeAttrScopeName(StringRef ScopeName,
+ ParsedAttr::Syntax SyntaxUsed) {
+ // Normalize the "__gnu__" scope name to be "gnu" and the "_Clang" scope name
+ // to be "clang".
+ if (SyntaxUsed == ParsedAttr::AS_CXX11 ||
+ SyntaxUsed == ParsedAttr::AS_C2x) {
+ if (ScopeName == "__gnu__")
+ ScopeName = "gnu";
+ else if (ScopeName == "_Clang")
+ ScopeName = "clang";
+ }
+ return ScopeName;
+}
+
+static StringRef normalizeAttrName(StringRef AttrName,
+ StringRef NormalizedScopeName,
ParsedAttr::Syntax SyntaxUsed) {
// Normalize the attribute name, __foo__ becomes foo. This is only allowable
- // for GNU attributes.
- bool IsGNU = SyntaxUsed == ParsedAttr::AS_GNU ||
- ((SyntaxUsed == ParsedAttr::AS_CXX11 ||
- SyntaxUsed == ParsedAttr::AS_C2x) &&
- ScopeName == "gnu");
- if (IsGNU && AttrName.size() >= 4 && AttrName.startswith("__") &&
+ // for GNU attributes, and attributes using the double square bracket syntax.
+ bool ShouldNormalize =
+ SyntaxUsed == ParsedAttr::AS_GNU ||
+ ((SyntaxUsed == ParsedAttr::AS_CXX11 ||
+ SyntaxUsed == ParsedAttr::AS_C2x) &&
+ (NormalizedScopeName == "gnu" || NormalizedScopeName == "clang"));
+ if (ShouldNormalize && AttrName.size() >= 4 && AttrName.startswith("__") &&
AttrName.endswith("__"))
AttrName = AttrName.slice(2, AttrName.size() - 2);
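
As a quick illustration (not part of the patch; the attribute names are only examples), the scope and name normalization above makes the following C++11-style spellings equivalent:

[[gnu::noinline]] void canonical_gnu();                     // canonical scope and name
[[__gnu__::__noinline__]] void normalized_gnu();            // treated as gnu::noinline
[[clang::warn_unused_result]] int canonical_clang();
[[_Clang::__warn_unused_result__]] int normalized_clang();  // treated as clang::warn_unused_result
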
@@ -125,7 +141,7 @@ ParsedAttr::Kind ParsedAttr::getKind(const IdentifierInfo *Name,
SmallString<64> FullName;
if (ScopeName)
- FullName += ScopeName->getName();
+ FullName += normalizeAttrScopeName(ScopeName->getName(), SyntaxUsed);
AttrName = normalizeAttrName(AttrName, FullName, SyntaxUsed);
@@ -141,9 +157,10 @@ ParsedAttr::Kind ParsedAttr::getKind(const IdentifierInfo *Name,
unsigned ParsedAttr::getAttributeSpellingListIndex() const {
// Both variables will be used in tablegen generated
// attribute spell list index matching code.
- StringRef Scope = ScopeName ? ScopeName->getName() : "";
- StringRef Name = normalizeAttrName(AttrName->getName(), Scope,
- (ParsedAttr::Syntax)SyntaxUsed);
+ auto Syntax = static_cast<ParsedAttr::Syntax>(SyntaxUsed);
+ StringRef Scope =
+ ScopeName ? normalizeAttrScopeName(ScopeName->getName(), Syntax) : "";
+ StringRef Name = normalizeAttrName(AttrName->getName(), Scope, Syntax);
#include "clang/Sema/AttrSpellingListIndex.inc"
diff --git a/lib/Sema/ScopeInfo.cpp b/lib/Sema/ScopeInfo.cpp
index 62a83ccb70..bd8db6f4ed 100644
--- a/lib/Sema/ScopeInfo.cpp
+++ b/lib/Sema/ScopeInfo.cpp
@@ -54,6 +54,8 @@ void FunctionScopeInfo::Clear() {
PossiblyUnreachableDiags.clear();
WeakObjectUses.clear();
ModifiedNonNullParams.clear();
+ Blocks.clear();
+ ByrefBlockVars.clear();
}
static const NamedDecl *getBestPropertyDecl(const ObjCPropertyRefExpr *PropE) {
diff --git a/lib/Sema/Sema.cpp b/lib/Sema/Sema.cpp
index d777afe981..a8e3b85fe0 100644
--- a/lib/Sema/Sema.cpp
+++ b/lib/Sema/Sema.cpp
@@ -152,7 +152,7 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
for (unsigned I = 0; I != NSAPI::NumNSNumberLiteralMethods; ++I)
NSNumberLiteralMethods[I] = nullptr;
- if (getLangOpts().ObjC1)
+ if (getLangOpts().ObjC)
NSAPIObj.reset(new NSAPI(Context));
if (getLangOpts().CPlusPlus)
@@ -167,7 +167,7 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
PreallocatedFunctionScope.reset(new FunctionScopeInfo(Diags));
- // Initilization of data sharing attributes stack for OpenMP
+ // Initialization of data sharing attributes stack for OpenMP
InitDataSharingAttributesStack();
std::unique_ptr<sema::SemaPPCallbacks> Callbacks =
@@ -214,7 +214,7 @@ void Sema::Initialize() {
// Initialize predefined Objective-C types:
- if (getLangOpts().ObjC1) {
+ if (getLangOpts().ObjC) {
// If 'SEL' does not yet refer to any declarations, make it refer to the
// predefined 'SEL'.
DeclarationName SEL = &Context.Idents.get("SEL");
@@ -320,6 +320,10 @@ void Sema::Initialize() {
#define GENERIC_IMAGE_TYPE_EXT(Type, Id, Ext) \
setOpenCLExtensionForType(Context.Id, Ext);
#include "clang/Basic/OpenCLImageTypes.def"
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ addImplicitTypedef(#ExtType, Context.Id##Ty); \
+ setOpenCLExtensionForType(Context.Id##Ty, #Ext);
+#include "clang/Basic/OpenCLExtensionTypes.def"
};
if (Context.getTargetInfo().hasBuiltinMSVaList()) {
@@ -533,6 +537,7 @@ CastKind Sema::ScalarTypeToBooleanCastKind(QualType ScalarTy) {
case Type::STK_Floating: return CK_FloatingToBoolean;
case Type::STK_IntegralComplex: return CK_IntegralComplexToBoolean;
case Type::STK_FloatingComplex: return CK_FloatingComplexToBoolean;
+ case Type::STK_FixedPoint: return CK_FixedPointToBoolean;
}
llvm_unreachable("unknown scalar type kind");
}
@@ -644,7 +649,8 @@ void Sema::getUndefinedButUsed(
continue;
if (FD->isExternallyVisible() &&
!isExternalWithNoLinkageType(FD) &&
- !FD->getMostRecentDecl()->isInlined())
+ !FD->getMostRecentDecl()->isInlined() &&
+ !FD->hasAttr<ExcludeFromExplicitInstantiationAttr>())
continue;
if (FD->getBuiltinID())
continue;
@@ -654,7 +660,8 @@ void Sema::getUndefinedButUsed(
continue;
if (VD->isExternallyVisible() &&
!isExternalWithNoLinkageType(VD) &&
- !VD->getMostRecentDecl()->isInline())
+ !VD->getMostRecentDecl()->isInline() &&
+ !VD->hasAttr<ExcludeFromExplicitInstantiationAttr>())
continue;
// Skip VarDecls that lack formal definitions but which we know are in
@@ -1401,9 +1408,68 @@ void Sema::RecordParsingTemplateParameterDepth(unsigned Depth) {
"Remove assertion if intentionally called in a non-lambda context.");
}
+// Check that the type of the VarDecl has an accessible copy constructor and
+// resolve its destructor's exception specification.
+static void checkEscapingByref(VarDecl *VD, Sema &S) {
+ QualType T = VD->getType();
+ EnterExpressionEvaluationContext scope(
+ S, Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
+ SourceLocation Loc = VD->getLocation();
+ Expr *VarRef = new (S.Context) DeclRefExpr(VD, false, T, VK_LValue, Loc);
+ ExprResult Result = S.PerformMoveOrCopyInitialization(
+ InitializedEntity::InitializeBlock(Loc, T, false), VD, VD->getType(),
+ VarRef, /*AllowNRVO=*/true);
+ if (!Result.isInvalid()) {
+ Result = S.MaybeCreateExprWithCleanups(Result);
+ Expr *Init = Result.getAs<Expr>();
+ S.Context.setBlockVarCopyInit(VD, Init, S.canThrow(Init));
+ }
+
+  // The destructor's exception specification is needed when IRGen generates
+ // block copy/destroy functions. Resolve it here.
+ if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
+ if (CXXDestructorDecl *DD = RD->getDestructor()) {
+ auto *FPT = DD->getType()->getAs<FunctionProtoType>();
+ S.ResolveExceptionSpec(Loc, FPT);
+ }
+}
+
+static void markEscapingByrefs(const FunctionScopeInfo &FSI, Sema &S) {
+ // Set the EscapingByref flag of __block variables captured by
+ // escaping blocks.
+ for (const BlockDecl *BD : FSI.Blocks) {
+ if (BD->doesNotEscape())
+ continue;
+ for (const BlockDecl::Capture &BC : BD->captures()) {
+ VarDecl *VD = BC.getVariable();
+ if (VD->hasAttr<BlocksAttr>())
+ VD->setEscapingByref();
+ }
+ }
+
+ for (VarDecl *VD : FSI.ByrefBlockVars) {
+ // __block variables might require us to capture a copy-initializer.
+ if (!VD->isEscapingByref())
+ continue;
+ // It's currently invalid to ever have a __block variable with an
+ // array type; should we diagnose that here?
+ // Regardless, we don't want to ignore array nesting when
+ // constructing this copy.
+ if (VD->getType()->isStructureOrClassType())
+ checkEscapingByref(VD, S);
+ }
+}
+
void Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP,
const Decl *D, const BlockExpr *blkExpr) {
assert(!FunctionScopes.empty() && "mismatched push/pop!");
+
+  // markEscapingByrefs has to be called before popping the current function
+  // scope: it calls PerformMoveOrCopyInitialization, which can call
+  // PushFunctionScope, which can cause PreallocatedFunctionScope to be
+  // cleared out when FunctionScopes is empty.
+ markEscapingByrefs(*FunctionScopes.back(), *this);
+
FunctionScopeInfo *Scope = FunctionScopes.pop_back_val();
if (LangOpts.OpenMP)
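
A minimal sketch, assuming -fblocks, of the situation the new markEscapingByrefs() handles; the names (Big, Callback, g_cb, schedule) are hypothetical:

struct Big { Big(); Big(const Big &); ~Big(); };
typedef void (^Callback)(void);
Callback g_cb;                  // the block escapes through this global

void schedule() {
  __block Big state;            // __block variable of class type
  g_cb = ^{ (void)&state; };    // escaping block captures 'state', so Sema marks it
                                // EscapingByref and checks its copy-initializer and
                                // destructor exception specification
}
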
@@ -1853,6 +1919,34 @@ void Sema::setCurrentOpenCLExtensionForDecl(Decl *D) {
setOpenCLExtensionForDecl(D, CurrOpenCLExtension);
}
+std::string Sema::getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD) {
+ if (!OpenCLDeclExtMap.empty())
+ return getOpenCLExtensionsFromExtMap(FD, OpenCLDeclExtMap);
+
+ return "";
+}
+
+std::string Sema::getOpenCLExtensionsFromTypeExtMap(FunctionType *FT) {
+ if (!OpenCLTypeExtMap.empty())
+ return getOpenCLExtensionsFromExtMap(FT, OpenCLTypeExtMap);
+
+ return "";
+}
+
+template <typename T, typename MapT>
+std::string Sema::getOpenCLExtensionsFromExtMap(T *FDT, MapT &Map) {
+ std::string ExtensionNames = "";
+ auto Loc = Map.find(FDT);
+
+ for (auto const& I : Loc->second) {
+ ExtensionNames += I;
+ ExtensionNames += " ";
+ }
+ ExtensionNames.pop_back();
+
+ return ExtensionNames;
+}
+
bool Sema::isOpenCLDisabledDecl(Decl *FD) {
auto Loc = OpenCLDeclExtMap.find(FD);
if (Loc == OpenCLDeclExtMap.end())
diff --git a/lib/Sema/SemaAccess.cpp b/lib/Sema/SemaAccess.cpp
index cf33231037..69084589ef 100644
--- a/lib/Sema/SemaAccess.cpp
+++ b/lib/Sema/SemaAccess.cpp
@@ -1877,22 +1877,31 @@ void Sema::CheckLookupAccess(const LookupResult &R) {
/// specifiers into account, but no member access expressions and such.
///
/// \param Target the declaration to check if it can be accessed
-/// \param Ctx the class/context from which to start the search
+/// \param NamingClass the class in which the lookup was started.
+/// \param BaseType type of the left side of member access expression.
+/// \p BaseType and \p NamingClass are used for C++ access control.
+/// Depending on the lookup case, they should be set to the following:
+/// - lhs.target (member access without a qualifier):
+/// \p BaseType and \p NamingClass are both the type of 'lhs'.
+/// - lhs.X::target (member access with a qualifier):
+/// BaseType is the type of 'lhs', NamingClass is 'X'.
+/// - X::target (qualified lookup without member access):
+/// BaseType is null, NamingClass is 'X'.
+/// - target (unqualified lookup):
+/// BaseType is null, NamingClass is the parent class of 'target'.
/// \return true if the Target is accessible from the Class, false otherwise.
-bool Sema::IsSimplyAccessible(NamedDecl *Target, DeclContext *Ctx) {
- if (CXXRecordDecl *Class = dyn_cast<CXXRecordDecl>(Ctx)) {
- if (!Target->isCXXClassMember())
- return true;
-
- if (Target->getAccess() == AS_public)
- return true;
- QualType qType = Class->getTypeForDecl()->getCanonicalTypeInternal();
+bool Sema::IsSimplyAccessible(NamedDecl *Target, CXXRecordDecl *NamingClass,
+ QualType BaseType) {
+ // Perform the C++ accessibility checks first.
+ if (Target->isCXXClassMember() && NamingClass) {
+ if (!getLangOpts().CPlusPlus)
+ return false;
// The unprivileged access is AS_none as we don't know how the member was
// accessed, which is described by the access in DeclAccessPair.
// `IsAccessible` will examine the actual access of Target (i.e.
// Decl->getAccess()) when calculating the access.
- AccessTarget Entity(Context, AccessedEntity::Member, Class,
- DeclAccessPair::make(Target, AS_none), qType);
+ AccessTarget Entity(Context, AccessedEntity::Member, NamingClass,
+ DeclAccessPair::make(Target, AS_none), BaseType);
EffectiveContext EC(CurContext);
return ::IsAccessible(*this, EC, Entity) != ::AR_inaccessible;
}
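
A minimal sketch (not part of the patch) of the four lookup forms the new parameter documentation describes, annotated with the BaseType / NamingClass each one implies; 'X', 'D', and 'target' are hypothetical names:

struct X { int target; };
struct D : X {
  void m() {
    (void)target;        // unqualified lookup: BaseType is null, NamingClass is the parent class of 'target'
  }
};

void use(D lhs) {
  (void)lhs.target;      // member access without qualifier: BaseType and NamingClass are both the type of 'lhs'
  (void)lhs.X::target;   // member access with qualifier: BaseType is the type of 'lhs', NamingClass is 'X'
  (void)&X::target;      // qualified lookup without member access: BaseType is null, NamingClass is 'X'
}
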
diff --git a/lib/Sema/SemaAttr.cpp b/lib/Sema/SemaAttr.cpp
index 8024e1a051..f6ac9b44a8 100644
--- a/lib/Sema/SemaAttr.cpp
+++ b/lib/Sema/SemaAttr.cpp
@@ -520,9 +520,9 @@ attrMatcherRuleListToString(ArrayRef<attr::SubjectMatchRule> Rules) {
} // end anonymous namespace
-void Sema::ActOnPragmaAttributePush(ParsedAttr &Attribute,
- SourceLocation PragmaLoc,
- attr::ParsedSubjectMatchRuleSet Rules) {
+void Sema::ActOnPragmaAttributeAttribute(
+ ParsedAttr &Attribute, SourceLocation PragmaLoc,
+ attr::ParsedSubjectMatchRuleSet Rules) {
SmallVector<attr::SubjectMatchRule, 4> SubjectMatchRules;
// Gather the subject match rules that are supported by the attribute.
SmallVector<std::pair<attr::SubjectMatchRule, bool>, 4>
@@ -622,48 +622,64 @@ void Sema::ActOnPragmaAttributePush(ParsedAttr &Attribute,
Diagnostic << attrMatcherRuleListToString(ExtraRules);
}
- PragmaAttributeStack.push_back(
+ if (PragmaAttributeStack.empty()) {
+ Diag(PragmaLoc, diag::err_pragma_attr_attr_no_push);
+ return;
+ }
+
+ PragmaAttributeStack.back().Entries.push_back(
{PragmaLoc, &Attribute, std::move(SubjectMatchRules), /*IsUsed=*/false});
}
+void Sema::ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc) {
+ PragmaAttributeStack.emplace_back();
+ PragmaAttributeStack.back().Loc = PragmaLoc;
+}
+
void Sema::ActOnPragmaAttributePop(SourceLocation PragmaLoc) {
if (PragmaAttributeStack.empty()) {
Diag(PragmaLoc, diag::err_pragma_attribute_stack_mismatch);
return;
}
- const PragmaAttributeEntry &Entry = PragmaAttributeStack.back();
- if (!Entry.IsUsed) {
- assert(Entry.Attribute && "Expected an attribute");
- Diag(Entry.Attribute->getLoc(), diag::warn_pragma_attribute_unused)
- << Entry.Attribute->getName();
- Diag(PragmaLoc, diag::note_pragma_attribute_region_ends_here);
+
+ for (const PragmaAttributeEntry &Entry :
+ PragmaAttributeStack.back().Entries) {
+ if (!Entry.IsUsed) {
+ assert(Entry.Attribute && "Expected an attribute");
+ Diag(Entry.Attribute->getLoc(), diag::warn_pragma_attribute_unused)
+ << Entry.Attribute->getName();
+ Diag(PragmaLoc, diag::note_pragma_attribute_region_ends_here);
+ }
}
+
PragmaAttributeStack.pop_back();
}
void Sema::AddPragmaAttributes(Scope *S, Decl *D) {
if (PragmaAttributeStack.empty())
return;
- for (auto &Entry : PragmaAttributeStack) {
- ParsedAttr *Attribute = Entry.Attribute;
- assert(Attribute && "Expected an attribute");
-
- // Ensure that the attribute can be applied to the given declaration.
- bool Applies = false;
- for (const auto &Rule : Entry.MatchRules) {
- if (Attribute->appliesToDecl(D, Rule)) {
- Applies = true;
- break;
+ for (auto &Group : PragmaAttributeStack) {
+ for (auto &Entry : Group.Entries) {
+ ParsedAttr *Attribute = Entry.Attribute;
+ assert(Attribute && "Expected an attribute");
+
+ // Ensure that the attribute can be applied to the given declaration.
+ bool Applies = false;
+ for (const auto &Rule : Entry.MatchRules) {
+ if (Attribute->appliesToDecl(D, Rule)) {
+ Applies = true;
+ break;
+ }
}
+ if (!Applies)
+ continue;
+ Entry.IsUsed = true;
+ PragmaAttributeCurrentTargetDecl = D;
+ ParsedAttributesView Attrs;
+ Attrs.addAtEnd(Attribute);
+ ProcessDeclAttributeList(S, D, Attrs);
+ PragmaAttributeCurrentTargetDecl = nullptr;
}
- if (!Applies)
- continue;
- Entry.IsUsed = true;
- PragmaAttributeCurrentTargetDecl = D;
- ParsedAttributesView Attrs;
- Attrs.addAtEnd(Attribute);
- ProcessDeclAttributeList(S, D, Attrs);
- PragmaAttributeCurrentTargetDecl = nullptr;
}
}
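
A hypothetical sketch of the source pattern this enables (the parser side is not shown in this hunk, so the pragma spelling is assumed from the existing '#pragma clang attribute' syntax): an empty push, several attribute pragmas that all attach to the same region, and an attribute pragma outside any push being diagnosed with err_pragma_attr_attr_no_push:

#pragma clang attribute push
#pragma clang attribute (__attribute__((annotate("one"))), apply_to = function)
#pragma clang attribute (__attribute__((annotate("two"))), apply_to = function)
void f();   // receives both annotate attributes
#pragma clang attribute pop
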
diff --git a/lib/Sema/SemaCast.cpp b/lib/Sema/SemaCast.cpp
index c2e314c245..0b4645e11c 100644
--- a/lib/Sema/SemaCast.cpp
+++ b/lib/Sema/SemaCast.cpp
@@ -131,6 +131,9 @@ namespace {
return PlaceholderKind == K;
}
+ // Language specific cast restrictions for address spaces.
+ void checkAddressSpaceCast(QualType SrcType, QualType DestType);
+
void checkCastAlign() {
Self.CheckCastAlign(SrcExpr.get(), DestType, OpRange);
}
@@ -561,7 +564,7 @@ CastsAwayConstness(Sema &Self, QualType SrcType, QualType DestType,
Qualifiers *CastAwayQualifiers = nullptr) {
// If the only checking we care about is for Objective-C lifetime qualifiers,
// and we're not in ObjC mode, there's nothing to check.
- if (!CheckCVR && CheckObjCLifetime && !Self.Context.getLangOpts().ObjC1)
+ if (!CheckCVR && CheckObjCLifetime && !Self.Context.getLangOpts().ObjC)
return CastAwayConstnessKind::CACK_None;
if (!DestType->isReferenceType()) {
@@ -2276,6 +2279,27 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
return SuccessResult;
}
+void CastOperation::checkAddressSpaceCast(QualType SrcType, QualType DestType) {
+ // In OpenCL only conversions between pointers to objects in overlapping
+ // addr spaces are allowed. v2.0 s6.5.5 - Generic addr space overlaps
+ // with any named one, except for constant.
+ if (Self.getLangOpts().OpenCL) {
+ auto SrcPtrType = SrcType->getAs<PointerType>();
+ if (!SrcPtrType)
+ return;
+ auto DestPtrType = DestType->getAs<PointerType>();
+ if (!DestPtrType)
+ return;
+ if (!DestPtrType->isAddressSpaceOverlapping(*SrcPtrType)) {
+ Self.Diag(OpRange.getBegin(),
+ diag::err_typecheck_incompatible_address_space)
+ << SrcType << DestType << Sema::AA_Casting
+ << SrcExpr.get()->getSourceRange();
+ SrcExpr = ExprError();
+ }
+ }
+}
+
void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle,
bool ListInitialization) {
assert(Self.getLangOpts().CPlusPlus);
@@ -2403,6 +2427,8 @@ void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle,
}
}
+ checkAddressSpaceCast(SrcExpr.get()->getType(), DestType);
+
if (isValidCast(tcr)) {
if (Kind == CK_BitCast)
checkCastAlign();
@@ -2489,20 +2515,9 @@ void CastOperation::CheckCStyleCast() {
assert(!SrcType->isPlaceholderType());
- // OpenCL v1 s6.5: Casting a pointer to address space A to a pointer to
- // address space B is illegal.
- if (Self.getLangOpts().OpenCL && DestType->isPointerType() &&
- SrcType->isPointerType()) {
- const PointerType *DestPtr = DestType->getAs<PointerType>();
- if (!DestPtr->isAddressSpaceOverlapping(*SrcType->getAs<PointerType>())) {
- Self.Diag(OpRange.getBegin(),
- diag::err_typecheck_incompatible_address_space)
- << SrcType << DestType << Sema::AA_Casting
- << SrcExpr.get()->getSourceRange();
- SrcExpr = ExprError();
- return;
- }
- }
+ checkAddressSpaceCast(SrcType, DestType);
+ if (SrcExpr.isInvalid())
+ return;
if (Self.RequireCompleteType(OpRange.getBegin(), DestType,
diag::err_typecheck_cast_to_incomplete)) {
@@ -2539,10 +2554,11 @@ void CastOperation::CheckCStyleCast() {
// OpenCL v2.0 s6.13.10 - Allow casts from '0' to event_t type.
if (Self.getLangOpts().OpenCL && DestType->isEventT()) {
- llvm::APSInt CastInt;
- if (SrcExpr.get()->EvaluateAsInt(CastInt, Self.Context)) {
+ Expr::EvalResult Result;
+ if (SrcExpr.get()->EvaluateAsInt(Result, Self.Context)) {
+ llvm::APSInt CastInt = Result.Val.getInt();
if (0 == CastInt) {
- Kind = CK_ZeroToOCLEvent;
+ Kind = CK_ZeroToOCLOpaqueType;
return;
}
Self.Diag(OpRange.getBegin(),
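
A minimal OpenCL C sketch (not part of the patch) of the rule the new checkAddressSpaceCast() enforces for both C-style and C++-style casts, assuming OpenCL 2.0 so the generic address space exists:

kernel void k(global int *g, constant int *c) {
  generic int *ok  = (generic int *)g;  // OK: generic overlaps every named address space ...
  local int   *bad = (local int *)g;    // error: global and local do not overlap
  generic int *no  = (generic int *)c;  // error: ... except constant
}
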
diff --git a/lib/Sema/SemaChecking.cpp b/lib/Sema/SemaChecking.cpp
index a258d349c6..46cac25eed 100644
--- a/lib/Sema/SemaChecking.cpp
+++ b/lib/Sema/SemaChecking.cpp
@@ -27,6 +27,7 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
+#include "clang/AST/FormatString.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
#include "clang/AST/OperationKinds.h"
@@ -35,7 +36,6 @@
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
-#include "clang/Analysis/Analyses/FormatString.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
@@ -247,13 +247,16 @@ static void SemaBuiltinMemChkCall(Sema &S, FunctionDecl *FDecl,
const Expr *SizeArg = TheCall->getArg(SizeIdx);
const Expr *DstSizeArg = TheCall->getArg(DstSizeIdx);
- llvm::APSInt Size, DstSize;
+ Expr::EvalResult SizeResult, DstSizeResult;
// find out if both sizes are known at compile time
- if (!SizeArg->EvaluateAsInt(Size, S.Context) ||
- !DstSizeArg->EvaluateAsInt(DstSize, S.Context))
+ if (!SizeArg->EvaluateAsInt(SizeResult, S.Context) ||
+ !DstSizeArg->EvaluateAsInt(DstSizeResult, S.Context))
return;
+ llvm::APSInt Size = SizeResult.Val.getInt();
+ llvm::APSInt DstSize = DstSizeResult.Val.getInt();
+
if (Size.ule(DstSize))
return;
@@ -877,6 +880,66 @@ static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
return false;
}
+static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) {
+ if (checkArgCount(S, TheCall, 1))
+ return ExprError();
+
+ // Compute __builtin_launder's parameter type from the argument.
+ // The parameter type is:
+ // * The type of the argument if it's not an array or function type,
+ // Otherwise,
+ // * The decayed argument type.
+ QualType ParamTy = [&]() {
+ QualType ArgTy = TheCall->getArg(0)->getType();
+ if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe())
+ return S.Context.getPointerType(Ty->getElementType());
+ if (ArgTy->isFunctionType()) {
+ return S.Context.getPointerType(ArgTy);
+ }
+ return ArgTy;
+ }();
+
+ TheCall->setType(ParamTy);
+
+ auto DiagSelect = [&]() -> llvm::Optional<unsigned> {
+ if (!ParamTy->isPointerType())
+ return 0;
+ if (ParamTy->isFunctionPointerType())
+ return 1;
+ if (ParamTy->isVoidPointerType())
+ return 2;
+ return llvm::Optional<unsigned>{};
+ }();
+ if (DiagSelect.hasValue()) {
+ S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
+ << DiagSelect.getValue() << TheCall->getSourceRange();
+ return ExprError();
+ }
+
+ // We either have an incomplete class type, or we have a class template
+ // whose instantiation has not been forced. Example:
+ //
+ // template <class T> struct Foo { T value; };
+ // Foo<int> *p = nullptr;
+ // auto *d = __builtin_launder(p);
+ if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(),
+ diag::err_incomplete_type))
+ return ExprError();
+
+ assert(ParamTy->getPointeeType()->isObjectType() &&
+ "Unhandled non-object pointer case");
+
+ InitializedEntity Entity =
+ InitializedEntity::InitializeParameter(S.Context, ParamTy, false);
+ ExprResult Arg =
+ S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0));
+ if (Arg.isInvalid())
+ return ExprError();
+ TheCall->setArg(0, Arg.get());
+
+ return TheCall;
+}
+
// Emit an error and return true if the current architecture is not in the list
// of supported architectures.
static bool
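
A minimal sketch (not part of the patch) of the argument checking SemaBuiltinLaunder() performs; 'S' and 'demo' are hypothetical names:

struct S { int v; };

void demo(S *p, void *vp, int (*fp)()) {
  S arr[4];
  auto *a = __builtin_launder(p);    // OK: pointer to a complete object type
  auto *b = __builtin_launder(arr);  // OK: the array argument decays, parameter type is S *
  // __builtin_launder(42);          // error: non-pointer argument
  // __builtin_launder(fp);          // error: function pointer argument
  // __builtin_launder(vp);          // error: void pointer argument
  (void)a; (void)b;
}
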
@@ -1039,6 +1102,8 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
if (checkArgCount(*this, TheCall, 1)) return true;
TheCall->setType(Context.IntTy);
break;
+ case Builtin::BI__builtin_launder:
+ return SemaBuiltinLaunder(*this, TheCall);
case Builtin::BI__sync_fetch_and_add:
case Builtin::BI__sync_fetch_and_add_1:
case Builtin::BI__sync_fetch_and_add_2:
@@ -1308,7 +1373,6 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
// check for the argument.
if (SemaBuiltinRWPipe(*this, TheCall))
return ExprError();
- TheCall->setType(Context.IntTy);
break;
case Builtin::BIreserve_read_pipe:
case Builtin::BIreserve_write_pipe:
@@ -1340,7 +1404,6 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
case Builtin::BIget_pipe_max_packets:
if (SemaBuiltinPipePackets(*this, TheCall))
return ExprError();
- TheCall->setType(Context.UnsignedIntTy);
break;
case Builtin::BIto_global:
case Builtin::BIto_local:
@@ -1749,6 +1812,16 @@ bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID,
BuiltinID == AArch64::BI__builtin_arm_wsrp)
return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
+ // Only check the valid encoding range. Any constant in this range would be
+ // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw
+ // an exception for incorrect registers. This matches MSVC behavior.
+ if (BuiltinID == AArch64::BI_ReadStatusReg ||
+ BuiltinID == AArch64::BI_WriteStatusReg)
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff);
+
+ if (BuiltinID == AArch64::BI__getReg)
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);
+
if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
return true;
@@ -1766,777 +1839,814 @@ bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID,
}
bool Sema::CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall) {
- static const std::map<unsigned, std::vector<StringRef>> ValidCPU = {
- { Hexagon::BI__builtin_HEXAGON_A6_vcmpbeq_notany, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_A6_vminub_RdP, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_M6_vabsdiffb, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_M6_vabsdiffub, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_S6_vsplatrbp, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_S6_vtrunehb_ppp, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_S6_vtrunohb_ppp, {"v62", "v65"} },
+ struct BuiltinAndString {
+ unsigned BuiltinID;
+ const char *Str;
};
- static const std::map<unsigned, std::vector<StringRef>> ValidHVX = {
- { Hexagon::BI__builtin_HEXAGON_V6_extractw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_extractw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_hi, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_hi_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_lo, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_lo_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_lvsplatb, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_lvsplatb_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_lvsplath, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_lvsplath_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_lvsplatw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_lvsplatw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_and, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_and_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_and_n, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_and_n_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_not, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_not_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_or, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_or_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_or_n, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_or_n_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2v2, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2v2_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_xor, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_xor_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_shuffeqh, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_shuffeqh_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_shuffeqw, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_shuffeqw_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsb, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsb_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsb_sat, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsb_sat_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffub, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffub_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffuh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffuh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsh_sat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsh_sat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsw_sat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsw_sat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddb_dv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddb_dv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_dv, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_dv_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddcarry, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddclbh, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddclbh_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddclbw, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddclbw_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddh_dv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddh_dv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_dv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_dv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddhw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_acc, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_acc_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddubh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_acc, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_acc_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_dv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_dv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddububb_sat, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddububb_sat_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_dv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_dv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduhw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_acc, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_acc_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_dv, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_dv_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddw_dv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddw_dv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_dv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_dv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_valignb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_valignb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vand, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vand_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_acc, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_acc_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vandqrt, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vandvnqv, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vandvnqv_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vandvqv, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vandvqv_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vandvrt, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslh_acc, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslh_acc_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslhv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslhv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslw_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslw_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslwv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslwv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrh_acc, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrh_acc_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhbrndsat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhbrndsat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhbsat, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhbsat_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhubrndsat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhubrndsat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhubsat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhubsat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasruhubrndsat, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasruhubrndsat_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasruhubsat, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasruhubsat_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhrndsat, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhrndsat_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhsat, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhsat_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrw_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrw_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwhrndsat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwhrndsat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwhsat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwhsat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhrndsat, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhrndsat_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhsat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhsat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vassign, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vassign_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vassignp, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vassignp_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgb, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgb_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgbrnd, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgbrnd_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavghrnd, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavghrnd_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgub, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgub_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgubrnd, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgubrnd_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavguh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavguh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavguhrnd, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavguhrnd_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavguw, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavguw_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavguwrnd, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavguwrnd_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgwrnd, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgwrnd_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vcl0h, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vcl0h_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vcl0w, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vcl0w_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vcombine, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vcombine_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vd0, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vd0_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdd0, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdd0_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdealb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdealb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdealb4w, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdealb4w_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdealh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdealh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdealvdd, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdealvdd_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdelta, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdelta_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqb_and, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqb_and_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqb_or, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqb_or_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqb_xor, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqb_xor_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqh_and, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqh_and_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqh_or, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqh_or_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqh_xor, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqh_xor_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqw_and, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqw_and_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqw_or, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqw_or_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqw_xor, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqw_xor_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtb_and, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtb_and_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtb_or, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtb_or_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtb_xor, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtb_xor_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgth, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgth_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgth_and, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgth_and_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgth_or, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgth_or_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgth_xor, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgth_xor_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtub, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtub_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtub_and, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtub_and_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtub_or, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtub_or_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtub_xor, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtub_xor_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_and, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_and_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_or, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_or_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_xor, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_xor_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_and, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_and_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_or, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_or_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_xor, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_xor_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtw_and, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtw_and_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtw_or, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtw_or_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtw_xor, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtw_xor_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vinsertwr, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vinsertwr_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlalignb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlalignb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrb, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrb_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrhv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrhv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrwv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrwv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlut4, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlut4_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_nm, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_nm_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_nm, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_nm_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxb, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxb_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxub, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxub_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxuh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxuh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vminb, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vminb_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vminh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vminh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vminub, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vminub_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vminuh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vminuh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vminw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vminw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabus, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabusv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabusv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_acc, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_acc_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabuuv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabuuv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpahb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpahhsat, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpahhsat_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_acc, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_acc_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpauhuhsat, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpauhuhsat_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpsuhuhsat, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpsuhuhsat_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybus, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_64, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_64_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_acc, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_acc_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsat_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsat_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsrs, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsrs_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhss, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhss_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhvsrs, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhvsrs_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyieoh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyieoh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewh_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewh_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyih, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiowh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiowh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_acc, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_acc_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_64_acc, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_64_acc_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_sacc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_sacc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_sacc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_sacc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyub, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_acc, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_acc_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmux, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmux_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vnavgb, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vnavgb_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vnavgh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vnavgh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vnavgub, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vnavgub_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vnavgw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vnavgw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vnormamth, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vnormamth_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vnormamtw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vnormamtw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vnot, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vnot_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vor, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vor_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackeb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackeb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackeh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackeh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackhb_sat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackhb_sat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackhub_sat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackhub_sat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackob, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackob_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackoh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackoh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackwh_sat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackwh_sat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackwuh_sat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackwuh_sat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vpopcounth, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vpopcounth_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vprefixqb, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vprefixqb_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vprefixqh, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vprefixqh_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vprefixqw, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vprefixqw_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrdelta, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrdelta_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_acc, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_acc_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_acc, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_acc_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vror, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vror_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vroundhb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vroundhb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vroundhub, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vroundhub_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrounduhub, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrounduhub_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrounduwuh, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrounduwuh_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vroundwh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vroundwh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vroundwuh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vroundwuh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsathub, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsathub_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsatuwuh, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsatuwuh_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsatwh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsatwh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vshufeh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vshufeh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffeb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffeb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffob, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffob_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffvdd, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffvdd_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vshufoeb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vshufoeb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vshufoeh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vshufoeh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vshufoh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vshufoh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubb_dv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubb_dv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_dv, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_dv_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubcarry, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubh_dv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubh_dv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_dv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_dv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubhw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubhw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsububh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsububh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsububsat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_dv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_dv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubububb_sat, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubububb_sat_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_dv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_dv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuhw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuhw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_dv, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_dv_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubw_dv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubw_dv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_dv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_dv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vswap, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vswap_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackob, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackob_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackoh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackoh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackub, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackub_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackuh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackuh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vxor, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vxor_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vzb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vzb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vzh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vzh_128B, {"v60", "v62", "v65"} },
+ static BuiltinAndString ValidCPU[] = {
+ { Hexagon::BI__builtin_HEXAGON_A6_vcmpbeq_notany, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_A6_vminub_RdP, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_F2_dfadd, "v66" },
+ { Hexagon::BI__builtin_HEXAGON_F2_dfsub, "v66" },
+ { Hexagon::BI__builtin_HEXAGON_M2_mnaci, "v66" },
+ { Hexagon::BI__builtin_HEXAGON_M6_vabsdiffb, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_M6_vabsdiffub, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_S2_mask, "v66" },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_S6_vsplatrbp, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_S6_vtrunehb_ppp, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_S6_vtrunohb_ppp, "v62,v65,v66" },
+ };
+
+ static BuiltinAndString ValidHVX[] = {
+ { Hexagon::BI__builtin_HEXAGON_V6_hi, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_hi_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_lo, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_lo_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_extractw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_extractw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_lvsplatb, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_lvsplatb_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_lvsplath, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_lvsplath_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_lvsplatw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_lvsplatw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_and, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_and_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_and_n, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_and_n_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_not, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_not_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_or, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_or_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_or_n, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_or_n_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2v2, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2v2_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_xor, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_xor_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_shuffeqh, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_shuffeqh_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_shuffeqw, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_shuffeqw_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsb, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsb_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsb_sat, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsb_sat_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffub, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffub_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffuh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffuh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsh_sat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsh_sat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsw_sat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsw_sat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddb_dv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddb_dv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_dv, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_dv_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddcarry, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddcarrysat, "v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddcarrysat_128B, "v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddclbh, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddclbh_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddclbw, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddclbw_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddh_dv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddh_dv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_dv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_dv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddhw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_acc, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_acc_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddubh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_acc, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_acc_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_dv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_dv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddububb_sat, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddububb_sat_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_dv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_dv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vadduhw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_acc, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_acc_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_dv, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_dv_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddw_dv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddw_dv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_dv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_dv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_valignb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_valignb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_valignbi, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vand, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vand_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_acc, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_acc_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandqrt, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandvnqv, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandvnqv_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandvqv, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandvqv_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandvrt, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaslh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaslh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaslh_acc, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaslh_acc_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaslhv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaslhv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaslw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaslw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaslw_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaslw_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaslwv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaslwv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrh_acc, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrh_acc_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrhbrndsat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrhbrndsat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrhbsat, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrhbsat_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrhubrndsat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrhubrndsat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrhubsat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrhubsat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrhv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrhv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasr_into, "v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasr_into_128B, "v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasruhubrndsat, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasruhubrndsat_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasruhubsat, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasruhubsat_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhrndsat, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhrndsat_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhsat, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhsat_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrw_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrw_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrwh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrwh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrwhrndsat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrwhrndsat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrwhsat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrwhsat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhrndsat, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhrndsat_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhsat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhsat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrwv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrwv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vassign, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vassign_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vassignp, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vassignp_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgb, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgb_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgbrnd, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgbrnd_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavghrnd, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavghrnd_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgub, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgub_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgubrnd, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgubrnd_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavguh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavguh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavguhrnd, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavguhrnd_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavguw, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavguw_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavguwrnd, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavguwrnd_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgwrnd, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgwrnd_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vcl0h, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vcl0h_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vcl0w, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vcl0w_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vcombine, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vcombine_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vd0, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vd0_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdd0, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdd0_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdealb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdealb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdealb4w, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdealb4w_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdealh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdealh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdealvdd, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdealvdd_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdelta, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdelta_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqb_and, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqb_and_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqb_or, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqb_or_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqb_xor, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqb_xor_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqh_and, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqh_and_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqh_or, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqh_or_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqh_xor, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqh_xor_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqw_and, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqw_and_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqw_or, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqw_or_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqw_xor, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqw_xor_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtb_and, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtb_and_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtb_or, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtb_or_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtb_xor, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtb_xor_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgth, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgth_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgth_and, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgth_and_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgth_or, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgth_or_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgth_xor, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgth_xor_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtub, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtub_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtub_and, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtub_and_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtub_or, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtub_or_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtub_xor, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtub_xor_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_and, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_and_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_or, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_or_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_xor, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_xor_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_and, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_and_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_or, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_or_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_xor, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_xor_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtw_and, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtw_and_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtw_or, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtw_or_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtw_xor, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtw_xor_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vinsertwr, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vinsertwr_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlalignb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlalignb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlsrb, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlsrb_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlsrh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlsrh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlsrhv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlsrhv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlsrw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlsrw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlsrwv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlsrwv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlut4, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlut4_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_nm, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_nm_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_nm, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_nm_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmaxb, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmaxb_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmaxh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmaxh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmaxub, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmaxub_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmaxuh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmaxuh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmaxw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmaxw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vminb, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vminb_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vminh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vminh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vminub, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vminub_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vminuh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vminuh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vminw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vminw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpabus, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpabusv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpabusv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_acc, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_acc_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpabuuv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpabuuv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpahb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpahhsat, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpahhsat_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_acc, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_acc_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpauhuhsat, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpauhuhsat_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpsuhuhsat, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpsuhuhsat_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpybus, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpybv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_64, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_64_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_acc, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_acc_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsat_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsat_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsrs, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsrs_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhss, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhss_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhvsrs, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhvsrs_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyieoh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyieoh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewh_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewh_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyih, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiowh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiowh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_acc, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_acc_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_64_acc, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_64_acc_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_sacc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_sacc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_sacc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_sacc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyub, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_acc, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_acc_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmux, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmux_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnavgb, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnavgb_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnavgh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnavgh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnavgub, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnavgub_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnavgw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnavgw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnormamth, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnormamth_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnormamtw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnormamtw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnot, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnot_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vor, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vor_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackeb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackeb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackeh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackeh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackhb_sat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackhb_sat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackhub_sat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackhub_sat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackob, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackob_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackoh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackoh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackwh_sat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackwh_sat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackwuh_sat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackwuh_sat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpopcounth, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpopcounth_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vprefixqb, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vprefixqb_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vprefixqh, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vprefixqh_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vprefixqw, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vprefixqw_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrdelta, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrdelta_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt, "v65" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_128B, "v65" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_acc, "v65" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_acc_128B, "v65" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt, "v65" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_128B, "v65" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_acc, "v65" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_acc_128B, "v65" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vror, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vror_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrotr, "v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrotr_128B, "v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vroundhb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vroundhb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vroundhub, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vroundhub_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrounduhub, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrounduhub_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrounduwuh, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrounduwuh_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vroundwh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vroundwh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vroundwuh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vroundwuh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsatdw, "v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsatdw_128B, "v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsathub, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsathub_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsatuwuh, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsatuwuh_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsatwh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsatwh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshufeh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshufeh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshuffb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshuffb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshuffeb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshuffeb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshuffh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshuffh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshuffob, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshuffob_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshuffvdd, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshuffvdd_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshufoeb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshufoeb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshufoeh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshufoeh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshufoh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshufoh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubb_dv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubb_dv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_dv, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_dv_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubcarry, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubh_dv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubh_dv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_dv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_dv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubhw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubhw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsububh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsububh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsububsat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_dv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_dv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubububb_sat, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubububb_sat_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_dv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_dv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubuhw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubuhw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_dv, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_dv_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubw_dv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubw_dv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_dv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_dv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vswap, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vswap_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vunpackb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vunpackb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vunpackh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vunpackh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vunpackob, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vunpackob_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vunpackoh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vunpackoh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vunpackub, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vunpackub_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vunpackuh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vunpackuh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vxor, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vxor_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vzb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vzb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vzh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vzh_128B, "v60,v62,v65,v66" },
+ };
+
+ // Sort the tables on first execution so we can binary search them.
+ auto SortCmp = [](const BuiltinAndString &LHS, const BuiltinAndString &RHS) {
+ return LHS.BuiltinID < RHS.BuiltinID;
+ };
+ static const bool SortOnce =
+ (std::sort(std::begin(ValidCPU), std::end(ValidCPU), SortCmp),
+ std::sort(std::begin(ValidHVX), std::end(ValidHVX), SortCmp), true);
+ (void)SortOnce;
+ auto LowerBoundCmp = [](const BuiltinAndString &BI, unsigned BuiltinID) {
+ return BI.BuiltinID < BuiltinID;
};
const TargetInfo &TI = Context.getTargetInfo();
- auto FC = ValidCPU.find(BuiltinID);
- if (FC != ValidCPU.end()) {
+ const BuiltinAndString *FC =
+ std::lower_bound(std::begin(ValidCPU), std::end(ValidCPU), BuiltinID,
+ LowerBoundCmp);
+ if (FC != std::end(ValidCPU) && FC->BuiltinID == BuiltinID) {
const TargetOptions &Opts = TI.getTargetOpts();
StringRef CPU = Opts.CPU;
if (!CPU.empty()) {
assert(CPU.startswith("hexagon") && "Unexpected CPU name");
CPU.consume_front("hexagon");
- if (llvm::none_of(FC->second, [CPU](StringRef S) { return S == CPU; }))
+ SmallVector<StringRef, 3> CPUs;
+ StringRef(FC->Str).split(CPUs, ',');
+ if (llvm::none_of(CPUs, [CPU](StringRef S) { return S == CPU; }))
return Diag(TheCall->getBeginLoc(),
diag::err_hexagon_builtin_unsupported_cpu);
}
}
- auto FH = ValidHVX.find(BuiltinID);
- if (FH != ValidHVX.end()) {
+ const BuiltinAndString *FH =
+ std::lower_bound(std::begin(ValidHVX), std::end(ValidHVX), BuiltinID,
+ LowerBoundCmp);
+ if (FH != std::end(ValidHVX) && FH->BuiltinID == BuiltinID) {
if (!TI.hasFeature("hvx"))
return Diag(TheCall->getBeginLoc(),
diag::err_hexagon_builtin_requires_hvx);
- bool IsValid = llvm::any_of(FH->second,
+ SmallVector<StringRef, 3> HVXs;
+ StringRef(FH->Str).split(HVXs, ',');
+ bool IsValid = llvm::any_of(HVXs,
[&TI] (StringRef V) {
std::string F = "hvx" + V.str();
return TI.hasFeature(F);
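
For reference, the lookup scheme these hunks adopt (replacing the old std::map lookups) can be sketched as a small standalone program: a plain static table that is sorted exactly once through a dynamically initialized static, then searched with std::lower_bound. This is only an illustrative sketch; the IDs and CPU strings below are placeholders, not entries from the real Hexagon tables.

#include <algorithm>
#include <cstdio>
#include <iterator>

struct BuiltinAndString {
  unsigned BuiltinID;
  const char *Str; // comma-separated list of supported CPU revisions
};

// Placeholder data; the real tables map Hexagon builtin IDs to CPU lists.
static BuiltinAndString Table[] = {
    {42, "v62,v65,v66"},
    {7, "v60,v62,v65,v66"},
    {19, "v66"},
};

static const char *lookup(unsigned ID) {
  // Sort exactly once, on first use, via a dynamically initialized static.
  static const bool SortOnce =
      (std::sort(std::begin(Table), std::end(Table),
                 [](const BuiltinAndString &L, const BuiltinAndString &R) {
                   return L.BuiltinID < R.BuiltinID;
                 }),
       true);
  (void)SortOnce;

  // Binary search for the first entry whose ID is not less than the key.
  const BuiltinAndString *F = std::lower_bound(
      std::begin(Table), std::end(Table), ID,
      [](const BuiltinAndString &BI, unsigned Key) {
        return BI.BuiltinID < Key;
      });
  if (F == std::end(Table) || F->BuiltinID != ID)
    return nullptr;
  return F->Str;
}

int main() {
  if (const char *CPUs = lookup(19))
    std::printf("builtin 19 is valid on: %s\n", CPUs);
}

The point of the change appears to be avoiding the construction of a heap-allocated map of vectors the first time the check runs; a sorted static array of POD entries gives the same lookups with a single one-time sort.
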
@@ -2551,15 +2661,17 @@ bool Sema::CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall) {
bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
struct ArgInfo {
- ArgInfo(unsigned O, bool S, unsigned W, unsigned A)
- : OpNum(O), IsSigned(S), BitWidth(W), Align(A) {}
- unsigned OpNum = 0;
- bool IsSigned = false;
- unsigned BitWidth = 0;
- unsigned Align = 0;
+ uint8_t OpNum;
+ bool IsSigned;
+ uint8_t BitWidth;
+ uint8_t Align;
+ };
+ struct BuiltinInfo {
+ unsigned BuiltinID;
+ ArgInfo Infos[2];
};
- static const std::map<unsigned, std::vector<ArgInfo>> Infos = {
+ static BuiltinInfo Infos[] = {
{ Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} },
{ Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} },
{ Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} },
@@ -2745,15 +2857,33 @@ bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
{{ 3, false, 1, 0 }} },
};
- auto F = Infos.find(BuiltinID);
- if (F == Infos.end())
+ // Use a dynamically initialized static to sort the table exactly once on
+ // first run.
+ static const bool SortOnce =
+ (std::sort(std::begin(Infos), std::end(Infos),
+ [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) {
+ return LHS.BuiltinID < RHS.BuiltinID;
+ }),
+ true);
+ (void)SortOnce;
+
+ const BuiltinInfo *F =
+ std::lower_bound(std::begin(Infos), std::end(Infos), BuiltinID,
+ [](const BuiltinInfo &BI, unsigned BuiltinID) {
+ return BI.BuiltinID < BuiltinID;
+ });
+ if (F == std::end(Infos) || F->BuiltinID != BuiltinID)
return false;
bool Error = false;
- for (const ArgInfo &A : F->second) {
- int32_t Min = A.IsSigned ? -(1 << (A.BitWidth-1)) : 0;
- int32_t Max = (1 << (A.IsSigned ? A.BitWidth-1 : A.BitWidth)) - 1;
+ for (const ArgInfo &A : F->Infos) {
+ // Ignore empty ArgInfo elements.
+ if (A.BitWidth == 0)
+ continue;
+
+ int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0;
+ int32_t Max = (1 << (A.IsSigned ? A.BitWidth - 1 : A.BitWidth)) - 1;
if (!A.Align) {
Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
} else {
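
As an aside, the Min/Max arithmetic in the loop above simply spells out the inclusive range of a BitWidth-bit immediate. A standalone illustration follows; the widths below are examples, not entries taken from the table.

#include <cstdint>
#include <cstdio>

// Same arithmetic as in the loop above: a BitWidth-bit immediate, signed or
// unsigned, has these inclusive bounds.
static void printRange(bool IsSigned, unsigned BitWidth) {
  int32_t Min = IsSigned ? -(1 << (BitWidth - 1)) : 0;
  int32_t Max = (1 << (IsSigned ? BitWidth - 1 : BitWidth)) - 1;
  std::printf("%s %u-bit immediate: [%d, %d]\n",
              IsSigned ? "signed" : "unsigned", BitWidth, Min, Max);
}

int main() {
  printRange(true, 4);  // [-8, 7]
  printRange(false, 3); // [0, 7]
}
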
@@ -2794,7 +2924,7 @@ bool Sema::CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break;
case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break;
case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break;
- // MSA instrinsics. Instructions (which the intrinsics maps to) which use the
+ // MSA intrinsics. Instructions (which the intrinsics maps to) which use the
// df/m field.
// These intrinsics take an unsigned 3 bit immediate.
case Mips::BI__builtin_msa_bclri_b:
@@ -2937,14 +3067,14 @@ bool Sema::CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
case Mips::BI__builtin_msa_ldi_h:
case Mips::BI__builtin_msa_ldi_w:
case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break;
- case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 16; break;
- case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 16; break;
- case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 16; break;
- case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 16; break;
- case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 16; break;
- case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 16; break;
- case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 16; break;
- case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 16; break;
+ case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break;
+ case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break;
+ case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break;
+ case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break;
+ case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break;
+ case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break;
+ case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break;
+ case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break;
}
if (!m)
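
The m values in the MSA ld/st cases change from a blanket 16 to the element size because the accepted byte offsets must be multiples of the element width: the encoded field is a 10-bit signed immediate scaled by 1, 2, 4, or 8, which matches the -512..511, -1024..1022, -2048..2044, and -4096..4088 bounds above. A standalone sketch of that constraint; the helper name here is made up, not part of Sema.

#include <cstdio>

// Hypothetical helper mirroring the intent of the check: the byte offset must
// lie in [l, u] and be a multiple of the element size m.
static bool isValidMsaOffset(int val, int l, int u, int m) {
  return val >= l && val <= u && (val % m) == 0;
}

int main() {
  // ld_w: -2048..2044 in multiples of 4 (a 10-bit signed immediate times 4).
  std::printf("%d %d %d\n", isValidMsaOffset(2044, -2048, 2044, 4),
              isValidMsaOffset(2045, -2048, 2044, 4),
              isValidMsaOffset(-2048, -2048, 2044, 4));
}
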
@@ -4110,7 +4240,7 @@ bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
CheckAbsoluteValueFunction(TheCall, FDecl);
CheckMaxUnsignedZero(TheCall, FDecl);
- if (getLangOpts().ObjC1)
+ if (getLangOpts().ObjC)
DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs);
unsigned CMId = FDecl->getMemoryFunctionKind();
@@ -5692,7 +5822,8 @@ bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) {
if (!Arg->isTypeDependent() && !Arg->isValueDependent()) {
if (const auto *UE =
dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts()))
- if (UE->getKind() == UETT_AlignOf)
+ if (UE->getKind() == UETT_AlignOf ||
+ UE->getKind() == UETT_PreferredAlignOf)
Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof)
<< Arg->getSourceRange();
@@ -6427,13 +6558,12 @@ checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
return SLCT_NotALiteral;
}
case Stmt::BinaryOperatorClass: {
- llvm::APSInt LResult;
- llvm::APSInt RResult;
-
const BinaryOperator *BinOp = cast<BinaryOperator>(E);
// A string literal + an int offset is still a string literal.
if (BinOp->isAdditiveOp()) {
+ Expr::EvalResult LResult, RResult;
+
bool LIsInt = BinOp->getLHS()->EvaluateAsInt(LResult, S.Context);
bool RIsInt = BinOp->getRHS()->EvaluateAsInt(RResult, S.Context);
@@ -6442,12 +6572,12 @@ checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
if (LIsInt) {
if (BinOpKind == BO_Add) {
- sumOffsets(Offset, LResult, BinOpKind, RIsInt);
+ sumOffsets(Offset, LResult.Val.getInt(), BinOpKind, RIsInt);
E = BinOp->getRHS();
goto tryAgain;
}
} else {
- sumOffsets(Offset, RResult, BinOpKind, RIsInt);
+ sumOffsets(Offset, RResult.Val.getInt(), BinOpKind, RIsInt);
E = BinOp->getLHS();
goto tryAgain;
}
@@ -6460,9 +6590,10 @@ checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
const UnaryOperator *UnaOp = cast<UnaryOperator>(E);
auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr());
if (UnaOp->getOpcode() == UO_AddrOf && ASE) {
- llvm::APSInt IndexResult;
+ Expr::EvalResult IndexResult;
if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context)) {
- sumOffsets(Offset, IndexResult, BO_Add, /*RHS is int*/ true);
+ sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add,
+ /*RHS is int*/ true);
E = ASE->getBase();
goto tryAgain;
}
@@ -7079,6 +7210,8 @@ public:
const char *startSpecifier,
unsigned specifierLen) override;
+ void handleInvalidMaskType(StringRef MaskType) override;
+
bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
const char *startSpecifier,
unsigned specifierLen) override;
@@ -7130,6 +7263,10 @@ bool CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier(
CS.getStart(), CS.getLength());
}
+void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) {
+ S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size);
+}
+
bool CheckPrintfHandler::HandleAmount(
const analyze_format_string::OptionalAmount &Amt,
unsigned k, const char *startSpecifier,
@@ -10201,8 +10338,8 @@ static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
Expr *OriginalInit = Init->IgnoreParenImpCasts();
unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context);
- llvm::APSInt Value;
- if (!OriginalInit->EvaluateAsInt(Value, S.Context,
+ Expr::EvalResult Result;
+ if (!OriginalInit->EvaluateAsInt(Result, S.Context,
Expr::SE_AllowSideEffects)) {
// The RHS is not constant. If the RHS has an enum type, make sure the
// bitfield is wide enough to hold all the values of the enum without
@@ -10258,6 +10395,8 @@ static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
return false;
}
+ llvm::APSInt Value = Result.Val.getInt();
+
unsigned OriginalWidth = Value.getBitWidth();
if (!Value.isSigned() || Value.isNegative())
@@ -10310,7 +10449,7 @@ static void AnalyzeAssignment(Sema &S, BinaryOperator *E) {
}
AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());
-
+
// Diagnose implicitly sequentially-consistent atomic assignment.
if (E->getLHS()->getType()->isAtomicType())
S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst);
@@ -10870,8 +11009,11 @@ CheckImplicitConversion(Sema &S, Expr *E, QualType T, SourceLocation CC,
if (SourceRange.Width > TargetRange.Width) {
// If the source is a constant, use a default-on diagnostic.
// TODO: this should happen for bitfield stores, too.
- llvm::APSInt Value(32);
- if (E->EvaluateAsInt(Value, S.Context, Expr::SE_AllowSideEffects)) {
+ Expr::EvalResult Result;
+ if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) {
+ llvm::APSInt Value(32);
+ Value = Result.Val.getInt();
+
if (S.SourceMgr.isInSystemMacro(CC))
return;
@@ -10896,15 +11038,29 @@ CheckImplicitConversion(Sema &S, Expr *E, QualType T, SourceLocation CC,
return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision);
}
+ if (TargetRange.Width > SourceRange.Width) {
+ if (auto *UO = dyn_cast<UnaryOperator>(E))
+ if (UO->getOpcode() == UO_Minus)
+ if (Source->isUnsignedIntegerType()) {
+ if (Target->isUnsignedIntegerType())
+ return DiagnoseImpCast(S, E, T, CC,
+ diag::warn_impcast_high_order_zero_bits);
+ if (Target->isSignedIntegerType())
+ return DiagnoseImpCast(S, E, T, CC,
+ diag::warn_impcast_nonnegative_result);
+ }
+ }
+
if (TargetRange.Width == SourceRange.Width && !TargetRange.NonNegative &&
SourceRange.NonNegative && Source->isSignedIntegerType()) {
// Warn when doing a signed to signed conversion, warn if the positive
// source value is exactly the width of the target type, which will
// cause a negative value to be stored.
- llvm::APSInt Value;
- if (E->EvaluateAsInt(Value, S.Context, Expr::SE_AllowSideEffects) &&
+ Expr::EvalResult Result;
+ if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects) &&
!S.SourceMgr.isInSystemMacro(CC)) {
+ llvm::APSInt Value = Result.Val.getInt();
if (isSameWidthConstantConversion(S, E, T, CC)) {
std::string PrettySourceValue = Value.toString(10);
std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange);
@@ -12059,6 +12215,18 @@ bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
if (!Param->getType().isConstQualified())
Diag(Param->getLocation(), diag::err_attribute_pointers_only)
<< Attr->getSpelling() << 1;
+
+ // Check for parameter names shadowing fields from the class.
+ if (LangOpts.CPlusPlus && !Param->isInvalidDecl()) {
+ // The owning context for the parameter should be the function, but we
+ // want to see if this function's declaration context is a record.
+ DeclContext *DC = Param->getDeclContext();
+ if (DC && DC->isFunctionOrMethod()) {
+ if (auto *RD = dyn_cast<CXXRecordDecl>(DC->getParent()))
+ CheckShadowInheritedFields(Param->getLocation(), Param->getDeclName(),
+ RD, /*DeclIsField*/ false);
+ }
+ }
}
return HasInvalidParm;
@@ -12191,9 +12359,11 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
if (!ArrayTy)
return;
- llvm::APSInt index;
- if (!IndexExpr->EvaluateAsInt(index, Context, Expr::SE_AllowSideEffects))
+ Expr::EvalResult Result;
+ if (!IndexExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects))
return;
+
+ llvm::APSInt index = Result.Val.getInt();
if (IndexNegated)
index = -index;
diff --git a/lib/Sema/SemaCodeComplete.cpp b/lib/Sema/SemaCodeComplete.cpp
index acf43650bb..773dd46ac4 100644
--- a/lib/Sema/SemaCodeComplete.cpp
+++ b/lib/Sema/SemaCodeComplete.cpp
@@ -10,6 +10,8 @@
// This file defines the code-completion semantic actions.
//
//===----------------------------------------------------------------------===//
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/ExprCXX.h"
@@ -42,315 +44,309 @@ using namespace clang;
using namespace sema;
namespace {
- /// A container of code-completion results.
- class ResultBuilder {
- public:
- /// The type of a name-lookup filter, which can be provided to the
- /// name-lookup routines to specify which declarations should be included in
- /// the result set (when it returns true) and which declarations should be
- /// filtered out (returns false).
- typedef bool (ResultBuilder::*LookupFilter)(const NamedDecl *) const;
-
- typedef CodeCompletionResult Result;
+/// A container of code-completion results.
+class ResultBuilder {
+public:
+ /// The type of a name-lookup filter, which can be provided to the
+ /// name-lookup routines to specify which declarations should be included in
+ /// the result set (when it returns true) and which declarations should be
+ /// filtered out (returns false).
+ typedef bool (ResultBuilder::*LookupFilter)(const NamedDecl *) const;
- private:
- /// The actual results we have found.
- std::vector<Result> Results;
+ typedef CodeCompletionResult Result;
- /// A record of all of the declarations we have found and placed
- /// into the result set, used to ensure that no declaration ever gets into
- /// the result set twice.
- llvm::SmallPtrSet<const Decl*, 16> AllDeclsFound;
+private:
+ /// The actual results we have found.
+ std::vector<Result> Results;
- typedef std::pair<const NamedDecl *, unsigned> DeclIndexPair;
+ /// A record of all of the declarations we have found and placed
+ /// into the result set, used to ensure that no declaration ever gets into
+ /// the result set twice.
+ llvm::SmallPtrSet<const Decl *, 16> AllDeclsFound;
- /// An entry in the shadow map, which is optimized to store
- /// a single (declaration, index) mapping (the common case) but
- /// can also store a list of (declaration, index) mappings.
- class ShadowMapEntry {
- typedef SmallVector<DeclIndexPair, 4> DeclIndexPairVector;
+ typedef std::pair<const NamedDecl *, unsigned> DeclIndexPair;
- /// Contains either the solitary NamedDecl * or a vector
- /// of (declaration, index) pairs.
- llvm::PointerUnion<const NamedDecl *, DeclIndexPairVector*> DeclOrVector;
+ /// An entry in the shadow map, which is optimized to store
+ /// a single (declaration, index) mapping (the common case) but
+ /// can also store a list of (declaration, index) mappings.
+ class ShadowMapEntry {
+ typedef SmallVector<DeclIndexPair, 4> DeclIndexPairVector;
- /// When the entry contains a single declaration, this is
- /// the index associated with that entry.
- unsigned SingleDeclIndex;
+ /// Contains either the solitary NamedDecl * or a vector
+ /// of (declaration, index) pairs.
+ llvm::PointerUnion<const NamedDecl *, DeclIndexPairVector *> DeclOrVector;
- public:
- ShadowMapEntry() : DeclOrVector(), SingleDeclIndex(0) { }
+ /// When the entry contains a single declaration, this is
+ /// the index associated with that entry.
+ unsigned SingleDeclIndex;
- void Add(const NamedDecl *ND, unsigned Index) {
- if (DeclOrVector.isNull()) {
- // 0 - > 1 elements: just set the single element information.
- DeclOrVector = ND;
- SingleDeclIndex = Index;
- return;
- }
+ public:
+ ShadowMapEntry() : DeclOrVector(), SingleDeclIndex(0) {}
- if (const NamedDecl *PrevND =
- DeclOrVector.dyn_cast<const NamedDecl *>()) {
- // 1 -> 2 elements: create the vector of results and push in the
- // existing declaration.
- DeclIndexPairVector *Vec = new DeclIndexPairVector;
- Vec->push_back(DeclIndexPair(PrevND, SingleDeclIndex));
- DeclOrVector = Vec;
- }
+ void Add(const NamedDecl *ND, unsigned Index) {
+ if (DeclOrVector.isNull()) {
+      // 0 -> 1 elements: just set the single element information.
+ DeclOrVector = ND;
+ SingleDeclIndex = Index;
+ return;
+ }
- // Add the new element to the end of the vector.
- DeclOrVector.get<DeclIndexPairVector*>()->push_back(
- DeclIndexPair(ND, Index));
+ if (const NamedDecl *PrevND =
+ DeclOrVector.dyn_cast<const NamedDecl *>()) {
+ // 1 -> 2 elements: create the vector of results and push in the
+ // existing declaration.
+ DeclIndexPairVector *Vec = new DeclIndexPairVector;
+ Vec->push_back(DeclIndexPair(PrevND, SingleDeclIndex));
+ DeclOrVector = Vec;
}
- void Destroy() {
- if (DeclIndexPairVector *Vec
- = DeclOrVector.dyn_cast<DeclIndexPairVector *>()) {
- delete Vec;
- DeclOrVector = ((NamedDecl *)nullptr);
- }
+ // Add the new element to the end of the vector.
+ DeclOrVector.get<DeclIndexPairVector *>()->push_back(
+ DeclIndexPair(ND, Index));
+ }
+
+ void Destroy() {
+ if (DeclIndexPairVector *Vec =
+ DeclOrVector.dyn_cast<DeclIndexPairVector *>()) {
+ delete Vec;
+ DeclOrVector = ((NamedDecl *)nullptr);
}
+ }
- // Iteration.
- class iterator;
- iterator begin() const;
- iterator end() const;
- };
+ // Iteration.
+ class iterator;
+ iterator begin() const;
+ iterator end() const;
+ };
- /// A mapping from declaration names to the declarations that have
- /// this name within a particular scope and their index within the list of
- /// results.
- typedef llvm::DenseMap<DeclarationName, ShadowMapEntry> ShadowMap;
+ /// A mapping from declaration names to the declarations that have
+ /// this name within a particular scope and their index within the list of
+ /// results.
+ typedef llvm::DenseMap<DeclarationName, ShadowMapEntry> ShadowMap;
- /// The semantic analysis object for which results are being
- /// produced.
- Sema &SemaRef;
+ /// The semantic analysis object for which results are being
+ /// produced.
+ Sema &SemaRef;
- /// The allocator used to allocate new code-completion strings.
- CodeCompletionAllocator &Allocator;
+ /// The allocator used to allocate new code-completion strings.
+ CodeCompletionAllocator &Allocator;
- CodeCompletionTUInfo &CCTUInfo;
+ CodeCompletionTUInfo &CCTUInfo;
- /// If non-NULL, a filter function used to remove any code-completion
- /// results that are not desirable.
- LookupFilter Filter;
+ /// If non-NULL, a filter function used to remove any code-completion
+ /// results that are not desirable.
+ LookupFilter Filter;
- /// Whether we should allow declarations as
- /// nested-name-specifiers that would otherwise be filtered out.
- bool AllowNestedNameSpecifiers;
+ /// Whether we should allow declarations as
+ /// nested-name-specifiers that would otherwise be filtered out.
+ bool AllowNestedNameSpecifiers;
- /// If set, the type that we would prefer our resulting value
- /// declarations to have.
- ///
- /// Closely matching the preferred type gives a boost to a result's
- /// priority.
- CanQualType PreferredType;
+ /// If set, the type that we would prefer our resulting value
+ /// declarations to have.
+ ///
+ /// Closely matching the preferred type gives a boost to a result's
+ /// priority.
+ CanQualType PreferredType;
- /// A list of shadow maps, which is used to model name hiding at
- /// different levels of, e.g., the inheritance hierarchy.
- std::list<ShadowMap> ShadowMaps;
+ /// A list of shadow maps, which is used to model name hiding at
+ /// different levels of, e.g., the inheritance hierarchy.
+ std::list<ShadowMap> ShadowMaps;
- /// If we're potentially referring to a C++ member function, the set
- /// of qualifiers applied to the object type.
- Qualifiers ObjectTypeQualifiers;
+ /// If we're potentially referring to a C++ member function, the set
+ /// of qualifiers applied to the object type.
+ Qualifiers ObjectTypeQualifiers;
- /// Whether the \p ObjectTypeQualifiers field is active.
- bool HasObjectTypeQualifiers;
+ /// Whether the \p ObjectTypeQualifiers field is active.
+ bool HasObjectTypeQualifiers;
- /// The selector that we prefer.
- Selector PreferredSelector;
+ /// The selector that we prefer.
+ Selector PreferredSelector;
- /// The completion context in which we are gathering results.
- CodeCompletionContext CompletionContext;
+ /// The completion context in which we are gathering results.
+ CodeCompletionContext CompletionContext;
- /// If we are in an instance method definition, the \@implementation
- /// object.
- ObjCImplementationDecl *ObjCImplementation;
+ /// If we are in an instance method definition, the \@implementation
+ /// object.
+ ObjCImplementationDecl *ObjCImplementation;
- void AdjustResultPriorityForDecl(Result &R);
+ void AdjustResultPriorityForDecl(Result &R);
- void MaybeAddConstructorResults(Result R);
+ void MaybeAddConstructorResults(Result R);
- public:
- explicit ResultBuilder(Sema &SemaRef, CodeCompletionAllocator &Allocator,
- CodeCompletionTUInfo &CCTUInfo,
- const CodeCompletionContext &CompletionContext,
- LookupFilter Filter = nullptr)
+public:
+ explicit ResultBuilder(Sema &SemaRef, CodeCompletionAllocator &Allocator,
+ CodeCompletionTUInfo &CCTUInfo,
+ const CodeCompletionContext &CompletionContext,
+ LookupFilter Filter = nullptr)
: SemaRef(SemaRef), Allocator(Allocator), CCTUInfo(CCTUInfo),
- Filter(Filter),
- AllowNestedNameSpecifiers(false), HasObjectTypeQualifiers(false),
- CompletionContext(CompletionContext),
- ObjCImplementation(nullptr)
- {
- // If this is an Objective-C instance method definition, dig out the
- // corresponding implementation.
- switch (CompletionContext.getKind()) {
- case CodeCompletionContext::CCC_Expression:
- case CodeCompletionContext::CCC_ObjCMessageReceiver:
- case CodeCompletionContext::CCC_ParenthesizedExpression:
- case CodeCompletionContext::CCC_Statement:
- case CodeCompletionContext::CCC_Recovery:
- if (ObjCMethodDecl *Method = SemaRef.getCurMethodDecl())
- if (Method->isInstanceMethod())
- if (ObjCInterfaceDecl *Interface = Method->getClassInterface())
- ObjCImplementation = Interface->getImplementation();
- break;
+ Filter(Filter), AllowNestedNameSpecifiers(false),
+ HasObjectTypeQualifiers(false), CompletionContext(CompletionContext),
+ ObjCImplementation(nullptr) {
+ // If this is an Objective-C instance method definition, dig out the
+ // corresponding implementation.
+ switch (CompletionContext.getKind()) {
+ case CodeCompletionContext::CCC_Expression:
+ case CodeCompletionContext::CCC_ObjCMessageReceiver:
+ case CodeCompletionContext::CCC_ParenthesizedExpression:
+ case CodeCompletionContext::CCC_Statement:
+ case CodeCompletionContext::CCC_Recovery:
+ if (ObjCMethodDecl *Method = SemaRef.getCurMethodDecl())
+ if (Method->isInstanceMethod())
+ if (ObjCInterfaceDecl *Interface = Method->getClassInterface())
+ ObjCImplementation = Interface->getImplementation();
+ break;
- default:
- break;
- }
- }
-
- /// Determine the priority for a reference to the given declaration.
- unsigned getBasePriority(const NamedDecl *D);
-
- /// Whether we should include code patterns in the completion
- /// results.
- bool includeCodePatterns() const {
- return SemaRef.CodeCompleter &&
- SemaRef.CodeCompleter->includeCodePatterns();
- }
-
- /// Set the filter used for code-completion results.
- void setFilter(LookupFilter Filter) {
- this->Filter = Filter;
- }
-
- Result *data() { return Results.empty()? nullptr : &Results.front(); }
- unsigned size() const { return Results.size(); }
- bool empty() const { return Results.empty(); }
-
- /// Specify the preferred type.
- void setPreferredType(QualType T) {
- PreferredType = SemaRef.Context.getCanonicalType(T);
+ default:
+ break;
}
+ }
- /// Set the cv-qualifiers on the object type, for us in filtering
- /// calls to member functions.
- ///
- /// When there are qualifiers in this set, they will be used to filter
- /// out member functions that aren't available (because there will be a
- /// cv-qualifier mismatch) or prefer functions with an exact qualifier
- /// match.
- void setObjectTypeQualifiers(Qualifiers Quals) {
- ObjectTypeQualifiers = Quals;
- HasObjectTypeQualifiers = true;
- }
-
- /// Set the preferred selector.
- ///
- /// When an Objective-C method declaration result is added, and that
- /// method's selector matches this preferred selector, we give that method
- /// a slight priority boost.
- void setPreferredSelector(Selector Sel) {
- PreferredSelector = Sel;
- }
-
- /// Retrieve the code-completion context for which results are
- /// being collected.
- const CodeCompletionContext &getCompletionContext() const {
- return CompletionContext;
- }
-
- /// Specify whether nested-name-specifiers are allowed.
- void allowNestedNameSpecifiers(bool Allow = true) {
- AllowNestedNameSpecifiers = Allow;
- }
-
- /// Return the semantic analysis object for which we are collecting
- /// code completion results.
- Sema &getSema() const { return SemaRef; }
-
- /// Retrieve the allocator used to allocate code completion strings.
- CodeCompletionAllocator &getAllocator() const { return Allocator; }
-
- CodeCompletionTUInfo &getCodeCompletionTUInfo() const { return CCTUInfo; }
-
- /// Determine whether the given declaration is at all interesting
- /// as a code-completion result.
- ///
- /// \param ND the declaration that we are inspecting.
- ///
- /// \param AsNestedNameSpecifier will be set true if this declaration is
- /// only interesting when it is a nested-name-specifier.
- bool isInterestingDecl(const NamedDecl *ND,
- bool &AsNestedNameSpecifier) const;
-
- /// Check whether the result is hidden by the Hiding declaration.
- ///
- /// \returns true if the result is hidden and cannot be found, false if
- /// the hidden result could still be found. When false, \p R may be
- /// modified to describe how the result can be found (e.g., via extra
- /// qualification).
- bool CheckHiddenResult(Result &R, DeclContext *CurContext,
- const NamedDecl *Hiding);
-
- /// Add a new result to this result set (if it isn't already in one
- /// of the shadow maps), or replace an existing result (for, e.g., a
- /// redeclaration).
- ///
- /// \param R the result to add (if it is unique).
- ///
- /// \param CurContext the context in which this result will be named.
- void MaybeAddResult(Result R, DeclContext *CurContext = nullptr);
-
- /// Add a new result to this result set, where we already know
- /// the hiding declaration (if any).
- ///
- /// \param R the result to add (if it is unique).
- ///
- /// \param CurContext the context in which this result will be named.
- ///
- /// \param Hiding the declaration that hides the result.
- ///
- /// \param InBaseClass whether the result was found in a base
- /// class of the searched context.
- void AddResult(Result R, DeclContext *CurContext, NamedDecl *Hiding,
- bool InBaseClass);
-
- /// Add a new non-declaration result to this result set.
- void AddResult(Result R);
-
- /// Enter into a new scope.
- void EnterNewScope();
-
- /// Exit from the current scope.
- void ExitScope();
-
- /// Ignore this declaration, if it is seen again.
- void Ignore(const Decl *D) { AllDeclsFound.insert(D->getCanonicalDecl()); }
-
- /// Add a visited context.
- void addVisitedContext(DeclContext *Ctx) {
- CompletionContext.addVisitedContext(Ctx);
- }
-
- /// \name Name lookup predicates
- ///
- /// These predicates can be passed to the name lookup functions to filter the
- /// results of name lookup. All of the predicates have the same type, so that
- ///
- //@{
- bool IsOrdinaryName(const NamedDecl *ND) const;
- bool IsOrdinaryNonTypeName(const NamedDecl *ND) const;
- bool IsIntegralConstantValue(const NamedDecl *ND) const;
- bool IsOrdinaryNonValueName(const NamedDecl *ND) const;
- bool IsNestedNameSpecifier(const NamedDecl *ND) const;
- bool IsEnum(const NamedDecl *ND) const;
- bool IsClassOrStruct(const NamedDecl *ND) const;
- bool IsUnion(const NamedDecl *ND) const;
- bool IsNamespace(const NamedDecl *ND) const;
- bool IsNamespaceOrAlias(const NamedDecl *ND) const;
- bool IsType(const NamedDecl *ND) const;
- bool IsMember(const NamedDecl *ND) const;
- bool IsObjCIvar(const NamedDecl *ND) const;
- bool IsObjCMessageReceiver(const NamedDecl *ND) const;
- bool IsObjCMessageReceiverOrLambdaCapture(const NamedDecl *ND) const;
- bool IsObjCCollection(const NamedDecl *ND) const;
- bool IsImpossibleToSatisfy(const NamedDecl *ND) const;
- //@}
- };
-}
+ /// Determine the priority for a reference to the given declaration.
+ unsigned getBasePriority(const NamedDecl *D);
+
+ /// Whether we should include code patterns in the completion
+ /// results.
+ bool includeCodePatterns() const {
+ return SemaRef.CodeCompleter &&
+ SemaRef.CodeCompleter->includeCodePatterns();
+ }
+
+ /// Set the filter used for code-completion results.
+ void setFilter(LookupFilter Filter) { this->Filter = Filter; }
+
+ Result *data() { return Results.empty() ? nullptr : &Results.front(); }
+ unsigned size() const { return Results.size(); }
+ bool empty() const { return Results.empty(); }
+
+ /// Specify the preferred type.
+ void setPreferredType(QualType T) {
+ PreferredType = SemaRef.Context.getCanonicalType(T);
+ }
+
+  /// Set the cv-qualifiers on the object type, for use in filtering
+ /// calls to member functions.
+ ///
+ /// When there are qualifiers in this set, they will be used to filter
+ /// out member functions that aren't available (because there will be a
+ /// cv-qualifier mismatch) or prefer functions with an exact qualifier
+ /// match.
+ void setObjectTypeQualifiers(Qualifiers Quals) {
+ ObjectTypeQualifiers = Quals;
+ HasObjectTypeQualifiers = true;
+ }
+
+ /// Set the preferred selector.
+ ///
+ /// When an Objective-C method declaration result is added, and that
+ /// method's selector matches this preferred selector, we give that method
+ /// a slight priority boost.
+ void setPreferredSelector(Selector Sel) { PreferredSelector = Sel; }
+
+ /// Retrieve the code-completion context for which results are
+ /// being collected.
+ const CodeCompletionContext &getCompletionContext() const {
+ return CompletionContext;
+ }
+
+ /// Specify whether nested-name-specifiers are allowed.
+ void allowNestedNameSpecifiers(bool Allow = true) {
+ AllowNestedNameSpecifiers = Allow;
+ }
+
+ /// Return the semantic analysis object for which we are collecting
+ /// code completion results.
+ Sema &getSema() const { return SemaRef; }
+
+ /// Retrieve the allocator used to allocate code completion strings.
+ CodeCompletionAllocator &getAllocator() const { return Allocator; }
+
+ CodeCompletionTUInfo &getCodeCompletionTUInfo() const { return CCTUInfo; }
+
+ /// Determine whether the given declaration is at all interesting
+ /// as a code-completion result.
+ ///
+ /// \param ND the declaration that we are inspecting.
+ ///
+ /// \param AsNestedNameSpecifier will be set true if this declaration is
+ /// only interesting when it is a nested-name-specifier.
+ bool isInterestingDecl(const NamedDecl *ND,
+ bool &AsNestedNameSpecifier) const;
+
+ /// Check whether the result is hidden by the Hiding declaration.
+ ///
+ /// \returns true if the result is hidden and cannot be found, false if
+ /// the hidden result could still be found. When false, \p R may be
+ /// modified to describe how the result can be found (e.g., via extra
+ /// qualification).
+ bool CheckHiddenResult(Result &R, DeclContext *CurContext,
+ const NamedDecl *Hiding);
+
+ /// Add a new result to this result set (if it isn't already in one
+ /// of the shadow maps), or replace an existing result (for, e.g., a
+ /// redeclaration).
+ ///
+ /// \param R the result to add (if it is unique).
+ ///
+ /// \param CurContext the context in which this result will be named.
+ void MaybeAddResult(Result R, DeclContext *CurContext = nullptr);
+
+ /// Add a new result to this result set, where we already know
+ /// the hiding declaration (if any).
+ ///
+ /// \param R the result to add (if it is unique).
+ ///
+ /// \param CurContext the context in which this result will be named.
+ ///
+ /// \param Hiding the declaration that hides the result.
+ ///
+ /// \param InBaseClass whether the result was found in a base
+ /// class of the searched context.
+ void AddResult(Result R, DeclContext *CurContext, NamedDecl *Hiding,
+ bool InBaseClass);
+
+ /// Add a new non-declaration result to this result set.
+ void AddResult(Result R);
+
+ /// Enter into a new scope.
+ void EnterNewScope();
+
+ /// Exit from the current scope.
+ void ExitScope();
+
+ /// Ignore this declaration, if it is seen again.
+ void Ignore(const Decl *D) { AllDeclsFound.insert(D->getCanonicalDecl()); }
+
+ /// Add a visited context.
+ void addVisitedContext(DeclContext *Ctx) {
+ CompletionContext.addVisitedContext(Ctx);
+ }
+
+ /// \name Name lookup predicates
+ ///
+ /// These predicates can be passed to the name lookup functions to filter the
+ /// results of name lookup. All of the predicates have the same type, so
+ /// that any one of them can be installed as the active filter via setFilter().
+ //@{
+ bool IsOrdinaryName(const NamedDecl *ND) const;
+ bool IsOrdinaryNonTypeName(const NamedDecl *ND) const;
+ bool IsIntegralConstantValue(const NamedDecl *ND) const;
+ bool IsOrdinaryNonValueName(const NamedDecl *ND) const;
+ bool IsNestedNameSpecifier(const NamedDecl *ND) const;
+ bool IsEnum(const NamedDecl *ND) const;
+ bool IsClassOrStruct(const NamedDecl *ND) const;
+ bool IsUnion(const NamedDecl *ND) const;
+ bool IsNamespace(const NamedDecl *ND) const;
+ bool IsNamespaceOrAlias(const NamedDecl *ND) const;
+ bool IsType(const NamedDecl *ND) const;
+ bool IsMember(const NamedDecl *ND) const;
+ bool IsObjCIvar(const NamedDecl *ND) const;
+ bool IsObjCMessageReceiver(const NamedDecl *ND) const;
+ bool IsObjCMessageReceiverOrLambdaCapture(const NamedDecl *ND) const;
+ bool IsObjCCollection(const NamedDecl *ND) const;
+ bool IsImpossibleToSatisfy(const NamedDecl *ND) const;
+ //@}
+};
+} // namespace
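
A rough usage sketch, not part of this commit: every predicate above has the
same signature, so a pointer to any of them fits the LookupFilter that
setFilter() expects. The helper name, Results, and PreferredTy are invented
for the illustration.

static void sketchConfigureResults(ResultBuilder &Results, QualType PreferredTy) {
  Results.setFilter(&ResultBuilder::IsEnum); // any of the predicates above works
  Results.setPreferredType(PreferredTy);     // stored as the canonical type
}
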
class ResultBuilder::ShadowMapEntry::iterator {
llvm::PointerUnion<const NamedDecl *, const DeclIndexPair *> DeclOrIterator;
@@ -366,20 +362,18 @@ public:
DeclIndexPair Value;
public:
- pointer(const DeclIndexPair &Value) : Value(Value) { }
+ pointer(const DeclIndexPair &Value) : Value(Value) {}
- const DeclIndexPair *operator->() const {
- return &Value;
- }
+ const DeclIndexPair *operator->() const { return &Value; }
};
iterator() : DeclOrIterator((NamedDecl *)nullptr), SingleDeclIndex(0) {}
iterator(const NamedDecl *SingleDecl, unsigned Index)
- : DeclOrIterator(SingleDecl), SingleDeclIndex(Index) { }
+ : DeclOrIterator(SingleDecl), SingleDeclIndex(Index) {}
iterator(const DeclIndexPair *Iterator)
- : DeclOrIterator(Iterator), SingleDeclIndex(0) { }
+ : DeclOrIterator(Iterator), SingleDeclIndex(0) {}
iterator &operator++() {
if (DeclOrIterator.is<const NamedDecl *>()) {
@@ -388,7 +382,7 @@ public:
return *this;
}
- const DeclIndexPair *I = DeclOrIterator.get<const DeclIndexPair*>();
+ const DeclIndexPair *I = DeclOrIterator.get<const DeclIndexPair *>();
++I;
DeclOrIterator = I;
return *this;
@@ -404,17 +398,15 @@ public:
if (const NamedDecl *ND = DeclOrIterator.dyn_cast<const NamedDecl *>())
return reference(ND, SingleDeclIndex);
- return *DeclOrIterator.get<const DeclIndexPair*>();
+ return *DeclOrIterator.get<const DeclIndexPair *>();
}
- pointer operator->() const {
- return pointer(**this);
- }
+ pointer operator->() const { return pointer(**this); }
friend bool operator==(const iterator &X, const iterator &Y) {
- return X.DeclOrIterator.getOpaqueValue()
- == Y.DeclOrIterator.getOpaqueValue() &&
- X.SingleDeclIndex == Y.SingleDeclIndex;
+ return X.DeclOrIterator.getOpaqueValue() ==
+ Y.DeclOrIterator.getOpaqueValue() &&
+ X.SingleDeclIndex == Y.SingleDeclIndex;
}
friend bool operator!=(const iterator &X, const iterator &Y) {
@@ -455,8 +447,7 @@ ResultBuilder::ShadowMapEntry::end() const {
/// \returns a nested name specifier that refers into the target context, or
/// NULL if no qualification is needed.
static NestedNameSpecifier *
-getRequiredQualification(ASTContext &Context,
- const DeclContext *CurContext,
+getRequiredQualification(ASTContext &Context, const DeclContext *CurContext,
const DeclContext *TargetContext) {
SmallVector<const DeclContext *, 4> TargetParents;
@@ -474,16 +465,14 @@ getRequiredQualification(ASTContext &Context,
while (!TargetParents.empty()) {
const DeclContext *Parent = TargetParents.pop_back_val();
- if (const NamespaceDecl *Namespace = dyn_cast<NamespaceDecl>(Parent)) {
+ if (const auto *Namespace = dyn_cast<NamespaceDecl>(Parent)) {
if (!Namespace->getIdentifier())
continue;
Result = NestedNameSpecifier::Create(Context, Result, Namespace);
- }
- else if (const TagDecl *TD = dyn_cast<TagDecl>(Parent))
- Result = NestedNameSpecifier::Create(Context, Result,
- false,
- Context.getTypeDeclType(TD).getTypePtr());
+ } else if (const auto *TD = dyn_cast<TagDecl>(Parent))
+ Result = NestedNameSpecifier::Create(
+ Context, Result, false, Context.getTypeDeclType(TD).getTypePtr());
}
return Result;
}
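
A small illustration of the qualifier this builds; namespace N and struct S are
invented for the example and do not come from the patch.

namespace N { struct S { static int member; }; }
// Referring to `member` from the translation-unit scope walks S's parents
// outermost-first (N, then S), so the returned specifier spells "N::S::";
// each named namespace or tag is prepended in turn.
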
@@ -496,8 +485,8 @@ static bool isReservedName(const IdentifierInfo *Id,
return false;
const char *Name = Id->getNameStart();
return Name[0] == '_' &&
- (Name[1] == '_' || (Name[1] >= 'A' && Name[1] <= 'Z' &&
- !doubleUnderscoreOnly));
+ (Name[1] == '_' ||
+ (Name[1] >= 'A' && Name[1] <= 'Z' && !doubleUnderscoreOnly));
}
// Some declarations have reserved names that we don't want to ever show.
@@ -516,9 +505,9 @@ static bool shouldIgnoreDueToReservedName(const NamedDecl *ND, Sema &SemaRef) {
// This allows for system headers providing private symbols with a single
// underscore.
if (isReservedName(Id, /*doubleUnderscoreOnly=*/true) &&
- SemaRef.SourceMgr.isInSystemHeader(
- SemaRef.SourceMgr.getSpellingLoc(ND->getLocation())))
- return true;
+ SemaRef.SourceMgr.isInSystemHeader(
+ SemaRef.SourceMgr.getSpellingLoc(ND->getLocation())))
+ return true;
return false;
}
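
A behaviour sketch for the checks above; the spellings are illustrative, and
II() stands for a hypothetical helper that yields the IdentifierInfo for a
string, which is not part of the patch.

//   isReservedName(II("__detail"),  /*doubleUnderscoreOnly=*/false) -> true
//   isReservedName(II("_Reserved"), /*doubleUnderscoreOnly=*/false) -> true
//   isReservedName(II("_Reserved"), /*doubleUnderscoreOnly=*/true)  -> false
//   isReservedName(II("_ok"),       /*doubleUnderscoreOnly=*/false) -> false
// In system headers only the double-underscore form is filtered out, so a
// library-private "_Symbol" can still show up as a completion there.
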
@@ -552,10 +541,8 @@ bool ResultBuilder::isInterestingDecl(const NamedDecl *ND,
return false;
if (Filter == &ResultBuilder::IsNestedNameSpecifier ||
- (isa<NamespaceDecl>(ND) &&
- Filter != &ResultBuilder::IsNamespace &&
- Filter != &ResultBuilder::IsNamespaceOrAlias &&
- Filter != nullptr))
+ (isa<NamespaceDecl>(ND) && Filter != &ResultBuilder::IsNamespace &&
+ Filter != &ResultBuilder::IsNamespaceOrAlias && Filter != nullptr))
AsNestedNameSpecifier = true;
// Filter out any unwanted results.
@@ -599,8 +586,7 @@ bool ResultBuilder::CheckHiddenResult(Result &R, DeclContext *CurContext,
R.QualifierIsInformative = false;
if (!R.Qualifier)
- R.Qualifier = getRequiredQualification(SemaRef.Context,
- CurContext,
+ R.Qualifier = getRequiredQualification(SemaRef.Context, CurContext,
R.Declaration->getDeclContext());
return false;
}
@@ -611,23 +597,23 @@ SimplifiedTypeClass clang::getSimplifiedTypeClass(CanQualType T) {
switch (T->getTypeClass()) {
case Type::Builtin:
switch (cast<BuiltinType>(T)->getKind()) {
- case BuiltinType::Void:
- return STC_Void;
+ case BuiltinType::Void:
+ return STC_Void;
- case BuiltinType::NullPtr:
- return STC_Pointer;
+ case BuiltinType::NullPtr:
+ return STC_Pointer;
- case BuiltinType::Overload:
- case BuiltinType::Dependent:
- return STC_Other;
+ case BuiltinType::Overload:
+ case BuiltinType::Dependent:
+ return STC_Other;
- case BuiltinType::ObjCId:
- case BuiltinType::ObjCClass:
- case BuiltinType::ObjCSel:
- return STC_ObjectiveC;
+ case BuiltinType::ObjCId:
+ case BuiltinType::ObjCClass:
+ case BuiltinType::ObjCSel:
+ return STC_ObjectiveC;
- default:
- return STC_Arithmetic;
+ default:
+ return STC_Arithmetic;
}
case Type::Complex:
@@ -679,21 +665,21 @@ SimplifiedTypeClass clang::getSimplifiedTypeClass(CanQualType T) {
QualType clang::getDeclUsageType(ASTContext &C, const NamedDecl *ND) {
ND = ND->getUnderlyingDecl();
- if (const TypeDecl *Type = dyn_cast<TypeDecl>(ND))
+ if (const auto *Type = dyn_cast<TypeDecl>(ND))
return C.getTypeDeclType(Type);
- if (const ObjCInterfaceDecl *Iface = dyn_cast<ObjCInterfaceDecl>(ND))
+ if (const auto *Iface = dyn_cast<ObjCInterfaceDecl>(ND))
return C.getObjCInterfaceType(Iface);
QualType T;
if (const FunctionDecl *Function = ND->getAsFunction())
T = Function->getCallResultType();
- else if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(ND))
+ else if (const auto *Method = dyn_cast<ObjCMethodDecl>(ND))
T = Method->getSendResultType();
- else if (const EnumConstantDecl *Enumerator = dyn_cast<EnumConstantDecl>(ND))
+ else if (const auto *Enumerator = dyn_cast<EnumConstantDecl>(ND))
T = C.getTypeDeclType(cast<EnumDecl>(Enumerator->getDeclContext()));
- else if (const ObjCPropertyDecl *Property = dyn_cast<ObjCPropertyDecl>(ND))
+ else if (const auto *Property = dyn_cast<ObjCPropertyDecl>(ND))
T = Property->getType();
- else if (const ValueDecl *Value = dyn_cast<ValueDecl>(ND))
+ else if (const auto *Value = dyn_cast<ValueDecl>(ND))
T = Value->getType();
else
return QualType();
@@ -702,12 +688,12 @@ QualType clang::getDeclUsageType(ASTContext &C, const NamedDecl *ND) {
// get down to the likely type of an expression when the entity is
// used.
do {
- if (const ReferenceType *Ref = T->getAs<ReferenceType>()) {
+ if (const auto *Ref = T->getAs<ReferenceType>()) {
T = Ref->getPointeeType();
continue;
}
- if (const PointerType *Pointer = T->getAs<PointerType>()) {
+ if (const auto *Pointer = T->getAs<PointerType>()) {
if (Pointer->getPointeeType()->isFunctionType()) {
T = Pointer->getPointeeType();
continue;
@@ -716,12 +702,12 @@ QualType clang::getDeclUsageType(ASTContext &C, const NamedDecl *ND) {
break;
}
- if (const BlockPointerType *Block = T->getAs<BlockPointerType>()) {
+ if (const auto *Block = T->getAs<BlockPointerType>()) {
T = Block->getPointeeType();
continue;
}
- if (const FunctionType *Function = T->getAs<FunctionType>()) {
+ if (const auto *Function = T->getAs<FunctionType>()) {
T = Function->getReturnType();
continue;
}
@@ -740,8 +726,7 @@ unsigned ResultBuilder::getBasePriority(const NamedDecl *ND) {
const DeclContext *LexicalDC = ND->getLexicalDeclContext();
if (LexicalDC->isFunctionOrMethod()) {
// _cmd is relatively rare
- if (const ImplicitParamDecl *ImplicitParam =
- dyn_cast<ImplicitParamDecl>(ND))
+ if (const auto *ImplicitParam = dyn_cast<ImplicitParamDecl>(ND))
if (ImplicitParam->getIdentifier() &&
ImplicitParam->getIdentifier()->isStr("_cmd"))
return CCP_ObjC_cmd;
@@ -772,10 +757,10 @@ unsigned ResultBuilder::getBasePriority(const NamedDecl *ND) {
// likely that the user will want to write a type as other declarations.
if ((isa<TypeDecl>(ND) || isa<ObjCInterfaceDecl>(ND)) &&
!(CompletionContext.getKind() == CodeCompletionContext::CCC_Statement ||
- CompletionContext.getKind()
- == CodeCompletionContext::CCC_ObjCMessageReceiver ||
- CompletionContext.getKind()
- == CodeCompletionContext::CCC_ParenthesizedExpression))
+ CompletionContext.getKind() ==
+ CodeCompletionContext::CCC_ObjCMessageReceiver ||
+ CompletionContext.getKind() ==
+ CodeCompletionContext::CCC_ParenthesizedExpression))
return CCP_Type;
return CCP_Declaration;
@@ -785,7 +770,7 @@ void ResultBuilder::AdjustResultPriorityForDecl(Result &R) {
// If this is an Objective-C method declaration whose selector matches our
// preferred selector, give it a priority boost.
if (!PreferredSelector.isNull())
- if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(R.Declaration))
+ if (const auto *Method = dyn_cast<ObjCMethodDecl>(R.Declaration))
if (PreferredSelector == Method->getSelector())
R.Priority += CCD_SelectorMatch;
@@ -799,20 +784,28 @@ void ResultBuilder::AdjustResultPriorityForDecl(Result &R) {
if (SemaRef.Context.hasSameUnqualifiedType(PreferredType, TC))
R.Priority /= CCF_ExactTypeMatch;
// Check for nearly-matching types, based on classification of each.
- else if ((getSimplifiedTypeClass(PreferredType)
- == getSimplifiedTypeClass(TC)) &&
+ else if ((getSimplifiedTypeClass(PreferredType) ==
+ getSimplifiedTypeClass(TC)) &&
!(PreferredType->isEnumeralType() && TC->isEnumeralType()))
R.Priority /= CCF_SimilarTypeMatch;
}
}
}
+DeclContext::lookup_result getConstructors(ASTContext &Context,
+ const CXXRecordDecl *Record) {
+ QualType RecordTy = Context.getTypeDeclType(Record);
+ DeclarationName ConstructorName =
+ Context.DeclarationNames.getCXXConstructorName(
+ Context.getCanonicalType(RecordTy));
+ return Record->lookup(ConstructorName);
+}
+
void ResultBuilder::MaybeAddConstructorResults(Result R) {
if (!SemaRef.getLangOpts().CPlusPlus || !R.Declaration ||
!CompletionContext.wantConstructorResults())
return;
- ASTContext &Context = SemaRef.Context;
const NamedDecl *D = R.Declaration;
const CXXRecordDecl *Record = nullptr;
if (const ClassTemplateDecl *ClassTemplate = dyn_cast<ClassTemplateDecl>(D))
@@ -830,16 +823,8 @@ void ResultBuilder::MaybeAddConstructorResults(Result R) {
if (!Record)
return;
-
- QualType RecordTy = Context.getTypeDeclType(Record);
- DeclarationName ConstructorName
- = Context.DeclarationNames.getCXXConstructorName(
- Context.getCanonicalType(RecordTy));
- DeclContext::lookup_result Ctors = Record->lookup(ConstructorName);
- for (DeclContext::lookup_iterator I = Ctors.begin(),
- E = Ctors.end();
- I != E; ++I) {
- R.Declaration = *I;
+ for (NamedDecl *Ctor : getConstructors(SemaRef.Context, Record)) {
+ R.Declaration = Ctor;
R.CursorKind = getCursorKindForDecl(R.Declaration);
Results.push_back(R);
}
@@ -921,8 +906,8 @@ void ResultBuilder::MaybeAddResult(Result R, DeclContext *CurContext) {
continue;
// Protocols are in distinct namespaces from everything else.
- if (((I->first->getIdentifierNamespace() & Decl::IDNS_ObjCProtocol)
- || (IDNS & Decl::IDNS_ObjCProtocol)) &&
+ if (((I->first->getIdentifierNamespace() & Decl::IDNS_ObjCProtocol) ||
+ (IDNS & Decl::IDNS_ObjCProtocol)) &&
I->first->getIdentifierNamespace() != IDNS)
continue;
@@ -944,18 +929,19 @@ void ResultBuilder::MaybeAddResult(Result R, DeclContext *CurContext) {
R.StartsNestedNameSpecifier = true;
R.Priority = CCP_NestedNameSpecifier;
} else
- AdjustResultPriorityForDecl(R);
+ AdjustResultPriorityForDecl(R);
// If this result is supposed to have an informative qualifier, add one.
if (R.QualifierIsInformative && !R.Qualifier &&
!R.StartsNestedNameSpecifier) {
const DeclContext *Ctx = R.Declaration->getDeclContext();
if (const NamespaceDecl *Namespace = dyn_cast<NamespaceDecl>(Ctx))
- R.Qualifier = NestedNameSpecifier::Create(SemaRef.Context, nullptr,
- Namespace);
+ R.Qualifier =
+ NestedNameSpecifier::Create(SemaRef.Context, nullptr, Namespace);
else if (const TagDecl *Tag = dyn_cast<TagDecl>(Ctx))
- R.Qualifier = NestedNameSpecifier::Create(SemaRef.Context, nullptr,
- false, SemaRef.Context.getTypeDeclType(Tag).getTypePtr());
+ R.Qualifier = NestedNameSpecifier::Create(
+ SemaRef.Context, nullptr, false,
+ SemaRef.Context.getTypeDeclType(Tag).getTypePtr());
else
R.QualifierIsInformative = false;
}
@@ -969,6 +955,11 @@ void ResultBuilder::MaybeAddResult(Result R, DeclContext *CurContext) {
MaybeAddConstructorResults(R);
}
+static void setInBaseClass(ResultBuilder::Result &R) {
+ R.Priority += CCD_InBaseClass;
+ R.InBaseClass = true;
+}
+
void ResultBuilder::AddResult(Result R, DeclContext *CurContext,
NamedDecl *Hiding, bool InBaseClass = false) {
if (R.Kind != Result::RK_Declaration) {
@@ -978,7 +969,7 @@ void ResultBuilder::AddResult(Result R, DeclContext *CurContext,
}
// Look through using declarations.
- if (const UsingShadowDecl *Using = dyn_cast<UsingShadowDecl>(R.Declaration)) {
+ if (const auto *Using = dyn_cast<UsingShadowDecl>(R.Declaration)) {
CodeCompletionResult Result(Using->getTargetDecl(),
getBasePriority(Using->getTargetDecl()),
R.Qualifier);
@@ -1017,27 +1008,27 @@ void ResultBuilder::AddResult(Result R, DeclContext *CurContext,
if (R.QualifierIsInformative && !R.Qualifier &&
!R.StartsNestedNameSpecifier) {
const DeclContext *Ctx = R.Declaration->getDeclContext();
- if (const NamespaceDecl *Namespace = dyn_cast<NamespaceDecl>(Ctx))
- R.Qualifier = NestedNameSpecifier::Create(SemaRef.Context, nullptr,
- Namespace);
- else if (const TagDecl *Tag = dyn_cast<TagDecl>(Ctx))
- R.Qualifier = NestedNameSpecifier::Create(SemaRef.Context, nullptr, false,
- SemaRef.Context.getTypeDeclType(Tag).getTypePtr());
+ if (const auto *Namespace = dyn_cast<NamespaceDecl>(Ctx))
+ R.Qualifier =
+ NestedNameSpecifier::Create(SemaRef.Context, nullptr, Namespace);
+ else if (const auto *Tag = dyn_cast<TagDecl>(Ctx))
+ R.Qualifier = NestedNameSpecifier::Create(
+ SemaRef.Context, nullptr, false,
+ SemaRef.Context.getTypeDeclType(Tag).getTypePtr());
else
R.QualifierIsInformative = false;
}
// Adjust the priority if this result comes from a base class.
if (InBaseClass)
- R.Priority += CCD_InBaseClass;
+ setInBaseClass(R);
AdjustResultPriorityForDecl(R);
if (HasObjectTypeQualifiers)
- if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(R.Declaration))
+ if (const auto *Method = dyn_cast<CXXMethodDecl>(R.Declaration))
if (Method->isInstance()) {
- Qualifiers MethodQuals
- = Qualifiers::fromCVRMask(Method->getTypeQualifiers());
+ Qualifiers MethodQuals = Method->getTypeQualifiers();
if (ObjectTypeQualifiers == MethodQuals)
R.Priority += CCD_ObjectQualifierMatch;
else if (ObjectTypeQualifiers - MethodQuals) {
@@ -1056,7 +1047,7 @@ void ResultBuilder::AddResult(Result R, DeclContext *CurContext,
void ResultBuilder::AddResult(Result R) {
assert(R.Kind != Result::RK_Declaration &&
- "Declaration results need more context");
+ "Declaration results need more context");
Results.push_back(R);
}
@@ -1066,9 +1057,8 @@ void ResultBuilder::EnterNewScope() { ShadowMaps.emplace_back(); }
/// Exit from the current scope.
void ResultBuilder::ExitScope() {
for (ShadowMap::iterator E = ShadowMaps.back().begin(),
- EEnd = ShadowMaps.back().end();
- E != EEnd;
- ++E)
+ EEnd = ShadowMaps.back().end();
+ E != EEnd; ++E)
E->second.Destroy();
ShadowMaps.pop_back();
@@ -1084,7 +1074,7 @@ bool ResultBuilder::IsOrdinaryName(const NamedDecl *ND) const {
unsigned IDNS = Decl::IDNS_Ordinary | Decl::IDNS_LocalExtern;
if (SemaRef.getLangOpts().CPlusPlus)
IDNS |= Decl::IDNS_Tag | Decl::IDNS_Namespace | Decl::IDNS_Member;
- else if (SemaRef.getLangOpts().ObjC1) {
+ else if (SemaRef.getLangOpts().ObjC) {
if (isa<ObjCIvarDecl>(ND))
return true;
}
@@ -1109,7 +1099,7 @@ bool ResultBuilder::IsOrdinaryNonTypeName(const NamedDecl *ND) const {
unsigned IDNS = Decl::IDNS_Ordinary | Decl::IDNS_LocalExtern;
if (SemaRef.getLangOpts().CPlusPlus)
IDNS |= Decl::IDNS_Tag | Decl::IDNS_Namespace | Decl::IDNS_Member;
- else if (SemaRef.getLangOpts().ObjC1) {
+ else if (SemaRef.getLangOpts().ObjC) {
if (isa<ObjCIvarDecl>(ND))
return true;
}
@@ -1121,7 +1111,7 @@ bool ResultBuilder::IsIntegralConstantValue(const NamedDecl *ND) const {
if (!IsOrdinaryNonTypeName(ND))
return 0;
- if (const ValueDecl *VD = dyn_cast<ValueDecl>(ND->getUnderlyingDecl()))
+ if (const auto *VD = dyn_cast<ValueDecl>(ND->getUnderlyingDecl()))
if (VD->getType()->isIntegralOrEnumerationType())
return true;
@@ -1137,16 +1127,15 @@ bool ResultBuilder::IsOrdinaryNonValueName(const NamedDecl *ND) const {
if (SemaRef.getLangOpts().CPlusPlus)
IDNS |= Decl::IDNS_Tag | Decl::IDNS_Namespace;
- return (ND->getIdentifierNamespace() & IDNS) &&
- !isa<ValueDecl>(ND) && !isa<FunctionTemplateDecl>(ND) &&
- !isa<ObjCPropertyDecl>(ND);
+ return (ND->getIdentifierNamespace() & IDNS) && !isa<ValueDecl>(ND) &&
+ !isa<FunctionTemplateDecl>(ND) && !isa<ObjCPropertyDecl>(ND);
}
/// Determines whether the given declaration is suitable as the
/// start of a C++ nested-name-specifier, e.g., a class or namespace.
bool ResultBuilder::IsNestedNameSpecifier(const NamedDecl *ND) const {
// Allow us to find class templates, too.
- if (const ClassTemplateDecl *ClassTemplate = dyn_cast<ClassTemplateDecl>(ND))
+ if (const auto *ClassTemplate = dyn_cast<ClassTemplateDecl>(ND))
ND = ClassTemplate->getTemplatedDecl();
return SemaRef.isAcceptableNestedNameSpecifier(ND);
@@ -1160,14 +1149,13 @@ bool ResultBuilder::IsEnum(const NamedDecl *ND) const {
/// Determines whether the given declaration is a class or struct.
bool ResultBuilder::IsClassOrStruct(const NamedDecl *ND) const {
// Allow us to find class templates, too.
- if (const ClassTemplateDecl *ClassTemplate = dyn_cast<ClassTemplateDecl>(ND))
+ if (const auto *ClassTemplate = dyn_cast<ClassTemplateDecl>(ND))
ND = ClassTemplate->getTemplatedDecl();
// For purposes of this check, interfaces match too.
- if (const RecordDecl *RD = dyn_cast<RecordDecl>(ND))
- return RD->getTagKind() == TTK_Class ||
- RD->getTagKind() == TTK_Struct ||
- RD->getTagKind() == TTK_Interface;
+ if (const auto *RD = dyn_cast<RecordDecl>(ND))
+ return RD->getTagKind() == TTK_Class || RD->getTagKind() == TTK_Struct ||
+ RD->getTagKind() == TTK_Interface;
return false;
}
@@ -1175,10 +1163,10 @@ bool ResultBuilder::IsClassOrStruct(const NamedDecl *ND) const {
/// Determines whether the given declaration is a union.
bool ResultBuilder::IsUnion(const NamedDecl *ND) const {
// Allow us to find class templates, too.
- if (const ClassTemplateDecl *ClassTemplate = dyn_cast<ClassTemplateDecl>(ND))
+ if (const auto *ClassTemplate = dyn_cast<ClassTemplateDecl>(ND))
ND = ClassTemplate->getTemplatedDecl();
- if (const RecordDecl *RD = dyn_cast<RecordDecl>(ND))
+ if (const auto *RD = dyn_cast<RecordDecl>(ND))
return RD->getTagKind() == TTK_Union;
return false;
@@ -1252,11 +1240,12 @@ bool ResultBuilder::IsObjCMessageReceiver(const NamedDecl *ND) const {
return isObjCReceiverType(SemaRef.Context, T);
}
-bool ResultBuilder::IsObjCMessageReceiverOrLambdaCapture(const NamedDecl *ND) const {
+bool ResultBuilder::IsObjCMessageReceiverOrLambdaCapture(
+ const NamedDecl *ND) const {
if (IsObjCMessageReceiver(ND))
return true;
- const VarDecl *Var = dyn_cast<VarDecl>(ND);
+ const auto *Var = dyn_cast<VarDecl>(ND);
if (!Var)
return false;
@@ -1289,34 +1278,80 @@ bool ResultBuilder::IsObjCIvar(const NamedDecl *ND) const {
}
namespace {
- /// Visible declaration consumer that adds a code-completion result
- /// for each visible declaration.
- class CodeCompletionDeclConsumer : public VisibleDeclConsumer {
- ResultBuilder &Results;
- DeclContext *CurContext;
- std::vector<FixItHint> FixIts;
- public:
- CodeCompletionDeclConsumer(
- ResultBuilder &Results, DeclContext *CurContext,
- std::vector<FixItHint> FixIts = std::vector<FixItHint>())
- : Results(Results), CurContext(CurContext), FixIts(std::move(FixIts)) {}
+/// Visible declaration consumer that adds a code-completion result
+/// for each visible declaration.
+class CodeCompletionDeclConsumer : public VisibleDeclConsumer {
+ ResultBuilder &Results;
+ DeclContext *InitialLookupCtx;
+ // NamingClass and BaseType are used for access-checking. See
+ // Sema::IsSimplyAccessible for details.
+ CXXRecordDecl *NamingClass;
+ QualType BaseType;
+ std::vector<FixItHint> FixIts;
- void FoundDecl(NamedDecl *ND, NamedDecl *Hiding, DeclContext *Ctx,
- bool InBaseClass) override {
- bool Accessible = true;
- if (Ctx)
- Accessible = Results.getSema().IsSimplyAccessible(ND, Ctx);
- ResultBuilder::Result Result(ND, Results.getBasePriority(ND), nullptr,
- false, Accessible, FixIts);
- Results.AddResult(Result, CurContext, Hiding, InBaseClass);
+public:
+ CodeCompletionDeclConsumer(
+ ResultBuilder &Results, DeclContext *InitialLookupCtx,
+ QualType BaseType = QualType(),
+ std::vector<FixItHint> FixIts = std::vector<FixItHint>())
+ : Results(Results), InitialLookupCtx(InitialLookupCtx),
+ FixIts(std::move(FixIts)) {
+ NamingClass = llvm::dyn_cast<CXXRecordDecl>(InitialLookupCtx);
+ // If BaseType was not provided explicitly, emulate implicit 'this->'.
+ if (BaseType.isNull()) {
+ auto ThisType = Results.getSema().getCurrentThisType();
+ if (!ThisType.isNull()) {
+ assert(ThisType->isPointerType());
+ BaseType = ThisType->getPointeeType();
+ if (!NamingClass)
+ NamingClass = BaseType->getAsCXXRecordDecl();
+ }
}
-
- void EnteredContext(DeclContext* Ctx) override {
- Results.addVisitedContext(Ctx);
+ this->BaseType = BaseType;
+ }
+
+ void FoundDecl(NamedDecl *ND, NamedDecl *Hiding, DeclContext *Ctx,
+ bool InBaseClass) override {
+ ResultBuilder::Result Result(ND, Results.getBasePriority(ND), nullptr,
+ false, IsAccessible(ND, Ctx), FixIts);
+ Results.AddResult(Result, InitialLookupCtx, Hiding, InBaseClass);
+ }
+
+ void EnteredContext(DeclContext *Ctx) override {
+ Results.addVisitedContext(Ctx);
+ }
+
+private:
+ bool IsAccessible(NamedDecl *ND, DeclContext *Ctx) {
+ // Naming class to use for access check. In most cases it was provided
+ // explicitly (e.g. member access (lhs.foo) or qualified lookup (X::));
+ // for unqualified lookup we fall back to the \p Ctx in which we found the
+ // member.
+ auto *NamingClass = this->NamingClass;
+ QualType BaseType = this->BaseType;
+ if (auto *Cls = llvm::dyn_cast_or_null<CXXRecordDecl>(Ctx)) {
+ if (!NamingClass)
+ NamingClass = Cls;
+ // When we emulate implicit 'this->' in an unqualified lookup, we might
+ // end up with an invalid naming class. In that case, we avoid emulating
+ // the 'this->' qualifier so that the preconditions of access checking hold.
+ if (NamingClass->getCanonicalDecl() != Cls->getCanonicalDecl() &&
+ !NamingClass->isDerivedFrom(Cls)) {
+ NamingClass = Cls;
+ BaseType = QualType();
+ }
+ } else {
+ // The decl was found outside the C++ class, so only ObjC access checks
+ // apply. Those do not rely on NamingClass and BaseType, so we clear them
+ // out.
+ NamingClass = nullptr;
+ BaseType = QualType();
}
- };
-}
+ return Results.getSema().IsSimplyAccessible(ND, NamingClass, BaseType);
+ }
+};
+} // namespace
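
A construction sketch, not taken from the patch; Record, BaseTy, and the helper
are invented names, while the constructor arguments mirror the signature above.

static void sketchMakeConsumers(ResultBuilder &Results, Sema &SemaRef,
                                CXXRecordDecl *Record, QualType BaseTy) {
  // Member access or qualified lookup: naming class and base type are known.
  CodeCompletionDeclConsumer MemberConsumer(Results, Record, BaseTy);
  // Unqualified lookup: pass only the context; the constructor falls back to
  // the type of 'this' (if any) for the access check.
  CodeCompletionDeclConsumer LocalConsumer(Results, SemaRef.CurContext);
  (void)MemberConsumer;
  (void)LocalConsumer;
}
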
/// Add type specifiers for the current language as keyword results.
static void AddTypeSpecifierResults(const LangOptions &LangOpts,
@@ -1349,8 +1384,8 @@ static void AddTypeSpecifierResults(const LangOptions &LangOpts,
Results.getCodeCompletionTUInfo());
if (LangOpts.CPlusPlus) {
// C++-specific
- Results.AddResult(Result("bool", CCP_Type +
- (LangOpts.ObjC1? CCD_bool_in_ObjC : 0)));
+ Results.AddResult(
+ Result("bool", CCP_Type + (LangOpts.ObjC ? CCD_bool_in_ObjC : 0)));
Results.AddResult(Result("class", CCP_Type));
Results.AddResult(Result("wchar_t", CCP_Type));
@@ -1466,14 +1501,11 @@ static void AddFunctionSpecifiers(Sema::ParserCompletionContext CCC,
static void AddObjCExpressionResults(ResultBuilder &Results, bool NeedAt);
static void AddObjCStatementResults(ResultBuilder &Results, bool NeedAt);
static void AddObjCVisibilityResults(const LangOptions &LangOpts,
- ResultBuilder &Results,
- bool NeedAt);
+ ResultBuilder &Results, bool NeedAt);
static void AddObjCImplementationResults(const LangOptions &LangOpts,
- ResultBuilder &Results,
- bool NeedAt);
+ ResultBuilder &Results, bool NeedAt);
static void AddObjCInterfaceResults(const LangOptions &LangOpts,
- ResultBuilder &Results,
- bool NeedAt);
+ ResultBuilder &Results, bool NeedAt);
static void AddObjCTopLevelResults(ResultBuilder &Results, bool NeedAt);
static void AddTypedefResult(ResultBuilder &Results) {
@@ -1511,7 +1543,7 @@ static bool WantTypesInContext(Sema::ParserCompletionContext CCC,
return false;
case Sema::PCC_ForInit:
- return LangOpts.CPlusPlus || LangOpts.ObjC1 || LangOpts.C99;
+ return LangOpts.CPlusPlus || LangOpts.ObjC || LangOpts.C99;
}
llvm_unreachable("Invalid ParserCompletionContext!");
@@ -1537,8 +1569,7 @@ static PrintingPolicy getCompletionPrintingPolicy(Sema &S) {
///
/// This routine provides a fast path where we provide constant strings for
/// common type names.
-static const char *GetCompletionTypeString(QualType T,
- ASTContext &Context,
+static const char *GetCompletionTypeString(QualType T, ASTContext &Context,
const PrintingPolicy &Policy,
CodeCompletionAllocator &Allocator) {
if (!T.getLocalQualifiers()) {
@@ -1551,11 +1582,16 @@ static const char *GetCompletionTypeString(QualType T,
if (TagDecl *Tag = TagT->getDecl())
if (!Tag->hasNameForLinkage()) {
switch (Tag->getTagKind()) {
- case TTK_Struct: return "struct <anonymous>";
- case TTK_Interface: return "__interface <anonymous>";
- case TTK_Class: return "class <anonymous>";
- case TTK_Union: return "union <anonymous>";
- case TTK_Enum: return "enum <anonymous>";
+ case TTK_Struct:
+ return "struct <anonymous>";
+ case TTK_Interface:
+ return "__interface <anonymous>";
+ case TTK_Class:
+ return "class <anonymous>";
+ case TTK_Union:
+ return "union <anonymous>";
+ case TTK_Enum:
+ return "enum <anonymous>";
}
}
}
@@ -1575,10 +1611,8 @@ static void addThisCompletion(Sema &S, ResultBuilder &Results) {
CodeCompletionAllocator &Allocator = Results.getAllocator();
CodeCompletionBuilder Builder(Allocator, Results.getCodeCompletionTUInfo());
PrintingPolicy Policy = getCompletionPrintingPolicy(S);
- Builder.AddResultTypeChunk(GetCompletionTypeString(ThisTy,
- S.Context,
- Policy,
- Allocator));
+ Builder.AddResultTypeChunk(
+ GetCompletionTypeString(ThisTy, S.Context, Policy, Allocator));
Builder.AddTypedTextChunk("this");
Results.AddResult(CodeCompletionResult(Builder.TakeString()));
}
@@ -1598,11 +1632,76 @@ static void AddStaticAssertResult(CodeCompletionBuilder &Builder,
Results.AddResult(CodeCompletionResult(Builder.TakeString()));
}
+static void printOverrideString(llvm::raw_ostream &OS,
+ CodeCompletionString *CCS) {
+ for (const auto &C : *CCS) {
+ if (C.Kind == CodeCompletionString::CK_Optional)
+ printOverrideString(OS, C.Optional);
+ else
+ OS << C.Text;
+ // Add a space after return type.
+ if (C.Kind == CodeCompletionString::CK_ResultType)
+ OS << ' ';
+ }
+}
+
+static void AddOverrideResults(ResultBuilder &Results,
+ const CodeCompletionContext &CCContext,
+ CodeCompletionBuilder &Builder) {
+ Sema &S = Results.getSema();
+ const auto *CR = llvm::dyn_cast<CXXRecordDecl>(S.CurContext);
+ // If we are not inside a class/struct/union, there is nothing to do.
+ if (!CR)
+ return;
+ // First store overrides within the current class.
+ // These are stored by name to make the querying step below fast.
+ llvm::StringMap<std::vector<FunctionDecl *>> Overrides;
+ for (auto *Method : CR->methods()) {
+ if (!Method->isVirtual() || !Method->getIdentifier())
+ continue;
+ Overrides[Method->getName()].push_back(Method);
+ }
+
+ for (const auto &Base : CR->bases()) {
+ const auto *BR = Base.getType().getTypePtr()->getAsCXXRecordDecl();
+ if (!BR)
+ continue;
+ for (auto *Method : BR->methods()) {
+ if (!Method->isVirtual() || !Method->getIdentifier())
+ continue;
+ const auto it = Overrides.find(Method->getName());
+ bool IsOverriden = false;
+ if (it != Overrides.end()) {
+ for (auto *MD : it->second) {
+ // If the method in the current class is not an overload of this virtual
+ // function, then it overrides this one.
+ if (!S.IsOverload(MD, Method, false)) {
+ IsOverriden = true;
+ break;
+ }
+ }
+ }
+ if (!IsOverriden) {
+ // Generate a new CodeCompletionResult by taking this function and
+ // converting it into an override declaration; the final
+ // CodeCompletionString contains only a single TypedTextChunk.
+ std::string OverrideSignature;
+ llvm::raw_string_ostream OS(OverrideSignature);
+ CodeCompletionResult CCR(Method, 0);
+ PrintingPolicy Policy =
+ getCompletionPrintingPolicy(S.getASTContext(), S.getPreprocessor());
+ auto *CCS = CCR.createCodeCompletionStringForOverride(
+ S.getPreprocessor(), S.getASTContext(), Builder,
+ /*IncludeBriefComments=*/false, CCContext, Policy);
+ Results.AddResult(CodeCompletionResult(CCS, Method, CCP_CodePattern));
+ }
+ }
+ }
+}
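
An effect sketch for AddOverrideResults(); Base, Derived, and draw are invented
names, not part of the patch.

struct Base { virtual void draw(int scale) const; };
struct Derived : Base {
  // Completing a member declaration here offers the not-yet-overridden base
  // method as a single typed-text chunk, roughly
  //   "void draw(int scale) const override"
  // at CCP_CodePattern priority (see createCodeCompletionStringForOverride).
};
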
+
/// Add language constructs that show up for "ordinary" names.
-static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
- Scope *S,
- Sema &SemaRef,
- ResultBuilder &Results) {
+static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC, Scope *S,
+ Sema &SemaRef, ResultBuilder &Results) {
CodeCompletionAllocator &Allocator = Results.getAllocator();
CodeCompletionBuilder Builder(Allocator, Results.getCodeCompletionTUInfo());
@@ -1651,10 +1750,12 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("declaration");
Results.AddResult(Result(Builder.TakeString()));
+ } else {
+ Results.AddResult(Result("template", CodeCompletionResult::RK_Keyword));
}
}
- if (SemaRef.getLangOpts().ObjC1)
+ if (SemaRef.getLangOpts().ObjC)
AddObjCTopLevelResults(Results, true);
AddTypedefResult(Results);
@@ -1706,6 +1807,12 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
if (IsNotInheritanceScope && Results.includeCodePatterns())
Builder.AddChunk(CodeCompletionString::CK_Colon);
Results.AddResult(Result(Builder.TakeString()));
+
+ // FIXME: This adds override results only if we are at the first word of
+ // the declaration/definition. Also call this from other places to cover
+ // more use cases.
+ AddOverrideResults(Results, CodeCompletionContext::CCC_ClassStructUnion,
+ Builder);
}
}
LLVM_FALLTHROUGH;
@@ -1719,6 +1826,8 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Builder.AddPlaceholderChunk("parameters");
Builder.AddChunk(CodeCompletionString::CK_RightAngle);
Results.AddResult(Result(Builder.TakeString()));
+ } else {
+ Results.AddResult(Result("template", CodeCompletionResult::RK_Keyword));
}
AddStorageSpecifiers(CCC, SemaRef.getLangOpts(), Results);
@@ -1762,7 +1871,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Builder.AddChunk(CodeCompletionString::CK_RightBrace);
Results.AddResult(Result(Builder.TakeString()));
}
- if (SemaRef.getLangOpts().ObjC1)
+ if (SemaRef.getLangOpts().ObjC)
AddObjCStatementResults(Results, true);
if (Results.includeCodePatterns()) {
@@ -1795,7 +1904,8 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
}
// Switch-specific statements.
- if (!SemaRef.getCurFunction()->SwitchStack.empty()) {
+ if (SemaRef.getCurFunction() &&
+ !SemaRef.getCurFunction()->SwitchStack.empty()) {
// case expression:
Builder.AddTypedTextChunk("case");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
@@ -1871,10 +1981,9 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
// "return expression ;" or "return ;", depending on whether we
// know the function is void or not.
bool isVoid = false;
- if (FunctionDecl *Function = dyn_cast<FunctionDecl>(SemaRef.CurContext))
+ if (const auto *Function = dyn_cast<FunctionDecl>(SemaRef.CurContext))
isVoid = Function->getReturnType()->isVoidType();
- else if (ObjCMethodDecl *Method
- = dyn_cast<ObjCMethodDecl>(SemaRef.CurContext))
+ else if (const auto *Method = dyn_cast<ObjCMethodDecl>(SemaRef.CurContext))
isVoid = Method->getReturnType()->isVoidType();
else if (SemaRef.getCurBlock() &&
!SemaRef.getCurBlock()->ReturnType.isNull())
@@ -1902,7 +2011,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
AddStaticAssertResult(Builder, Results, SemaRef.getLangOpts());
}
- LLVM_FALLTHROUGH;
+ LLVM_FALLTHROUGH;
// Fall through (for statement expressions).
case Sema::PCC_ForInit:
@@ -2089,7 +2198,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
}
}
- if (SemaRef.getLangOpts().ObjC1) {
+ if (SemaRef.getLangOpts().ObjC) {
// Add "super", if we're in an Objective-C class with a superclass.
if (ObjCMethodDecl *Method = SemaRef.getCurMethodDecl()) {
// The interface can be NULL.
@@ -2148,8 +2257,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
/// type chunk.
static void AddResultTypeChunk(ASTContext &Context,
const PrintingPolicy &Policy,
- const NamedDecl *ND,
- QualType BaseType,
+ const NamedDecl *ND, QualType BaseType,
CodeCompletionBuilder &Result) {
if (!ND)
return;
@@ -2163,24 +2271,24 @@ static void AddResultTypeChunk(ASTContext &Context,
QualType T;
if (const FunctionDecl *Function = ND->getAsFunction())
T = Function->getReturnType();
- else if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(ND)) {
+ else if (const auto *Method = dyn_cast<ObjCMethodDecl>(ND)) {
if (!BaseType.isNull())
T = Method->getSendResultType(BaseType);
else
T = Method->getReturnType();
- } else if (const EnumConstantDecl *Enumerator = dyn_cast<EnumConstantDecl>(ND)) {
+ } else if (const auto *Enumerator = dyn_cast<EnumConstantDecl>(ND)) {
T = Context.getTypeDeclType(cast<TypeDecl>(Enumerator->getDeclContext()));
T = clang::TypeName::getFullyQualifiedType(T, Context);
} else if (isa<UnresolvedUsingValueDecl>(ND)) {
/* Do nothing: ignore unresolved using declarations*/
- } else if (const ObjCIvarDecl *Ivar = dyn_cast<ObjCIvarDecl>(ND)) {
+ } else if (const auto *Ivar = dyn_cast<ObjCIvarDecl>(ND)) {
if (!BaseType.isNull())
T = Ivar->getUsageType(BaseType);
else
T = Ivar->getType();
- } else if (const ValueDecl *Value = dyn_cast<ValueDecl>(ND)) {
+ } else if (const auto *Value = dyn_cast<ValueDecl>(ND)) {
T = Value->getType();
- } else if (const ObjCPropertyDecl *Property = dyn_cast<ObjCPropertyDecl>(ND)) {
+ } else if (const auto *Property = dyn_cast<ObjCPropertyDecl>(ND)) {
if (!BaseType.isNull())
T = Property->getUsageType(BaseType);
else
@@ -2190,8 +2298,8 @@ static void AddResultTypeChunk(ASTContext &Context,
if (T.isNull() || Context.hasSameType(T, Context.DependentTy))
return;
- Result.AddResultTypeChunk(GetCompletionTypeString(T, Context, Policy,
- Result.getAllocator()));
+ Result.AddResultTypeChunk(
+ GetCompletionTypeString(T, Context, Policy, Result.getAllocator()));
}
static void MaybeAddSentinel(Preprocessor &PP,
@@ -2199,7 +2307,7 @@ static void MaybeAddSentinel(Preprocessor &PP,
CodeCompletionBuilder &Result) {
if (SentinelAttr *Sentinel = FunctionOrMethod->getAttr<SentinelAttr>())
if (Sentinel->getSentinel() == 0) {
- if (PP.getLangOpts().ObjC1 && PP.isMacroDefined("nil"))
+ if (PP.getLangOpts().ObjC && PP.isMacroDefined("nil"))
Result.AddTextChunk(", nil");
else if (PP.isMacroDefined("NULL"))
Result.AddTextChunk(", NULL");
@@ -2297,11 +2405,10 @@ formatBlockPlaceholder(const PrintingPolicy &Policy, const NamedDecl *BlockDecl,
bool SuppressBlock = false,
Optional<ArrayRef<QualType>> ObjCSubsts = None);
-static std::string FormatFunctionParameter(const PrintingPolicy &Policy,
- const ParmVarDecl *Param,
- bool SuppressName = false,
- bool SuppressBlock = false,
- Optional<ArrayRef<QualType>> ObjCSubsts = None) {
+static std::string
+FormatFunctionParameter(const PrintingPolicy &Policy, const ParmVarDecl *Param,
+ bool SuppressName = false, bool SuppressBlock = false,
+ Optional<ArrayRef<QualType>> ObjCSubsts = None) {
bool ObjCMethodParam = isa<ObjCMethodDecl>(Param->getDeclContext());
if (Param->getType()->isDependentType() ||
!Param->getType()->isBlockPointerType()) {
@@ -2317,8 +2424,8 @@ static std::string FormatFunctionParameter(const PrintingPolicy &Policy,
Type = Type.substObjCTypeArgs(Param->getASTContext(), *ObjCSubsts,
ObjCSubstitutionContext::Parameter);
if (ObjCMethodParam) {
- Result = "(" + formatObjCParamQualifiers(Param->getObjCDeclQualifier(),
- Type);
+ Result =
+ "(" + formatObjCParamQualifiers(Param->getObjCDeclQualifier(), Type);
Result += Type.getAsString(Policy) + ")";
if (Param->getIdentifier() && !SuppressName)
Result += Param->getIdentifier()->getName();
@@ -2449,13 +2556,15 @@ static std::string GetDefaultValueString(const ParmVarDecl *Param,
bool Invalid = CharSrcRange.isInvalid();
if (Invalid)
return "";
- StringRef srcText = Lexer::getSourceText(CharSrcRange, SM, LangOpts, &Invalid);
+ StringRef srcText =
+ Lexer::getSourceText(CharSrcRange, SM, LangOpts, &Invalid);
if (Invalid)
return "";
if (srcText.empty() || srcText == "=") {
// Lexer can't determine the value.
- // This happens if the code is incorrect (for example class is forward declared).
+ // This happens if the code is incorrect (for example, a class is
+ // forward-declared).
return "";
}
std::string DefValue(srcText.str());
@@ -2463,7 +2572,8 @@ static std::string GetDefaultValueString(const ParmVarDecl *Param,
// this value always has (or always does not have) '=' in front of it
if (DefValue.at(0) != '=') {
// If we don't have '=' in front of value.
- // Lexer returns built-in types values without '=' and user-defined types values with it.
+ // The Lexer returns built-in type values without '=' and user-defined type
+ // values with it.
return " = " + DefValue;
}
return " " + DefValue;
@@ -2503,18 +2613,18 @@ static void AddFunctionParameterChunks(Preprocessor &PP,
// Format the placeholder string.
std::string PlaceholderStr = FormatFunctionParameter(Policy, Param);
if (Param->hasDefaultArg())
- PlaceholderStr += GetDefaultValueString(Param, PP.getSourceManager(), PP.getLangOpts());
+ PlaceholderStr +=
+ GetDefaultValueString(Param, PP.getSourceManager(), PP.getLangOpts());
if (Function->isVariadic() && P == N - 1)
PlaceholderStr += ", ...";
// Add the placeholder string.
Result.AddPlaceholderChunk(
- Result.getAllocator().CopyString(PlaceholderStr));
+ Result.getAllocator().CopyString(PlaceholderStr));
}
- if (const FunctionProtoType *Proto
- = Function->getType()->getAs<FunctionProtoType>())
+ if (const auto *Proto = Function->getType()->getAs<FunctionProtoType>())
if (Proto->isVariadic()) {
if (Proto->getNumParams() == 0)
Result.AddPlaceholderChunk("...");
@@ -2524,13 +2634,10 @@ static void AddFunctionParameterChunks(Preprocessor &PP,
}
/// Add template parameter chunks to the given code completion string.
-static void AddTemplateParameterChunks(ASTContext &Context,
- const PrintingPolicy &Policy,
- const TemplateDecl *Template,
- CodeCompletionBuilder &Result,
- unsigned MaxParameters = 0,
- unsigned Start = 0,
- bool InDefaultArg = false) {
+static void AddTemplateParameterChunks(
+ ASTContext &Context, const PrintingPolicy &Policy,
+ const TemplateDecl *Template, CodeCompletionBuilder &Result,
+ unsigned MaxParameters = 0, unsigned Start = 0, bool InDefaultArg = false) {
bool FirstParameter = true;
// Prefer to take the template parameter names from the first declaration of
@@ -2541,8 +2648,8 @@ static void AddTemplateParameterChunks(ASTContext &Context,
TemplateParameterList::iterator PEnd = Params->end();
if (MaxParameters)
PEnd = Params->begin() + MaxParameters;
- for (TemplateParameterList::iterator P = Params->begin() + Start;
- P != PEnd; ++P) {
+ for (TemplateParameterList::iterator P = Params->begin() + Start; P != PEnd;
+ ++P) {
bool HasDefaultArg = false;
std::string PlaceholderStr;
if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
@@ -2557,8 +2664,8 @@ static void AddTemplateParameterChunks(ASTContext &Context,
}
HasDefaultArg = TTP->hasDefaultArgument();
- } else if (NonTypeTemplateParmDecl *NTTP
- = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
+ } else if (NonTypeTemplateParmDecl *NTTP =
+ dyn_cast<NonTypeTemplateParmDecl>(*P)) {
if (NTTP->getIdentifier())
PlaceholderStr = NTTP->getIdentifier()->getName();
NTTP->getType().getAsStringInternal(PlaceholderStr, Policy);
@@ -2600,18 +2707,17 @@ static void AddTemplateParameterChunks(ASTContext &Context,
// Add the placeholder string.
Result.AddPlaceholderChunk(
- Result.getAllocator().CopyString(PlaceholderStr));
+ Result.getAllocator().CopyString(PlaceholderStr));
}
}
/// Add a qualifier to the given code-completion string, if the
/// provided nested-name-specifier is non-NULL.
-static void
-AddQualifierToCompletionString(CodeCompletionBuilder &Result,
- NestedNameSpecifier *Qualifier,
- bool QualifierIsInformative,
- ASTContext &Context,
- const PrintingPolicy &Policy) {
+static void AddQualifierToCompletionString(CodeCompletionBuilder &Result,
+ NestedNameSpecifier *Qualifier,
+ bool QualifierIsInformative,
+ ASTContext &Context,
+ const PrintingPolicy &Policy) {
if (!Qualifier)
return;
@@ -2629,25 +2735,24 @@ AddQualifierToCompletionString(CodeCompletionBuilder &Result,
static void
AddFunctionTypeQualsToCompletionString(CodeCompletionBuilder &Result,
const FunctionDecl *Function) {
- const FunctionProtoType *Proto
- = Function->getType()->getAs<FunctionProtoType>();
+ const auto *Proto = Function->getType()->getAs<FunctionProtoType>();
if (!Proto || !Proto->getTypeQuals())
return;
// FIXME: Add ref-qualifier!
// Handle single qualifiers without copying
- if (Proto->getTypeQuals() == Qualifiers::Const) {
+ if (Proto->getTypeQuals().hasOnlyConst()) {
Result.AddInformativeChunk(" const");
return;
}
- if (Proto->getTypeQuals() == Qualifiers::Volatile) {
+ if (Proto->getTypeQuals().hasOnlyVolatile()) {
Result.AddInformativeChunk(" volatile");
return;
}
- if (Proto->getTypeQuals() == Qualifiers::Restrict) {
+ if (Proto->getTypeQuals().hasOnlyRestrict()) {
Result.AddInformativeChunk(" restrict");
return;
}
@@ -2672,37 +2777,51 @@ static void AddTypedNameChunk(ASTContext &Context, const PrintingPolicy &Policy,
return;
switch (Name.getNameKind()) {
- case DeclarationName::CXXOperatorName: {
- const char *OperatorName = nullptr;
- switch (Name.getCXXOverloadedOperator()) {
- case OO_None:
- case OO_Conditional:
- case NUM_OVERLOADED_OPERATORS:
- OperatorName = "operator";
- break;
+ case DeclarationName::CXXOperatorName: {
+ const char *OperatorName = nullptr;
+ switch (Name.getCXXOverloadedOperator()) {
+ case OO_None:
+ case OO_Conditional:
+ case NUM_OVERLOADED_OPERATORS:
+ OperatorName = "operator";
+ break;
-#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
- case OO_##Name: OperatorName = "operator" Spelling; break;
-#define OVERLOADED_OPERATOR_MULTI(Name,Spelling,Unary,Binary,MemberOnly)
+#define OVERLOADED_OPERATOR(Name, Spelling, Token, Unary, Binary, MemberOnly) \
+ case OO_##Name: \
+ OperatorName = "operator" Spelling; \
+ break;
+#define OVERLOADED_OPERATOR_MULTI(Name, Spelling, Unary, Binary, MemberOnly)
#include "clang/Basic/OperatorKinds.def"
- case OO_New: OperatorName = "operator new"; break;
- case OO_Delete: OperatorName = "operator delete"; break;
- case OO_Array_New: OperatorName = "operator new[]"; break;
- case OO_Array_Delete: OperatorName = "operator delete[]"; break;
- case OO_Call: OperatorName = "operator()"; break;
- case OO_Subscript: OperatorName = "operator[]"; break;
- }
- Result.AddTypedTextChunk(OperatorName);
+ case OO_New:
+ OperatorName = "operator new";
+ break;
+ case OO_Delete:
+ OperatorName = "operator delete";
+ break;
+ case OO_Array_New:
+ OperatorName = "operator new[]";
+ break;
+ case OO_Array_Delete:
+ OperatorName = "operator delete[]";
+ break;
+ case OO_Call:
+ OperatorName = "operator()";
+ break;
+ case OO_Subscript:
+ OperatorName = "operator[]";
break;
}
+ Result.AddTypedTextChunk(OperatorName);
+ break;
+ }
case DeclarationName::Identifier:
case DeclarationName::CXXConversionFunctionName:
case DeclarationName::CXXDestructorName:
case DeclarationName::CXXLiteralOperatorName:
Result.AddTypedTextChunk(
- Result.getAllocator().CopyString(ND->getNameAsString()));
+ Result.getAllocator().CopyString(ND->getNameAsString()));
break;
case DeclarationName::CXXDeductionGuideName:
@@ -2715,19 +2834,18 @@ static void AddTypedNameChunk(ASTContext &Context, const PrintingPolicy &Policy,
case DeclarationName::CXXConstructorName: {
CXXRecordDecl *Record = nullptr;
QualType Ty = Name.getCXXNameType();
- if (const RecordType *RecordTy = Ty->getAs<RecordType>())
+ if (const auto *RecordTy = Ty->getAs<RecordType>())
Record = cast<CXXRecordDecl>(RecordTy->getDecl());
- else if (const InjectedClassNameType *InjectedTy
- = Ty->getAs<InjectedClassNameType>())
+ else if (const auto *InjectedTy = Ty->getAs<InjectedClassNameType>())
Record = InjectedTy->getDecl();
else {
Result.AddTypedTextChunk(
- Result.getAllocator().CopyString(ND->getNameAsString()));
+ Result.getAllocator().CopyString(ND->getNameAsString()));
break;
}
Result.AddTypedTextChunk(
- Result.getAllocator().CopyString(Record->getNameAsString()));
+ Result.getAllocator().CopyString(Record->getNameAsString()));
if (ClassTemplateDecl *Template = Record->getDescribedClassTemplate()) {
Result.AddChunk(CodeCompletionString::CK_LeftAngle);
AddTemplateParameterChunks(Context, Policy, Template, Result);
@@ -2738,11 +2856,10 @@ static void AddTypedNameChunk(ASTContext &Context, const PrintingPolicy &Policy,
}
}
-CodeCompletionString *CodeCompletionResult::CreateCodeCompletionString(Sema &S,
- const CodeCompletionContext &CCContext,
- CodeCompletionAllocator &Allocator,
- CodeCompletionTUInfo &CCTUInfo,
- bool IncludeBriefComments) {
+CodeCompletionString *CodeCompletionResult::CreateCodeCompletionString(
+ Sema &S, const CodeCompletionContext &CCContext,
+ CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo,
+ bool IncludeBriefComments) {
return CreateCodeCompletionString(S.Context, S.PP, CCContext, Allocator,
CCTUInfo, IncludeBriefComments);
}
@@ -2799,13 +2916,10 @@ CodeCompletionString *CodeCompletionResult::CreateCodeCompletionStringForMacro(
/// \returns Either a new, heap-allocated code completion string describing
/// how to use this result, or NULL to indicate that the string or name of the
/// result is all that is needed.
-CodeCompletionString *
-CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
- Preprocessor &PP,
- const CodeCompletionContext &CCContext,
- CodeCompletionAllocator &Allocator,
- CodeCompletionTUInfo &CCTUInfo,
- bool IncludeBriefComments) {
+CodeCompletionString *CodeCompletionResult::CreateCodeCompletionString(
+ ASTContext &Ctx, Preprocessor &PP, const CodeCompletionContext &CCContext,
+ CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo,
+ bool IncludeBriefComments) {
if (Kind == RK_Macro)
return CreateCodeCompletionStringForMacro(PP, Allocator, CCTUInfo);
@@ -2834,6 +2948,30 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
return Result.TakeString();
}
assert(Kind == RK_Declaration && "Missed a result kind?");
+ return createCodeCompletionStringForDecl(
+ PP, Ctx, Result, IncludeBriefComments, CCContext, Policy);
+}
+
+CodeCompletionString *
+CodeCompletionResult::createCodeCompletionStringForOverride(
+ Preprocessor &PP, ASTContext &Ctx, CodeCompletionBuilder &Result,
+ bool IncludeBriefComments, const CodeCompletionContext &CCContext,
+ PrintingPolicy &Policy) {
+ std::string OverrideSignature;
+ llvm::raw_string_ostream OS(OverrideSignature);
+ auto *CCS = createCodeCompletionStringForDecl(PP, Ctx, Result,
+ /*IncludeBriefComments=*/false,
+ CCContext, Policy);
+ printOverrideString(OS, CCS);
+ OS << " override";
+ Result.AddTypedTextChunk(Result.getAllocator().CopyString(OS.str()));
+ return Result.TakeString();
+}
+
+CodeCompletionString *CodeCompletionResult::createCodeCompletionStringForDecl(
+ Preprocessor &PP, ASTContext &Ctx, CodeCompletionBuilder &Result,
+ bool IncludeBriefComments, const CodeCompletionContext &CCContext,
+ PrintingPolicy &Policy) {
const NamedDecl *ND = Declaration;
Result.addParentContext(ND->getDeclContext());
@@ -2846,7 +2984,7 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
if (StartsNestedNameSpecifier) {
Result.AddTypedTextChunk(
- Result.getAllocator().CopyString(ND->getNameAsString()));
+ Result.getAllocator().CopyString(ND->getNameAsString()));
Result.AddTextChunk("::");
return Result.TakeString();
}
@@ -2856,7 +2994,7 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
AddResultTypeChunk(Ctx, Policy, ND, CCContext.getBaseType(), Result);
- if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(ND)) {
+ if (const auto *Function = dyn_cast<FunctionDecl>(ND)) {
AddQualifierToCompletionString(Result, Qualifier, QualifierIsInformative,
Ctx, Policy);
AddTypedNameChunk(Ctx, Policy, ND, Result);
@@ -2867,7 +3005,8 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
return Result.TakeString();
}
- if (const FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(ND)) {
+ if (const FunctionTemplateDecl *FunTmpl =
+ dyn_cast<FunctionTemplateDecl>(ND)) {
AddQualifierToCompletionString(Result, Qualifier, QualifierIsInformative,
Ctx, Policy);
FunctionDecl *Function = FunTmpl->getTemplatedDecl();
@@ -2886,16 +3025,16 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
// FIXME: We need to abstract template parameters better!
bool HasDefaultArg = false;
NamedDecl *Param = FunTmpl->getTemplateParameters()->getParam(
- LastDeducibleArgument - 1);
+ LastDeducibleArgument - 1);
if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(Param))
HasDefaultArg = TTP->hasDefaultArgument();
- else if (NonTypeTemplateParmDecl *NTTP
- = dyn_cast<NonTypeTemplateParmDecl>(Param))
+ else if (NonTypeTemplateParmDecl *NTTP =
+ dyn_cast<NonTypeTemplateParmDecl>(Param))
HasDefaultArg = NTTP->hasDefaultArgument();
else {
assert(isa<TemplateTemplateParmDecl>(Param));
- HasDefaultArg
- = cast<TemplateTemplateParmDecl>(Param)->hasDefaultArgument();
+ HasDefaultArg =
+ cast<TemplateTemplateParmDecl>(Param)->hasDefaultArgument();
}
if (!HasDefaultArg)
@@ -2921,22 +3060,21 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
return Result.TakeString();
}
- if (const TemplateDecl *Template = dyn_cast<TemplateDecl>(ND)) {
+ if (const auto *Template = dyn_cast<TemplateDecl>(ND)) {
AddQualifierToCompletionString(Result, Qualifier, QualifierIsInformative,
Ctx, Policy);
Result.AddTypedTextChunk(
- Result.getAllocator().CopyString(Template->getNameAsString()));
+ Result.getAllocator().CopyString(Template->getNameAsString()));
Result.AddChunk(CodeCompletionString::CK_LeftAngle);
AddTemplateParameterChunks(Ctx, Policy, Template, Result);
Result.AddChunk(CodeCompletionString::CK_RightAngle);
return Result.TakeString();
}
-
- if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(ND)) {
+ if (const auto *Method = dyn_cast<ObjCMethodDecl>(ND)) {
Selector Sel = Method->getSelector();
if (Sel.isUnarySelector()) {
- Result.AddTypedTextChunk(Result.getAllocator().CopyString(
- Sel.getNameForSlot(0)));
+ Result.AddTypedTextChunk(
+ Result.getAllocator().CopyString(Sel.getNameForSlot(0)));
return Result.TakeString();
}
@@ -2954,7 +3092,7 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
}
unsigned Idx = 0;
for (ObjCMethodDecl::param_const_iterator P = Method->param_begin(),
- PEnd = Method->param_end();
+ PEnd = Method->param_end();
P != PEnd; (void)++P, ++Idx) {
if (Idx > 0) {
std::string Keyword;
@@ -2981,12 +3119,11 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
if (ParamType->isBlockPointerType() && !DeclaringEntity)
Arg = FormatFunctionParameter(Policy, *P, true,
- /*SuppressBlock=*/false,
- ObjCSubsts);
+ /*SuppressBlock=*/false, ObjCSubsts);
else {
if (ObjCSubsts)
- ParamType = ParamType.substObjCTypeArgs(Ctx, *ObjCSubsts,
- ObjCSubstitutionContext::Parameter);
+ ParamType = ParamType.substObjCTypeArgs(
+ Ctx, *ObjCSubsts, ObjCSubstitutionContext::Parameter);
Arg = "(" + formatObjCParamQualifiers((*P)->getObjCDeclQualifier(),
ParamType);
Arg += ParamType.getAsString(Policy) + ")";
@@ -3027,7 +3164,7 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
Ctx, Policy);
Result.AddTypedTextChunk(
- Result.getAllocator().CopyString(ND->getNameAsString()));
+ Result.getAllocator().CopyString(ND->getNameAsString()));
return Result.TakeString();
}
@@ -3039,7 +3176,7 @@ const RawComment *clang::getCompletionComment(const ASTContext &Ctx,
return RC;
// Try to find comment from a property for ObjC methods.
- const ObjCMethodDecl *M = dyn_cast<ObjCMethodDecl>(ND);
+ const auto *M = dyn_cast<ObjCMethodDecl>(ND);
if (!M)
return nullptr;
const ObjCPropertyDecl *PDecl = M->findPropertyDecl();
@@ -3051,7 +3188,7 @@ const RawComment *clang::getCompletionComment(const ASTContext &Ctx,
const RawComment *clang::getPatternCompletionComment(const ASTContext &Ctx,
const NamedDecl *ND) {
- const ObjCMethodDecl *M = dyn_cast_or_null<ObjCMethodDecl>(ND);
+ const auto *M = dyn_cast_or_null<ObjCMethodDecl>(ND);
if (!M || !M->isPropertyAccessor())
return nullptr;
@@ -3074,8 +3211,7 @@ const RawComment *clang::getPatternCompletionComment(const ASTContext &Ctx,
const RawComment *clang::getParameterComment(
const ASTContext &Ctx,
- const CodeCompleteConsumer::OverloadCandidate &Result,
- unsigned ArgIndex) {
+ const CodeCompleteConsumer::OverloadCandidate &Result, unsigned ArgIndex) {
auto FDecl = Result.getFunction();
if (!FDecl)
return nullptr;
@@ -3091,12 +3227,11 @@ static void AddOverloadParameterChunks(ASTContext &Context,
const FunctionDecl *Function,
const FunctionProtoType *Prototype,
CodeCompletionBuilder &Result,
- unsigned CurrentArg,
- unsigned Start = 0,
+ unsigned CurrentArg, unsigned Start = 0,
bool InOptional = false) {
bool FirstParameter = true;
- unsigned NumParams = Function ? Function->getNumParams()
- : Prototype->getNumParams();
+ unsigned NumParams =
+ Function ? Function->getNumParams() : Prototype->getNumParams();
for (unsigned P = Start; P != NumParams; ++P) {
if (Function && Function->getParamDecl(P)->hasDefaultArg() && !InOptional) {
@@ -3126,14 +3261,15 @@ static void AddOverloadParameterChunks(ASTContext &Context,
const ParmVarDecl *Param = Function->getParamDecl(P);
Placeholder = FormatFunctionParameter(Policy, Param);
if (Param->hasDefaultArg())
- Placeholder += GetDefaultValueString(Param, Context.getSourceManager(), Context.getLangOpts());
+ Placeholder += GetDefaultValueString(Param, Context.getSourceManager(),
+ Context.getLangOpts());
} else {
Placeholder = Prototype->getParamType(P).getAsString(Policy);
}
if (P == CurrentArg)
Result.AddCurrentParameterChunk(
- Result.getAllocator().CopyString(Placeholder));
+ Result.getAllocator().CopyString(Placeholder));
else
Result.AddPlaceholderChunk(Result.getAllocator().CopyString(Placeholder));
}
@@ -3155,23 +3291,22 @@ static void AddOverloadParameterChunks(ASTContext &Context,
CodeCompletionString *
CodeCompleteConsumer::OverloadCandidate::CreateSignatureString(
- unsigned CurrentArg, Sema &S,
- CodeCompletionAllocator &Allocator,
- CodeCompletionTUInfo &CCTUInfo,
- bool IncludeBriefComments) const {
+ unsigned CurrentArg, Sema &S, CodeCompletionAllocator &Allocator,
+ CodeCompletionTUInfo &CCTUInfo, bool IncludeBriefComments) const {
PrintingPolicy Policy = getCompletionPrintingPolicy(S);
// FIXME: Set priority, availability appropriately.
- CodeCompletionBuilder Result(Allocator,CCTUInfo, 1, CXAvailability_Available);
+ CodeCompletionBuilder Result(Allocator, CCTUInfo, 1,
+ CXAvailability_Available);
FunctionDecl *FDecl = getFunction();
- const FunctionProtoType *Proto
- = dyn_cast<FunctionProtoType>(getFunctionType());
+ const FunctionProtoType *Proto =
+ dyn_cast<FunctionProtoType>(getFunctionType());
if (!FDecl && !Proto) {
// Function without a prototype. Just give the return type and a
// highlighted ellipsis.
const FunctionType *FT = getFunctionType();
Result.AddResultTypeChunk(Result.getAllocator().CopyString(
- FT->getReturnType().getAsString(Policy)));
+ FT->getReturnType().getAsString(Policy)));
Result.AddChunk(CodeCompletionString::CK_LeftParen);
Result.AddChunk(CodeCompletionString::CK_CurrentParameter, "...");
Result.AddChunk(CodeCompletionString::CK_RightParen);
@@ -3185,10 +3320,9 @@ CodeCompleteConsumer::OverloadCandidate::CreateSignatureString(
}
AddResultTypeChunk(S.Context, Policy, FDecl, QualType(), Result);
Result.AddTextChunk(
- Result.getAllocator().CopyString(FDecl->getNameAsString()));
+ Result.getAllocator().CopyString(FDecl->getNameAsString()));
} else {
- Result.AddResultTypeChunk(
- Result.getAllocator().CopyString(
+ Result.AddResultTypeChunk(Result.getAllocator().CopyString(
Proto->getReturnType().getAsString(Policy)));
}
@@ -3218,8 +3352,7 @@ unsigned clang::getMacroUsagePriority(StringRef MacroName,
Priority = CCP_Constant;
// Treat "bool" as a type.
else if (MacroName.equals("bool"))
- Priority = CCP_Type + (LangOpts.ObjC1? CCD_bool_in_ObjC : 0);
-
+ Priority = CCP_Type + (LangOpts.ObjC ? CCD_bool_in_ObjC : 0);
return Priority;
}
@@ -3229,75 +3362,112 @@ CXCursorKind clang::getCursorKindForDecl(const Decl *D) {
return CXCursor_UnexposedDecl;
switch (D->getKind()) {
- case Decl::Enum: return CXCursor_EnumDecl;
- case Decl::EnumConstant: return CXCursor_EnumConstantDecl;
- case Decl::Field: return CXCursor_FieldDecl;
- case Decl::Function:
- return CXCursor_FunctionDecl;
- case Decl::ObjCCategory: return CXCursor_ObjCCategoryDecl;
- case Decl::ObjCCategoryImpl: return CXCursor_ObjCCategoryImplDecl;
- case Decl::ObjCImplementation: return CXCursor_ObjCImplementationDecl;
-
- case Decl::ObjCInterface: return CXCursor_ObjCInterfaceDecl;
- case Decl::ObjCIvar: return CXCursor_ObjCIvarDecl;
- case Decl::ObjCMethod:
- return cast<ObjCMethodDecl>(D)->isInstanceMethod()
- ? CXCursor_ObjCInstanceMethodDecl : CXCursor_ObjCClassMethodDecl;
- case Decl::CXXMethod: return CXCursor_CXXMethod;
- case Decl::CXXConstructor: return CXCursor_Constructor;
- case Decl::CXXDestructor: return CXCursor_Destructor;
- case Decl::CXXConversion: return CXCursor_ConversionFunction;
- case Decl::ObjCProperty: return CXCursor_ObjCPropertyDecl;
- case Decl::ObjCProtocol: return CXCursor_ObjCProtocolDecl;
- case Decl::ParmVar: return CXCursor_ParmDecl;
- case Decl::Typedef: return CXCursor_TypedefDecl;
- case Decl::TypeAlias: return CXCursor_TypeAliasDecl;
- case Decl::TypeAliasTemplate: return CXCursor_TypeAliasTemplateDecl;
- case Decl::Var: return CXCursor_VarDecl;
- case Decl::Namespace: return CXCursor_Namespace;
- case Decl::NamespaceAlias: return CXCursor_NamespaceAlias;
- case Decl::TemplateTypeParm: return CXCursor_TemplateTypeParameter;
- case Decl::NonTypeTemplateParm:return CXCursor_NonTypeTemplateParameter;
- case Decl::TemplateTemplateParm:return CXCursor_TemplateTemplateParameter;
- case Decl::FunctionTemplate: return CXCursor_FunctionTemplate;
- case Decl::ClassTemplate: return CXCursor_ClassTemplate;
- case Decl::AccessSpec: return CXCursor_CXXAccessSpecifier;
- case Decl::ClassTemplatePartialSpecialization:
- return CXCursor_ClassTemplatePartialSpecialization;
- case Decl::UsingDirective: return CXCursor_UsingDirective;
- case Decl::StaticAssert: return CXCursor_StaticAssert;
- case Decl::Friend: return CXCursor_FriendDecl;
- case Decl::TranslationUnit: return CXCursor_TranslationUnit;
-
- case Decl::Using:
- case Decl::UnresolvedUsingValue:
- case Decl::UnresolvedUsingTypename:
- return CXCursor_UsingDeclaration;
-
- case Decl::ObjCPropertyImpl:
- switch (cast<ObjCPropertyImplDecl>(D)->getPropertyImplementation()) {
- case ObjCPropertyImplDecl::Dynamic:
- return CXCursor_ObjCDynamicDecl;
-
- case ObjCPropertyImplDecl::Synthesize:
- return CXCursor_ObjCSynthesizeDecl;
- }
-
- case Decl::Import:
- return CXCursor_ModuleImportDecl;
-
- case Decl::ObjCTypeParam: return CXCursor_TemplateTypeParameter;
+ case Decl::Enum:
+ return CXCursor_EnumDecl;
+ case Decl::EnumConstant:
+ return CXCursor_EnumConstantDecl;
+ case Decl::Field:
+ return CXCursor_FieldDecl;
+ case Decl::Function:
+ return CXCursor_FunctionDecl;
+ case Decl::ObjCCategory:
+ return CXCursor_ObjCCategoryDecl;
+ case Decl::ObjCCategoryImpl:
+ return CXCursor_ObjCCategoryImplDecl;
+ case Decl::ObjCImplementation:
+ return CXCursor_ObjCImplementationDecl;
+
+ case Decl::ObjCInterface:
+ return CXCursor_ObjCInterfaceDecl;
+ case Decl::ObjCIvar:
+ return CXCursor_ObjCIvarDecl;
+ case Decl::ObjCMethod:
+ return cast<ObjCMethodDecl>(D)->isInstanceMethod()
+ ? CXCursor_ObjCInstanceMethodDecl
+ : CXCursor_ObjCClassMethodDecl;
+ case Decl::CXXMethod:
+ return CXCursor_CXXMethod;
+ case Decl::CXXConstructor:
+ return CXCursor_Constructor;
+ case Decl::CXXDestructor:
+ return CXCursor_Destructor;
+ case Decl::CXXConversion:
+ return CXCursor_ConversionFunction;
+ case Decl::ObjCProperty:
+ return CXCursor_ObjCPropertyDecl;
+ case Decl::ObjCProtocol:
+ return CXCursor_ObjCProtocolDecl;
+ case Decl::ParmVar:
+ return CXCursor_ParmDecl;
+ case Decl::Typedef:
+ return CXCursor_TypedefDecl;
+ case Decl::TypeAlias:
+ return CXCursor_TypeAliasDecl;
+ case Decl::TypeAliasTemplate:
+ return CXCursor_TypeAliasTemplateDecl;
+ case Decl::Var:
+ return CXCursor_VarDecl;
+ case Decl::Namespace:
+ return CXCursor_Namespace;
+ case Decl::NamespaceAlias:
+ return CXCursor_NamespaceAlias;
+ case Decl::TemplateTypeParm:
+ return CXCursor_TemplateTypeParameter;
+ case Decl::NonTypeTemplateParm:
+ return CXCursor_NonTypeTemplateParameter;
+ case Decl::TemplateTemplateParm:
+ return CXCursor_TemplateTemplateParameter;
+ case Decl::FunctionTemplate:
+ return CXCursor_FunctionTemplate;
+ case Decl::ClassTemplate:
+ return CXCursor_ClassTemplate;
+ case Decl::AccessSpec:
+ return CXCursor_CXXAccessSpecifier;
+ case Decl::ClassTemplatePartialSpecialization:
+ return CXCursor_ClassTemplatePartialSpecialization;
+ case Decl::UsingDirective:
+ return CXCursor_UsingDirective;
+ case Decl::StaticAssert:
+ return CXCursor_StaticAssert;
+ case Decl::Friend:
+ return CXCursor_FriendDecl;
+ case Decl::TranslationUnit:
+ return CXCursor_TranslationUnit;
+
+ case Decl::Using:
+ case Decl::UnresolvedUsingValue:
+ case Decl::UnresolvedUsingTypename:
+ return CXCursor_UsingDeclaration;
+
+ case Decl::ObjCPropertyImpl:
+ switch (cast<ObjCPropertyImplDecl>(D)->getPropertyImplementation()) {
+ case ObjCPropertyImplDecl::Dynamic:
+ return CXCursor_ObjCDynamicDecl;
+
+ case ObjCPropertyImplDecl::Synthesize:
+ return CXCursor_ObjCSynthesizeDecl;
+ }
+
+ case Decl::Import:
+ return CXCursor_ModuleImportDecl;
+
+ case Decl::ObjCTypeParam:
+ return CXCursor_TemplateTypeParameter;
- default:
- if (const TagDecl *TD = dyn_cast<TagDecl>(D)) {
- switch (TD->getTagKind()) {
- case TTK_Interface: // fall through
- case TTK_Struct: return CXCursor_StructDecl;
- case TTK_Class: return CXCursor_ClassDecl;
- case TTK_Union: return CXCursor_UnionDecl;
- case TTK_Enum: return CXCursor_EnumDecl;
- }
+ default:
+ if (const auto *TD = dyn_cast<TagDecl>(D)) {
+ switch (TD->getTagKind()) {
+ case TTK_Interface: // fall through
+ case TTK_Struct:
+ return CXCursor_StructDecl;
+ case TTK_Class:
+ return CXCursor_ClassDecl;
+ case TTK_Union:
+ return CXCursor_UnionDecl;
+ case TTK_Enum:
+ return CXCursor_EnumDecl;
}
+ }
}
return CXCursor_UnexposedDecl;
@@ -3351,8 +3521,8 @@ static void HandleCodeCompleteResults(Sema *S,
CodeCompleter->ProcessCodeCompleteResults(*S, Context, Results, NumResults);
}
-static enum CodeCompletionContext::Kind mapCodeCompletionContext(Sema &S,
- Sema::ParserCompletionContext PCC) {
+static CodeCompletionContext
+mapCodeCompletionContext(Sema &S, Sema::ParserCompletionContext PCC) {
switch (PCC) {
case Sema::PCC_Namespace:
return CodeCompletionContext::CCC_TopLevel;
@@ -3382,14 +3552,16 @@ static enum CodeCompletionContext::Kind mapCodeCompletionContext(Sema &S,
case Sema::PCC_ForInit:
if (S.getLangOpts().CPlusPlus || S.getLangOpts().C99 ||
- S.getLangOpts().ObjC1)
+ S.getLangOpts().ObjC)
return CodeCompletionContext::CCC_ParenthesizedExpression;
else
return CodeCompletionContext::CCC_Expression;
case Sema::PCC_Expression:
- case Sema::PCC_Condition:
return CodeCompletionContext::CCC_Expression;
+ case Sema::PCC_Condition:
+ return CodeCompletionContext(CodeCompletionContext::CCC_Expression,
+ S.getASTContext().BoolTy);
case Sema::PCC_Statement:
return CodeCompletionContext::CCC_Statement;
@@ -3422,7 +3594,6 @@ static void MaybeAddOverrideCalls(Sema &S, DeclContext *InContext,
while (isa<BlockDecl>(CurContext))
CurContext = CurContext->getParent();
-
CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(CurContext);
if (!Method || !Method->isVirtual())
return;
@@ -3442,9 +3613,8 @@ static void MaybeAddOverrideCalls(Sema &S, DeclContext *InContext,
// If we need a nested-name-specifier, add one now.
if (!InContext) {
- NestedNameSpecifier *NNS
- = getRequiredQualification(S.Context, CurContext,
- Overridden->getDeclContext());
+ NestedNameSpecifier *NNS = getRequiredQualification(
+ S.Context, CurContext, Overridden->getDeclContext());
if (NNS) {
std::string Str;
llvm::raw_string_ostream OS(Str);
@@ -3454,8 +3624,8 @@ static void MaybeAddOverrideCalls(Sema &S, DeclContext *InContext,
} else if (!InContext->Equals(Overridden->getDeclContext()))
continue;
- Builder.AddTypedTextChunk(Results.getAllocator().CopyString(
- Overridden->getNameAsString()));
+ Builder.AddTypedTextChunk(
+ Results.getAllocator().CopyString(Overridden->getNameAsString()));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
bool FirstParam = true;
for (auto P : Method->parameters()) {
@@ -3468,11 +3638,9 @@ static void MaybeAddOverrideCalls(Sema &S, DeclContext *InContext,
Results.getAllocator().CopyString(P->getIdentifier()->getName()));
}
Builder.AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(CodeCompletionResult(Builder.TakeString(),
- CCP_SuperCompletion,
- CXCursor_CXXMethod,
- CXAvailability_Available,
- Overridden));
+ Results.AddResult(CodeCompletionResult(
+ Builder.TakeString(), CCP_SuperCompletion, CXCursor_CXXMethod,
+ CXAvailability_Available, Overridden));
Results.Ignore(Overridden);
}
}
@@ -3494,39 +3662,35 @@ void Sema::CodeCompleteModuleImport(SourceLocation ImportLoc,
PP.getHeaderSearchInfo().collectAllModules(Modules);
for (unsigned I = 0, N = Modules.size(); I != N; ++I) {
Builder.AddTypedTextChunk(
- Builder.getAllocator().CopyString(Modules[I]->Name));
- Results.AddResult(Result(Builder.TakeString(),
- CCP_Declaration,
- CXCursor_ModuleImportDecl,
- Modules[I]->isAvailable()
- ? CXAvailability_Available
- : CXAvailability_NotAvailable));
+ Builder.getAllocator().CopyString(Modules[I]->Name));
+ Results.AddResult(Result(
+ Builder.TakeString(), CCP_Declaration, CXCursor_ModuleImportDecl,
+ Modules[I]->isAvailable() ? CXAvailability_Available
+ : CXAvailability_NotAvailable));
}
} else if (getLangOpts().Modules) {
// Load the named module.
- Module *Mod = PP.getModuleLoader().loadModule(ImportLoc, Path,
- Module::AllVisible,
- /*IsInclusionDirective=*/false);
+ Module *Mod =
+ PP.getModuleLoader().loadModule(ImportLoc, Path, Module::AllVisible,
+ /*IsInclusionDirective=*/false);
// Enumerate submodules.
if (Mod) {
for (Module::submodule_iterator Sub = Mod->submodule_begin(),
- SubEnd = Mod->submodule_end();
+ SubEnd = Mod->submodule_end();
Sub != SubEnd; ++Sub) {
Builder.AddTypedTextChunk(
- Builder.getAllocator().CopyString((*Sub)->Name));
- Results.AddResult(Result(Builder.TakeString(),
- CCP_Declaration,
- CXCursor_ModuleImportDecl,
- (*Sub)->isAvailable()
- ? CXAvailability_Available
- : CXAvailability_NotAvailable));
+ Builder.getAllocator().CopyString((*Sub)->Name));
+ Results.AddResult(Result(
+ Builder.TakeString(), CCP_Declaration, CXCursor_ModuleImportDecl,
+ (*Sub)->isAvailable() ? CXAvailability_Available
+ : CXAvailability_NotAvailable));
}
}
}
Results.ExitScope();
HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(),Results.size());
+ Results.data(), Results.size());
}
void Sema::CodeCompleteOrdinaryName(Scope *S,
@@ -3573,10 +3737,11 @@ void Sema::CodeCompleteOrdinaryName(Scope *S,
// If we are in a C++ non-static member function, check the qualifiers on
// the member function to filter/prioritize the results list.
- if (CXXMethodDecl *CurMethod = dyn_cast<CXXMethodDecl>(CurContext))
- if (CurMethod->isInstance())
- Results.setObjectTypeQualifiers(
- Qualifiers::fromCVRMask(CurMethod->getTypeQualifiers()));
+ if (CXXMethodDecl *CurMethod = dyn_cast<CXXMethodDecl>(CurContext)) {
+ if (CurMethod->isInstance()) {
+ Results.setObjectTypeQualifiers(CurMethod->getTypeQualifiers());
+ }
+ }
CodeCompletionDeclConsumer Consumer(Results, CurContext);
LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
@@ -3613,25 +3778,27 @@ void Sema::CodeCompleteOrdinaryName(Scope *S,
AddMacroResults(PP, Results, CodeCompleter->loadExternal(), false);
HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(),Results.size());
+ Results.data(), Results.size());
}
static void AddClassMessageCompletions(Sema &SemaRef, Scope *S,
ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
- bool AtArgumentExpression,
- bool IsSuper,
+ bool AtArgumentExpression, bool IsSuper,
ResultBuilder &Results);
void Sema::CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers) {
typedef CodeCompletionResult Result;
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
- CodeCompleter->getCodeCompletionTUInfo(),
- AllowNestedNameSpecifiers
- ? CodeCompletionContext::CCC_PotentiallyQualifiedName
- : CodeCompletionContext::CCC_Name);
+ ResultBuilder Results(
+ *this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ AllowNestedNameSpecifiers
+          // FIXME: Try to separate the code paths leading here to deduce
+          // whether we need an existing symbol or a new one.
+ ? CodeCompletionContext::CCC_SymbolOrNewName
+ : CodeCompletionContext::CCC_NewName);
Results.EnterNewScope();
// Type qualifiers can come after names.
@@ -3672,12 +3839,11 @@ void Sema::CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
DS.getTypeSpecType() == DeclSpec::TST_typename &&
DS.getTypeSpecComplex() == DeclSpec::TSC_unspecified &&
DS.getTypeSpecSign() == DeclSpec::TSS_unspecified &&
- !DS.isTypeAltiVecVector() &&
- S &&
+ !DS.isTypeAltiVecVector() && S &&
(S->getFlags() & Scope::DeclScope) != 0 &&
(S->getFlags() & (Scope::ClassScope | Scope::TemplateParamScope |
- Scope::FunctionPrototypeScope |
- Scope::AtCatchScope)) == 0) {
+ Scope::FunctionPrototypeScope | Scope::AtCatchScope)) ==
+ 0) {
ParsedType T = DS.getRepAsType();
if (!T.get().isNull() && T.get()->isObjCObjectOrInterfaceType())
AddClassMessageCompletions(*this, S, T, None, false, false, Results);
@@ -3686,15 +3852,14 @@ void Sema::CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
// Note that we intentionally suppress macro results here, since we do not
// encourage using macros to produce the names of entities.
- HandleCodeCompleteResults(this, CodeCompleter,
- Results.getCompletionContext(),
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
Results.data(), Results.size());
}
struct Sema::CodeCompleteExpressionData {
CodeCompleteExpressionData(QualType PreferredType = QualType())
- : PreferredType(PreferredType), IntegralConstantExpression(false),
- ObjCCollection(false) { }
+ : PreferredType(PreferredType), IntegralConstantExpression(false),
+ ObjCCollection(false) {}
QualType PreferredType;
bool IntegralConstantExpression;
@@ -3738,12 +3903,11 @@ void Sema::CodeCompleteExpression(Scope *S,
bool PreferredTypeIsPointer = false;
if (!Data.PreferredType.isNull())
- PreferredTypeIsPointer = Data.PreferredType->isAnyPointerType()
- || Data.PreferredType->isMemberPointerType()
- || Data.PreferredType->isBlockPointerType();
+ PreferredTypeIsPointer = Data.PreferredType->isAnyPointerType() ||
+ Data.PreferredType->isMemberPointerType() ||
+ Data.PreferredType->isBlockPointerType();
- if (S->getFnParent() &&
- !Data.ObjCCollection &&
+ if (S->getFnParent() && !Data.ObjCCollection &&
!Data.IntegralConstantExpression)
AddPrettyFunctionResults(getLangOpts(), Results);
@@ -3761,13 +3925,13 @@ void Sema::CodeCompleteExpression(Scope *S, QualType PreferredType) {
void Sema::CodeCompletePostfixExpression(Scope *S, ExprResult E) {
if (E.isInvalid())
CodeCompleteOrdinaryName(S, PCC_RecoveryInFunction);
- else if (getLangOpts().ObjC1)
+ else if (getLangOpts().ObjC)
CodeCompleteObjCInstanceMessage(S, E.get(), None, false);
}
/// The set of properties that have already been added, referenced by
/// property name.
-typedef llvm::SmallPtrSet<IdentifierInfo*, 16> AddedPropertiesSet;
+typedef llvm::SmallPtrSet<IdentifierInfo *, 16> AddedPropertiesSet;
/// Retrieve the container definition, if any?
static ObjCContainerDecl *getContainerDef(ObjCContainerDecl *Container) {
@@ -3825,11 +3989,13 @@ static void AddObjCBlockCall(ASTContext &Context, const PrintingPolicy &Policy,
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
-static void AddObjCProperties(
- const CodeCompletionContext &CCContext, ObjCContainerDecl *Container,
- bool AllowCategories, bool AllowNullaryMethods, DeclContext *CurContext,
- AddedPropertiesSet &AddedProperties, ResultBuilder &Results,
- bool IsBaseExprStatement = false, bool IsClassProperty = false) {
+static void
+AddObjCProperties(const CodeCompletionContext &CCContext,
+ ObjCContainerDecl *Container, bool AllowCategories,
+ bool AllowNullaryMethods, DeclContext *CurContext,
+ AddedPropertiesSet &AddedProperties, ResultBuilder &Results,
+ bool IsBaseExprStatement = false,
+ bool IsClassProperty = false, bool InOriginalClass = true) {
typedef CodeCompletionResult Result;
// Retrieve the definition.
@@ -3844,8 +4010,10 @@ static void AddObjCProperties(
// expressions.
if (!P->getType().getTypePtr()->isBlockPointerType() ||
!IsBaseExprStatement) {
- Results.MaybeAddResult(Result(P, Results.getBasePriority(P), nullptr),
- CurContext);
+ Result R = Result(P, Results.getBasePriority(P), nullptr);
+ if (!InOriginalClass)
+ setInBaseClass(R);
+ Results.MaybeAddResult(R, CurContext);
return;
}
@@ -3856,8 +4024,10 @@ static void AddObjCProperties(
findTypeLocationForBlockDecl(P->getTypeSourceInfo(), BlockLoc,
BlockProtoLoc);
if (!BlockLoc) {
- Results.MaybeAddResult(Result(P, Results.getBasePriority(P), nullptr),
- CurContext);
+ Result R = Result(P, Results.getBasePriority(P), nullptr);
+ if (!InOriginalClass)
+ setInBaseClass(R);
+ Results.MaybeAddResult(R, CurContext);
return;
}
@@ -3868,9 +4038,10 @@ static void AddObjCProperties(
AddObjCBlockCall(Container->getASTContext(),
getCompletionPrintingPolicy(Results.getSema()), Builder, P,
BlockLoc, BlockProtoLoc);
- Results.MaybeAddResult(
- Result(Builder.TakeString(), P, Results.getBasePriority(P)),
- CurContext);
+ Result R = Result(Builder.TakeString(), P, Results.getBasePriority(P));
+ if (!InOriginalClass)
+ setInBaseClass(R);
+ Results.MaybeAddResult(R, CurContext);
// Provide additional block setter completion iff the base expression is a
// statement and the block property is mutable.
@@ -3896,13 +4067,15 @@ static void AddObjCProperties(
// otherwise the setter completion should show up before the default
// property completion, as we normally want to use the result of the
// call.
- Results.MaybeAddResult(
+ Result R =
Result(Builder.TakeString(), P,
Results.getBasePriority(P) +
(BlockLoc.getTypePtr()->getReturnType()->isVoidType()
? CCD_BlockPropertySetter
- : -CCD_BlockPropertySetter)),
- CurContext);
+ : -CCD_BlockPropertySetter));
+ if (!InOriginalClass)
+ setInBaseClass(R);
+ Results.MaybeAddResult(R, CurContext);
}
};
@@ -3930,10 +4103,11 @@ static void AddObjCProperties(
AddResultTypeChunk(Context, Policy, M, CCContext.getBaseType(), Builder);
Builder.AddTypedTextChunk(
Results.getAllocator().CopyString(Name->getName()));
- Results.MaybeAddResult(
- Result(Builder.TakeString(), M,
- CCP_MemberDeclaration + CCD_MethodAsProperty),
- CurContext);
+ Result R = Result(Builder.TakeString(), M,
+ CCP_MemberDeclaration + CCD_MethodAsProperty);
+ if (!InOriginalClass)
+ setInBaseClass(R);
+ Results.MaybeAddResult(R, CurContext);
};
if (IsClassProperty) {
@@ -3959,42 +4133,47 @@ static void AddObjCProperties(
for (auto *P : Protocol->protocols())
AddObjCProperties(CCContext, P, AllowCategories, AllowNullaryMethods,
CurContext, AddedProperties, Results,
- IsBaseExprStatement, IsClassProperty);
- } else if (ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(Container)){
+ IsBaseExprStatement, IsClassProperty,
+ /*InOriginalClass*/ false);
+ } else if (ObjCInterfaceDecl *IFace =
+ dyn_cast<ObjCInterfaceDecl>(Container)) {
if (AllowCategories) {
// Look through categories.
for (auto *Cat : IFace->known_categories())
AddObjCProperties(CCContext, Cat, AllowCategories, AllowNullaryMethods,
CurContext, AddedProperties, Results,
- IsBaseExprStatement, IsClassProperty);
+ IsBaseExprStatement, IsClassProperty,
+ InOriginalClass);
}
// Look through protocols.
for (auto *I : IFace->all_referenced_protocols())
AddObjCProperties(CCContext, I, AllowCategories, AllowNullaryMethods,
CurContext, AddedProperties, Results,
- IsBaseExprStatement, IsClassProperty);
+ IsBaseExprStatement, IsClassProperty,
+ /*InOriginalClass*/ false);
// Look in the superclass.
if (IFace->getSuperClass())
AddObjCProperties(CCContext, IFace->getSuperClass(), AllowCategories,
AllowNullaryMethods, CurContext, AddedProperties,
- Results, IsBaseExprStatement, IsClassProperty);
- } else if (const ObjCCategoryDecl *Category
- = dyn_cast<ObjCCategoryDecl>(Container)) {
+ Results, IsBaseExprStatement, IsClassProperty,
+ /*InOriginalClass*/ false);
+ } else if (const auto *Category =
+ dyn_cast<ObjCCategoryDecl>(Container)) {
// Look through protocols.
for (auto *P : Category->protocols())
AddObjCProperties(CCContext, P, AllowCategories, AllowNullaryMethods,
CurContext, AddedProperties, Results,
- IsBaseExprStatement, IsClassProperty);
+ IsBaseExprStatement, IsClassProperty,
+ /*InOriginalClass*/ false);
}
}
-static void AddRecordMembersCompletionResults(Sema &SemaRef,
- ResultBuilder &Results, Scope *S,
- QualType BaseType,
- RecordDecl *RD,
- Optional<FixItHint> AccessOpFixIt) {
+static void
+AddRecordMembersCompletionResults(Sema &SemaRef, ResultBuilder &Results,
+ Scope *S, QualType BaseType, RecordDecl *RD,
+ Optional<FixItHint> AccessOpFixIt) {
// Indicate that we are performing a member access, and the cv-qualifiers
// for the base object type.
Results.setObjectTypeQualifiers(BaseType.getQualifiers());
@@ -4003,8 +4182,8 @@ static void AddRecordMembersCompletionResults(Sema &SemaRef,
Results.allowNestedNameSpecifiers();
std::vector<FixItHint> FixIts;
if (AccessOpFixIt)
- FixIts.emplace_back(AccessOpFixIt.getValue());
- CodeCompletionDeclConsumer Consumer(Results, SemaRef.CurContext, std::move(FixIts));
+ FixIts.emplace_back(AccessOpFixIt.getValue());
+ CodeCompletionDeclConsumer Consumer(Results, RD, BaseType, std::move(FixIts));
SemaRef.LookupVisibleDecls(RD, Sema::LookupMemberName, Consumer,
SemaRef.CodeCompleter->includeGlobals(),
/*IncludeDependentBases=*/true,
@@ -4045,7 +4224,7 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
enum CodeCompletionContext::Kind contextKind;
if (IsArrow) {
- if (const PointerType *Ptr = ConvertedBaseType->getAs<PointerType>())
+ if (const auto *Ptr = ConvertedBaseType->getAs<PointerType>())
ConvertedBaseType = Ptr->getPointeeType();
}
@@ -4065,7 +4244,8 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
CodeCompleter->getCodeCompletionTUInfo(), CCContext,
&ResultBuilder::IsMember);
- auto DoCompletion = [&](Expr *Base, bool IsArrow, Optional<FixItHint> AccessOpFixIt) -> bool {
+ auto DoCompletion = [&](Expr *Base, bool IsArrow,
+ Optional<FixItHint> AccessOpFixIt) -> bool {
if (!Base)
return false;
@@ -4119,7 +4299,8 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
for (auto *I : BaseType->getAs<ObjCObjectPointerType>()->quals())
AddObjCProperties(CCContext, I, true, /*AllowNullaryMethods=*/true,
CurContext, AddedProperties, Results,
- IsBaseExprStatement);
+ IsBaseExprStatement, /*IsClassProperty*/ false,
+ /*InOriginalClass*/ false);
} else if ((IsArrow && BaseType->isObjCObjectPointerType()) ||
(!IsArrow && BaseType->isObjCObjectType())) {
// Objective-C instance variable access.
@@ -4132,7 +4313,7 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
// Add all ivars from this class and its superclasses.
if (Class) {
- CodeCompletionDeclConsumer Consumer(Results, CurContext);
+ CodeCompletionDeclConsumer Consumer(Results, Class, BaseType);
Results.setFilter(&ResultBuilder::IsObjCIvar);
LookupVisibleDecls(
Class, LookupMemberName, Consumer, CodeCompleter->includeGlobals(),
@@ -4194,8 +4375,8 @@ void Sema::CodeCompleteTag(Scope *S, unsigned TagSpec) {
return;
ResultBuilder::LookupFilter Filter = nullptr;
- enum CodeCompletionContext::Kind ContextKind
- = CodeCompletionContext::CCC_Other;
+ enum CodeCompletionContext::Kind ContextKind =
+ CodeCompletionContext::CCC_Other;
switch ((DeclSpec::TST)TagSpec) {
case DeclSpec::TST_enum:
Filter = &ResultBuilder::IsEnum;
@@ -4237,7 +4418,7 @@ void Sema::CodeCompleteTag(Scope *S, unsigned TagSpec) {
}
HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(),Results.size());
+ Results.data(), Results.size());
}
static void AddTypeQualifierResults(DeclSpec &DS, ResultBuilder &Results,
@@ -4261,8 +4442,7 @@ void Sema::CodeCompleteTypeQualifiers(DeclSpec &DS) {
Results.EnterNewScope();
AddTypeQualifierResults(DS, Results, LangOpts);
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter,
- Results.getCompletionContext(),
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
Results.data(), Results.size());
}
@@ -4297,6 +4477,9 @@ void Sema::CodeCompleteCase(Scope *S) {
return;
SwitchStmt *Switch = getCurFunction()->SwitchStack.back().getPointer();
+  // The condition expression might be invalid; do not continue in that case.
+ if (!Switch->getCond())
+ return;
QualType type = Switch->getCond()->IgnoreImplicit()->getType();
if (!type->isEnumeralType()) {
CodeCompleteExpressionData Data(type);
@@ -4324,9 +4507,9 @@ void Sema::CodeCompleteCase(Scope *S) {
continue;
Expr *CaseVal = Case->getLHS()->IgnoreParenCasts();
- if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CaseVal))
- if (EnumConstantDecl *Enumerator
- = dyn_cast<EnumConstantDecl>(DRE->getDecl())) {
+ if (auto *DRE = dyn_cast<DeclRefExpr>(CaseVal))
+ if (auto *Enumerator =
+ dyn_cast<EnumConstantDecl>(DRE->getDecl())) {
// We look into the AST of the case statement to determine which
// enumerator was named. Alternatively, we could compute the value of
// the integral constant expression, then compare it against the
@@ -4391,10 +4574,9 @@ static bool anyNullArguments(ArrayRef<Expr *> Args) {
typedef CodeCompleteConsumer::OverloadCandidate ResultCandidate;
-static void mergeCandidatesWithResults(Sema &SemaRef,
- SmallVectorImpl<ResultCandidate> &Results,
- OverloadCandidateSet &CandidateSet,
- SourceLocation Loc) {
+static void mergeCandidatesWithResults(
+ Sema &SemaRef, SmallVectorImpl<ResultCandidate> &Results,
+ OverloadCandidateSet &CandidateSet, SourceLocation Loc) {
if (!CandidateSet.empty()) {
// Sort the overload candidate set by placing the best overloads first.
std::stable_sort(
@@ -4405,7 +4587,7 @@ static void mergeCandidatesWithResults(Sema &SemaRef,
});
// Add the remaining viable overload candidates as code-completion results.
- for (auto &Candidate : CandidateSet) {
+ for (OverloadCandidate &Candidate : CandidateSet) {
if (Candidate.Function && Candidate.Function->isDeleted())
continue;
if (Candidate.Viable)
@@ -4417,22 +4599,21 @@ static void mergeCandidatesWithResults(Sema &SemaRef,
/// Get the type of the Nth parameter from a given set of overload
/// candidates.
static QualType getParamType(Sema &SemaRef,
- ArrayRef<ResultCandidate> Candidates,
- unsigned N) {
+ ArrayRef<ResultCandidate> Candidates, unsigned N) {
// Given the overloads 'Candidates' for a function call matching all arguments
// up to N, return the type of the Nth parameter if it is the same for all
// overload candidates.
QualType ParamType;
for (auto &Candidate : Candidates) {
- if (auto FType = Candidate.getFunctionType())
- if (auto Proto = dyn_cast<FunctionProtoType>(FType))
+ if (const auto *FType = Candidate.getFunctionType())
+ if (const auto *Proto = dyn_cast<FunctionProtoType>(FType))
if (N < Proto->getNumParams()) {
if (ParamType.isNull())
ParamType = Proto->getParamType(N);
else if (!SemaRef.Context.hasSameUnqualifiedType(
- ParamType.getNonReferenceType(),
- Proto->getParamType(N).getNonReferenceType()))
+ ParamType.getNonReferenceType(),
+ Proto->getParamType(N).getNonReferenceType()))
// Otherwise return a default-constructed QualType.
return QualType();
}
@@ -4491,13 +4672,12 @@ QualType Sema::ProduceCallSignatureHelp(Scope *S, Expr *Fn,
const bool FirstArgumentIsBase = !UME->isImplicitAccess() && UME->getBase();
AddFunctionCandidates(Decls, ArgExprs, CandidateSet, TemplateArgs,
/*SuppressUsedConversions=*/false,
- /*PartialOverloading=*/true,
- FirstArgumentIsBase);
+ /*PartialOverloading=*/true, FirstArgumentIsBase);
} else {
FunctionDecl *FD = nullptr;
- if (auto MCE = dyn_cast<MemberExpr>(NakedFn))
+ if (auto *MCE = dyn_cast<MemberExpr>(NakedFn))
FD = dyn_cast<FunctionDecl>(MCE->getMemberDecl());
- else if (auto DRE = dyn_cast<DeclRefExpr>(NakedFn))
+ else if (auto *DRE = dyn_cast<DeclRefExpr>(NakedFn))
FD = dyn_cast<FunctionDecl>(DRE->getDecl());
if (FD) { // We check whether it's a resolved function declaration.
if (!getLangOpts().CPlusPlus ||
@@ -4514,8 +4694,8 @@ QualType Sema::ProduceCallSignatureHelp(Scope *S, Expr *Fn,
// call operator, so we check if it does and add them as candidates.
// A complete type is needed to lookup for member function call operators.
if (isCompleteType(Loc, NakedFn->getType())) {
- DeclarationName OpName = Context.DeclarationNames
- .getCXXOperatorName(OO_Call);
+ DeclarationName OpName =
+ Context.DeclarationNames.getCXXOperatorName(OO_Call);
LookupResult R(*this, OpName, Loc, LookupOrdinaryName);
LookupQualifiedName(R, DC);
R.suppressDiagnostics();
@@ -4535,7 +4715,7 @@ QualType Sema::ProduceCallSignatureHelp(Scope *S, Expr *Fn,
if (auto FP = T->getAs<FunctionProtoType>()) {
if (!TooManyArguments(FP->getNumParams(), Args.size(),
- /*PartialOverloading=*/true) ||
+ /*PartialOverloading=*/true) ||
FP->isVariadic())
Results.push_back(ResultCandidate(FP));
} else if (auto FT = T->getAs<FunctionType>())
@@ -4567,19 +4747,18 @@ QualType Sema::ProduceConstructorSignatureHelp(Scope *S, QualType Type,
OverloadCandidateSet CandidateSet(Loc, OverloadCandidateSet::CSK_Normal);
- for (auto C : LookupConstructors(RD)) {
- if (auto FD = dyn_cast<FunctionDecl>(C)) {
- AddOverloadCandidate(FD, DeclAccessPair::make(FD, C->getAccess()),
- Args, CandidateSet,
+ for (NamedDecl *C : LookupConstructors(RD)) {
+ if (auto *FD = dyn_cast<FunctionDecl>(C)) {
+ AddOverloadCandidate(FD, DeclAccessPair::make(FD, C->getAccess()), Args,
+ CandidateSet,
/*SuppressUsedConversions=*/false,
/*PartialOverloading=*/true);
- } else if (auto FTD = dyn_cast<FunctionTemplateDecl>(C)) {
- AddTemplateOverloadCandidate(FTD,
- DeclAccessPair::make(FTD, C->getAccess()),
- /*ExplicitTemplateArgs=*/nullptr,
- Args, CandidateSet,
- /*SuppressUsedConversions=*/false,
- /*PartialOverloading=*/true);
+ } else if (auto *FTD = dyn_cast<FunctionTemplateDecl>(C)) {
+ AddTemplateOverloadCandidate(
+ FTD, DeclAccessPair::make(FTD, C->getAccess()),
+ /*ExplicitTemplateArgs=*/nullptr, Args, CandidateSet,
+ /*SuppressUsedConversions=*/false,
+ /*PartialOverloading=*/true);
}
}
@@ -4614,7 +4793,12 @@ void Sema::CodeCompleteInitializer(Scope *S, Decl *D) {
return;
}
- CodeCompleteExpression(S, VD->getType());
+ CodeCompleteExpressionData Data;
+ Data.PreferredType = VD->getType();
+ // Ignore VD to avoid completing the variable itself, e.g. in 'int foo = ^'.
+ Data.IgnoreDecls.push_back(VD);
+
+ CodeCompleteExpression(S, Data);
}
void Sema::CodeCompleteReturn(Scope *S) {
@@ -4622,9 +4806,9 @@ void Sema::CodeCompleteReturn(Scope *S) {
if (isa<BlockDecl>(CurContext)) {
if (BlockScopeInfo *BSI = getCurBlock())
ResultType = BSI->ReturnType;
- } else if (FunctionDecl *Function = dyn_cast<FunctionDecl>(CurContext))
+ } else if (const auto *Function = dyn_cast<FunctionDecl>(CurContext))
ResultType = Function->getReturnType();
- else if (ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(CurContext))
+ else if (const auto *Method = dyn_cast<ObjCMethodDecl>(CurContext))
ResultType = Method->getReturnType();
if (ResultType.isNull())
@@ -4691,18 +4875,96 @@ void Sema::CodeCompleteAfterIf(Scope *S) {
AddMacroResults(PP, Results, CodeCompleter->loadExternal(), false);
HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(),Results.size());
+ Results.data(), Results.size());
+}
+
+static QualType getPreferredTypeOfBinaryRHS(Sema &S, Expr *LHS,
+ tok::TokenKind Op) {
+ if (!LHS)
+ return QualType();
+
+ QualType LHSType = LHS->getType();
+ if (LHSType->isPointerType()) {
+ if (Op == tok::plus || Op == tok::plusequal || Op == tok::minusequal)
+ return S.getASTContext().getPointerDiffType();
+ // Pointer difference is more common than subtracting an int from a pointer.
+ if (Op == tok::minus)
+ return LHSType;
+ }
+
+ switch (Op) {
+ // No way to infer the type of RHS from LHS.
+ case tok::comma:
+ return QualType();
+ // Prefer the type of the left operand for all of these.
+ // Arithmetic operations.
+ case tok::plus:
+ case tok::plusequal:
+ case tok::minus:
+ case tok::minusequal:
+ case tok::percent:
+ case tok::percentequal:
+ case tok::slash:
+ case tok::slashequal:
+ case tok::star:
+ case tok::starequal:
+ // Assignment.
+ case tok::equal:
+ // Comparison operators.
+ case tok::equalequal:
+ case tok::exclaimequal:
+ case tok::less:
+ case tok::lessequal:
+ case tok::greater:
+ case tok::greaterequal:
+ case tok::spaceship:
+ return LHS->getType();
+ // Binary shifts are often overloaded, so don't try to guess those.
+ case tok::greatergreater:
+ case tok::greatergreaterequal:
+ case tok::lessless:
+ case tok::lesslessequal:
+ if (LHSType->isIntegralOrEnumerationType())
+ return S.getASTContext().IntTy;
+ return QualType();
+ // Logical operators, assume we want bool.
+ case tok::ampamp:
+ case tok::pipepipe:
+ case tok::caretcaret:
+ return S.getASTContext().BoolTy;
+ // Operators often used for bit manipulation are typically used with the type
+ // of the left argument.
+ case tok::pipe:
+ case tok::pipeequal:
+ case tok::caret:
+ case tok::caretequal:
+ case tok::amp:
+ case tok::ampequal:
+ if (LHSType->isIntegralOrEnumerationType())
+ return LHSType;
+ return QualType();
+ // RHS should be a pointer to a member of the 'LHS' type, but we can't give
+ // any particular type here.
+ case tok::periodstar:
+ case tok::arrowstar:
+ return QualType();
+ default:
+ // FIXME(ibiryukov): handle the missing op, re-add the assertion.
+ // assert(false && "unhandled binary op");
+ return QualType();
+ }
}
-void Sema::CodeCompleteAssignmentRHS(Scope *S, Expr *LHS) {
- if (LHS)
- CodeCompleteExpression(S, static_cast<Expr *>(LHS)->getType());
+void Sema::CodeCompleteBinaryRHS(Scope *S, Expr *LHS, tok::TokenKind Op) {
+ auto PreferredType = getPreferredTypeOfBinaryRHS(*this, LHS, Op);
+ if (!PreferredType.isNull())
+ CodeCompleteExpression(S, PreferredType);
else
CodeCompleteOrdinaryName(S, PCC_Expression);
}
void Sema::CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
- bool EnteringContext) {
+ bool EnteringContext, QualType BaseType) {
if (SS.isEmpty() || !CodeCompleter)
return;
@@ -4711,7 +4973,7 @@ void Sema::CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
// it can be useful for global code completion which have information about
// contexts/symbols that are not in the AST.
if (SS.isInvalid()) {
- CodeCompletionContext CC(CodeCompletionContext::CCC_Name);
+ CodeCompletionContext CC(CodeCompletionContext::CCC_Symbol);
CC.setCXXScopeSpecifier(SS);
HandleCodeCompleteResults(this, CodeCompleter, CC, nullptr, 0);
return;
@@ -4729,7 +4991,7 @@ void Sema::CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
- CodeCompletionContext::CCC_Name);
+ CodeCompletionContext::CCC_Symbol);
Results.EnterNewScope();
// The "template" keyword can follow "::" in the grammar, but only
@@ -4749,7 +5011,7 @@ void Sema::CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
if (CodeCompleter->includeNamespaceLevelDecls() ||
(!Ctx->isNamespace() && !Ctx->isTranslationUnit())) {
- CodeCompletionDeclConsumer Consumer(Results, CurContext);
+ CodeCompletionDeclConsumer Consumer(Results, Ctx, BaseType);
LookupVisibleDecls(Ctx, LookupOrdinaryName, Consumer,
/*IncludeGlobalScope=*/true,
/*IncludeDependentBases=*/true,
@@ -4769,7 +5031,10 @@ void Sema::CodeCompleteUsing(Scope *S) {
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
- CodeCompletionContext::CCC_PotentiallyQualifiedName,
+                        // This can be either a using alias or a using
+                        // declaration; in the former we expect a new name,
+                        // in the latter an existing symbol.
+ CodeCompletionContext::CCC_SymbolOrNewName,
&ResultBuilder::IsNestedNameSpecifier);
Results.EnterNewScope();
@@ -4809,7 +5074,7 @@ void Sema::CodeCompleteUsingDirective(Scope *S) {
Results.data(), Results.size());
}
-void Sema::CodeCompleteNamespaceDecl(Scope *S) {
+void Sema::CodeCompleteNamespaceDecl(Scope *S) {
if (!CodeCompleter)
return;
@@ -4817,14 +5082,14 @@ void Sema::CodeCompleteNamespaceDecl(Scope *S) {
if (!S->getParent())
Ctx = Context.getTranslationUnitDecl();
- bool SuppressedGlobalResults
- = Ctx && !CodeCompleter->includeGlobals() && isa<TranslationUnitDecl>(Ctx);
+ bool SuppressedGlobalResults =
+ Ctx && !CodeCompleter->includeGlobals() && isa<TranslationUnitDecl>(Ctx);
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
SuppressedGlobalResults
- ? CodeCompletionContext::CCC_Namespace
- : CodeCompletionContext::CCC_Other,
+ ? CodeCompletionContext::CCC_Namespace
+ : CodeCompletionContext::CCC_Other,
&ResultBuilder::IsNamespace);
if (Ctx && Ctx->isFileContext() && !SuppressedGlobalResults) {
@@ -4834,7 +5099,8 @@ void Sema::CodeCompleteNamespaceDecl(Scope *S) {
// definition of each namespace.
std::map<NamespaceDecl *, NamespaceDecl *> OrigToLatest;
for (DeclContext::specific_decl_iterator<NamespaceDecl>
- NS(Ctx->decls_begin()), NSEnd(Ctx->decls_end());
+ NS(Ctx->decls_begin()),
+ NSEnd(Ctx->decls_end());
NS != NSEnd; ++NS)
OrigToLatest[NS->getOriginalNamespace()] = *NS;
@@ -4842,22 +5108,21 @@ void Sema::CodeCompleteNamespaceDecl(Scope *S) {
// namespace to the list of results.
Results.EnterNewScope();
for (std::map<NamespaceDecl *, NamespaceDecl *>::iterator
- NS = OrigToLatest.begin(),
- NSEnd = OrigToLatest.end();
+ NS = OrigToLatest.begin(),
+ NSEnd = OrigToLatest.end();
NS != NSEnd; ++NS)
- Results.AddResult(CodeCompletionResult(
- NS->second, Results.getBasePriority(NS->second),
- nullptr),
- CurContext, nullptr, false);
+ Results.AddResult(
+ CodeCompletionResult(NS->second, Results.getBasePriority(NS->second),
+ nullptr),
+ CurContext, nullptr, false);
Results.ExitScope();
}
- HandleCodeCompleteResults(this, CodeCompleter,
- Results.getCompletionContext(),
- Results.data(),Results.size());
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
}
-void Sema::CodeCompleteNamespaceAliasDecl(Scope *S) {
+void Sema::CodeCompleteNamespaceAliasDecl(Scope *S) {
if (!CodeCompleter)
return;
@@ -4870,9 +5135,8 @@ void Sema::CodeCompleteNamespaceAliasDecl(Scope *S) {
LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
CodeCompleter->includeGlobals(),
CodeCompleter->loadExternal());
- HandleCodeCompleteResults(this, CodeCompleter,
- Results.getCompletionContext(),
- Results.data(),Results.size());
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
}
void Sema::CodeCompleteOperatorName(Scope *S) {
@@ -4887,8 +5151,8 @@ void Sema::CodeCompleteOperatorName(Scope *S) {
Results.EnterNewScope();
// Add the names of overloadable operators.
-#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
- if (std::strcmp(Spelling, "?")) \
+#define OVERLOADED_OPERATOR(Name, Spelling, Token, Unary, Binary, MemberOnly) \
+ if (std::strcmp(Spelling, "?")) \
Results.AddResult(Result(Spelling));
#include "clang/Basic/OperatorKinds.def"
@@ -4908,20 +5172,19 @@ void Sema::CodeCompleteOperatorName(Scope *S) {
}
void Sema::CodeCompleteConstructorInitializer(
- Decl *ConstructorD,
- ArrayRef <CXXCtorInitializer *> Initializers) {
+ Decl *ConstructorD, ArrayRef<CXXCtorInitializer *> Initializers) {
if (!ConstructorD)
return;
AdjustDeclIfTemplate(ConstructorD);
- CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(ConstructorD);
+ auto *Constructor = dyn_cast<CXXConstructorDecl>(ConstructorD);
if (!Constructor)
return;
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
- CodeCompletionContext::CCC_PotentiallyQualifiedName);
+ CodeCompletionContext::CCC_Symbol);
Results.EnterNewScope();
// Fill in any already-initialized fields or base classes.
@@ -4929,39 +5192,96 @@ void Sema::CodeCompleteConstructorInitializer(
llvm::SmallPtrSet<CanQualType, 4> InitializedBases;
for (unsigned I = 0, E = Initializers.size(); I != E; ++I) {
if (Initializers[I]->isBaseInitializer())
- InitializedBases.insert(
- Context.getCanonicalType(QualType(Initializers[I]->getBaseClass(), 0)));
+ InitializedBases.insert(Context.getCanonicalType(
+ QualType(Initializers[I]->getBaseClass(), 0)));
else
- InitializedFields.insert(cast<FieldDecl>(
- Initializers[I]->getAnyMember()));
+ InitializedFields.insert(
+ cast<FieldDecl>(Initializers[I]->getAnyMember()));
}
// Add completions for base classes.
- CodeCompletionBuilder Builder(Results.getAllocator(),
- Results.getCodeCompletionTUInfo());
PrintingPolicy Policy = getCompletionPrintingPolicy(*this);
bool SawLastInitializer = Initializers.empty();
CXXRecordDecl *ClassDecl = Constructor->getParent();
+
+ auto GenerateCCS = [&](const NamedDecl *ND, const char *Name) {
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ Builder.AddTypedTextChunk(Name);
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ if (const auto *Function = dyn_cast<FunctionDecl>(ND))
+ AddFunctionParameterChunks(PP, Policy, Function, Builder);
+ else if (const auto *FunTemplDecl = dyn_cast<FunctionTemplateDecl>(ND))
+ AddFunctionParameterChunks(PP, Policy, FunTemplDecl->getTemplatedDecl(),
+ Builder);
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ return Builder.TakeString();
+ };
+ auto AddDefaultCtorInit = [&](const char *Name, const char *Type,
+ const NamedDecl *ND) {
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ Builder.AddTypedTextChunk(Name);
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk(Type);
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ if (ND) {
+ auto CCR = CodeCompletionResult(
+ Builder.TakeString(), ND,
+ SawLastInitializer ? CCP_NextInitializer : CCP_MemberDeclaration);
+ if (isa<FieldDecl>(ND))
+ CCR.CursorKind = CXCursor_MemberRef;
+ return Results.AddResult(CCR);
+ }
+ return Results.AddResult(CodeCompletionResult(
+ Builder.TakeString(),
+ SawLastInitializer ? CCP_NextInitializer : CCP_MemberDeclaration));
+ };
+ auto AddCtorsWithName = [&](const CXXRecordDecl *RD, unsigned int Priority,
+ const char *Name, const FieldDecl *FD) {
+ if (!RD)
+ return AddDefaultCtorInit(Name,
+ FD ? Results.getAllocator().CopyString(
+ FD->getType().getAsString(Policy))
+ : Name,
+ FD);
+ auto Ctors = getConstructors(Context, RD);
+ if (Ctors.begin() == Ctors.end())
+ return AddDefaultCtorInit(Name, Name, RD);
+ for (const NamedDecl *Ctor : Ctors) {
+ auto CCR = CodeCompletionResult(GenerateCCS(Ctor, Name), RD, Priority);
+ CCR.CursorKind = getCursorKindForDecl(Ctor);
+ Results.AddResult(CCR);
+ }
+ };
+ auto AddBase = [&](const CXXBaseSpecifier &Base) {
+ const char *BaseName =
+ Results.getAllocator().CopyString(Base.getType().getAsString(Policy));
+ const auto *RD = Base.getType()->getAsCXXRecordDecl();
+ AddCtorsWithName(
+ RD, SawLastInitializer ? CCP_NextInitializer : CCP_MemberDeclaration,
+ BaseName, nullptr);
+ };
+ auto AddField = [&](const FieldDecl *FD) {
+ const char *FieldName =
+ Results.getAllocator().CopyString(FD->getIdentifier()->getName());
+ const CXXRecordDecl *RD = FD->getType()->getAsCXXRecordDecl();
+ AddCtorsWithName(
+ RD, SawLastInitializer ? CCP_NextInitializer : CCP_MemberDeclaration,
+ FieldName, FD);
+ };
+
for (const auto &Base : ClassDecl->bases()) {
if (!InitializedBases.insert(Context.getCanonicalType(Base.getType()))
.second) {
- SawLastInitializer
- = !Initializers.empty() &&
- Initializers.back()->isBaseInitializer() &&
- Context.hasSameUnqualifiedType(Base.getType(),
- QualType(Initializers.back()->getBaseClass(), 0));
+ SawLastInitializer =
+ !Initializers.empty() && Initializers.back()->isBaseInitializer() &&
+ Context.hasSameUnqualifiedType(
+ Base.getType(), QualType(Initializers.back()->getBaseClass(), 0));
continue;
}
- Builder.AddTypedTextChunk(
- Results.getAllocator().CopyString(
- Base.getType().getAsString(Policy)));
- Builder.AddChunk(CodeCompletionString::CK_LeftParen);
- Builder.AddPlaceholderChunk("args");
- Builder.AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(CodeCompletionResult(Builder.TakeString(),
- SawLastInitializer? CCP_NextInitializer
- : CCP_MemberDeclaration));
+ AddBase(Base);
SawLastInitializer = false;
}
@@ -4969,23 +5289,14 @@ void Sema::CodeCompleteConstructorInitializer(
for (const auto &Base : ClassDecl->vbases()) {
if (!InitializedBases.insert(Context.getCanonicalType(Base.getType()))
.second) {
- SawLastInitializer
- = !Initializers.empty() &&
- Initializers.back()->isBaseInitializer() &&
- Context.hasSameUnqualifiedType(Base.getType(),
- QualType(Initializers.back()->getBaseClass(), 0));
+ SawLastInitializer =
+ !Initializers.empty() && Initializers.back()->isBaseInitializer() &&
+ Context.hasSameUnqualifiedType(
+ Base.getType(), QualType(Initializers.back()->getBaseClass(), 0));
continue;
}
- Builder.AddTypedTextChunk(
- Builder.getAllocator().CopyString(
- Base.getType().getAsString(Policy)));
- Builder.AddChunk(CodeCompletionString::CK_LeftParen);
- Builder.AddPlaceholderChunk("args");
- Builder.AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(CodeCompletionResult(Builder.TakeString(),
- SawLastInitializer? CCP_NextInitializer
- : CCP_MemberDeclaration));
+ AddBase(Base);
SawLastInitializer = false;
}
@@ -4993,27 +5304,16 @@ void Sema::CodeCompleteConstructorInitializer(
for (auto *Field : ClassDecl->fields()) {
if (!InitializedFields.insert(cast<FieldDecl>(Field->getCanonicalDecl()))
.second) {
- SawLastInitializer
- = !Initializers.empty() &&
- Initializers.back()->isAnyMemberInitializer() &&
- Initializers.back()->getAnyMember() == Field;
+ SawLastInitializer = !Initializers.empty() &&
+ Initializers.back()->isAnyMemberInitializer() &&
+ Initializers.back()->getAnyMember() == Field;
continue;
}
if (!Field->getDeclName())
continue;
- Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
- Field->getIdentifier()->getName()));
- Builder.AddChunk(CodeCompletionString::CK_LeftParen);
- Builder.AddPlaceholderChunk("args");
- Builder.AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(CodeCompletionResult(Builder.TakeString(),
- SawLastInitializer? CCP_NextInitializer
- : CCP_MemberDeclaration,
- CXCursor_MemberRef,
- CXAvailability_Available,
- Field));
+ AddField(Field);
SawLastInitializer = false;
}
Results.ExitScope();
@@ -5054,9 +5354,7 @@ void Sema::CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
for (; S && !isNamespaceScope(S); S = S->getParent()) {
for (const auto *D : S->decls()) {
const auto *Var = dyn_cast<VarDecl>(D);
- if (!Var ||
- !Var->hasLocalStorage() ||
- Var->hasAttr<BlocksAttr>())
+ if (!Var || !Var->hasLocalStorage() || Var->hasAttr<BlocksAttr>())
continue;
if (Known.insert(Var->getIdentifier()).second)
@@ -5077,26 +5375,25 @@ void Sema::CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
/// Macro that optionally prepends an "@" to the string literal passed in via
/// Keyword, depending on whether NeedAt is true or false.
-#define OBJC_AT_KEYWORD_NAME(NeedAt,Keyword) ((NeedAt)? "@" Keyword : Keyword)
+#define OBJC_AT_KEYWORD_NAME(NeedAt, Keyword) ((NeedAt) ? "@" Keyword : Keyword)
static void AddObjCImplementationResults(const LangOptions &LangOpts,
- ResultBuilder &Results,
- bool NeedAt) {
+ ResultBuilder &Results, bool NeedAt) {
typedef CodeCompletionResult Result;
// Since we have an implementation, we can end it.
- Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"end")));
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt, "end")));
CodeCompletionBuilder Builder(Results.getAllocator(),
Results.getCodeCompletionTUInfo());
- if (LangOpts.ObjC2) {
+ if (LangOpts.ObjC) {
// @dynamic
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"dynamic"));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt, "dynamic"));
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("property");
Results.AddResult(Result(Builder.TakeString()));
// @synthesize
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"synthesize"));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt, "synthesize"));
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("property");
Results.AddResult(Result(Builder.TakeString()));
@@ -5104,22 +5401,21 @@ static void AddObjCImplementationResults(const LangOptions &LangOpts,
}
static void AddObjCInterfaceResults(const LangOptions &LangOpts,
- ResultBuilder &Results,
- bool NeedAt) {
+ ResultBuilder &Results, bool NeedAt) {
typedef CodeCompletionResult Result;
// Since we have an interface or protocol, we can end it.
- Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"end")));
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt, "end")));
- if (LangOpts.ObjC2) {
+ if (LangOpts.ObjC) {
// @property
- Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"property")));
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt, "property")));
// @required
- Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"required")));
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt, "required")));
// @optional
- Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"optional")));
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt, "optional")));
}
}
@@ -5129,7 +5425,7 @@ static void AddObjCTopLevelResults(ResultBuilder &Results, bool NeedAt) {
Results.getCodeCompletionTUInfo());
// @class name ;
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"class"));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt, "class"));
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("name");
Results.AddResult(Result(Builder.TakeString()));
@@ -5138,26 +5434,27 @@ static void AddObjCTopLevelResults(ResultBuilder &Results, bool NeedAt) {
// @interface name
// FIXME: Could introduce the whole pattern, including superclasses and
// such.
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"interface"));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt, "interface"));
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("class");
Results.AddResult(Result(Builder.TakeString()));
// @protocol name
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"protocol"));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt, "protocol"));
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("protocol");
Results.AddResult(Result(Builder.TakeString()));
// @implementation name
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"implementation"));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt, "implementation"));
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("class");
Results.AddResult(Result(Builder.TakeString()));
}
// @compatibility_alias name
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"compatibility_alias"));
+ Builder.AddTypedTextChunk(
+ OBJC_AT_KEYWORD_NAME(NeedAt, "compatibility_alias"));
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("alias");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
@@ -5200,7 +5497,7 @@ static void AddObjCExpressionResults(ResultBuilder &Results, bool NeedAt) {
Results.getSema().getLangOpts().ConstStrings)
EncodeType = "const char[]";
Builder.AddResultTypeChunk(EncodeType);
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"encode"));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt, "encode"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("type-name");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
@@ -5208,7 +5505,7 @@ static void AddObjCExpressionResults(ResultBuilder &Results, bool NeedAt) {
// @protocol ( protocol-name )
Builder.AddResultTypeChunk("Protocol *");
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"protocol"));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt, "protocol"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("protocol-name");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
@@ -5216,7 +5513,7 @@ static void AddObjCExpressionResults(ResultBuilder &Results, bool NeedAt) {
// @selector ( selector )
Builder.AddResultTypeChunk("SEL");
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"selector"));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt, "selector"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("selector");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
@@ -5224,21 +5521,21 @@ static void AddObjCExpressionResults(ResultBuilder &Results, bool NeedAt) {
// @"string"
Builder.AddResultTypeChunk("NSString *");
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"\""));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt, "\""));
Builder.AddPlaceholderChunk("string");
Builder.AddTextChunk("\"");
Results.AddResult(Result(Builder.TakeString()));
// @[objects, ...]
Builder.AddResultTypeChunk("NSArray *");
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"["));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt, "["));
Builder.AddPlaceholderChunk("objects, ...");
Builder.AddChunk(CodeCompletionString::CK_RightBracket);
Results.AddResult(Result(Builder.TakeString()));
// @{key : object, ...}
Builder.AddResultTypeChunk("NSDictionary *");
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"{"));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt, "{"));
Builder.AddPlaceholderChunk("key");
Builder.AddChunk(CodeCompletionString::CK_Colon);
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
@@ -5262,7 +5559,7 @@ static void AddObjCStatementResults(ResultBuilder &Results, bool NeedAt) {
if (Results.includeCodePatterns()) {
// @try { statements } @catch ( declaration ) { statements } @finally
// { statements }
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"try"));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt, "try"));
Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
Builder.AddPlaceholderChunk("statements");
Builder.AddChunk(CodeCompletionString::CK_RightBrace);
@@ -5281,14 +5578,14 @@ static void AddObjCStatementResults(ResultBuilder &Results, bool NeedAt) {
}
// @throw
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"throw"));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt, "throw"));
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("expression");
Results.AddResult(Result(Builder.TakeString()));
if (Results.includeCodePatterns()) {
// @synchronized ( expression ) { statements }
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"synchronized"));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt, "synchronized"));
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("expression");
@@ -5301,14 +5598,13 @@ static void AddObjCStatementResults(ResultBuilder &Results, bool NeedAt) {
}
static void AddObjCVisibilityResults(const LangOptions &LangOpts,
- ResultBuilder &Results,
- bool NeedAt) {
+ ResultBuilder &Results, bool NeedAt) {
typedef CodeCompletionResult Result;
- Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"private")));
- Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"protected")));
- Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"public")));
- if (LangOpts.ObjC2)
- Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"package")));
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt, "private")));
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt, "protected")));
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt, "public")));
+ if (LangOpts.ObjC)
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt, "package")));
}
void Sema::CodeCompleteObjCAtVisibility(Scope *S) {
@@ -5360,14 +5656,12 @@ static bool ObjCPropertyFlagConflicts(unsigned Attributes, unsigned NewFlag) {
return true;
// Check for more than one of { assign, copy, retain, strong, weak }.
- unsigned AssignCopyRetMask = Attributes & (ObjCDeclSpec::DQ_PR_assign |
- ObjCDeclSpec::DQ_PR_unsafe_unretained |
- ObjCDeclSpec::DQ_PR_copy |
- ObjCDeclSpec::DQ_PR_retain |
- ObjCDeclSpec::DQ_PR_strong |
- ObjCDeclSpec::DQ_PR_weak);
- if (AssignCopyRetMask &&
- AssignCopyRetMask != ObjCDeclSpec::DQ_PR_assign &&
+ unsigned AssignCopyRetMask =
+ Attributes &
+ (ObjCDeclSpec::DQ_PR_assign | ObjCDeclSpec::DQ_PR_unsafe_unretained |
+ ObjCDeclSpec::DQ_PR_copy | ObjCDeclSpec::DQ_PR_retain |
+ ObjCDeclSpec::DQ_PR_strong | ObjCDeclSpec::DQ_PR_weak);
+ if (AssignCopyRetMask && AssignCopyRetMask != ObjCDeclSpec::DQ_PR_assign &&
AssignCopyRetMask != ObjCDeclSpec::DQ_PR_unsafe_unretained &&
AssignCopyRetMask != ObjCDeclSpec::DQ_PR_copy &&
AssignCopyRetMask != ObjCDeclSpec::DQ_PR_retain &&
@@ -5445,11 +5739,10 @@ void Sema::CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS) {
enum ObjCMethodKind {
  MK_Any, ///< Any kind of method, provided it meets other specified criteria.
MK_ZeroArgSelector, ///< Zero-argument (unary) selector.
- MK_OneArgSelector ///< One-argument selector.
+ MK_OneArgSelector ///< One-argument selector.
};
-static bool isAcceptableObjCSelector(Selector Sel,
- ObjCMethodKind WantKind,
+static bool isAcceptableObjCSelector(Selector Sel, ObjCMethodKind WantKind,
ArrayRef<IdentifierInfo *> SelIdents,
bool AllowSameLength = true) {
unsigned NumSelIdents = SelIdents.size();
@@ -5457,9 +5750,12 @@ static bool isAcceptableObjCSelector(Selector Sel,
return false;
switch (WantKind) {
- case MK_Any: break;
- case MK_ZeroArgSelector: return Sel.isUnarySelector();
- case MK_OneArgSelector: return Sel.getNumArgs() == 1;
+ case MK_Any:
+ break;
+ case MK_ZeroArgSelector:
+ return Sel.isUnarySelector();
+ case MK_OneArgSelector:
+ return Sel.getNumArgs() == 1;
}
if (!AllowSameLength && NumSelIdents && NumSelIdents == Sel.getNumArgs())
@@ -5480,11 +5776,9 @@ static bool isAcceptableObjCMethod(ObjCMethodDecl *Method,
AllowSameLength);
}
-namespace {
- /// A set of selectors, which is used to avoid introducing multiple
- /// completions with the same selector into the result set.
- typedef llvm::SmallPtrSet<Selector, 16> VisitedSelectorSet;
-}
+/// A set of selectors, which is used to avoid introducing multiple
+/// completions with the same selector into the result set.
+typedef llvm::SmallPtrSet<Selector, 16> VisitedSelectorSet;
/// Add all of the Objective-C methods in the given Objective-C
/// container to the set of results.
@@ -5517,7 +5811,7 @@ static void AddObjCMethods(ObjCContainerDecl *Container,
Container = getContainerDef(Container);
ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(Container);
IsRootClass = IsRootClass || (IFace && !IFace->getSuperClass());
- for (auto *M : Container->methods()) {
+ for (ObjCMethodDecl *M : Container->methods()) {
// The instance methods on the root class can be messaged via the
// metaclass.
if (M->isInstanceMethod() == WantInstanceMethods ||
@@ -5534,16 +5828,16 @@ static void AddObjCMethods(ObjCContainerDecl *Container,
R.StartParameter = SelIdents.size();
R.AllParametersAreInformative = (WantKind != MK_Any);
if (!InOriginalClass)
- R.Priority += CCD_InBaseClass;
+ setInBaseClass(R);
Results.MaybeAddResult(R, CurContext);
}
}
// Visit the protocols of protocols.
- if (ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Container)) {
+ if (const auto *Protocol = dyn_cast<ObjCProtocolDecl>(Container)) {
if (Protocol->hasDefinition()) {
- const ObjCList<ObjCProtocolDecl> &Protocols
- = Protocol->getReferencedProtocols();
+ const ObjCList<ObjCProtocolDecl> &Protocols =
+ Protocol->getReferencedProtocols();
for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
E = Protocols.end();
I != E; ++I)
@@ -5556,19 +5850,19 @@ static void AddObjCMethods(ObjCContainerDecl *Container,
return;
// Add methods in protocols.
- for (auto *I : IFace->protocols())
+ for (ObjCProtocolDecl *I : IFace->protocols())
AddObjCMethods(I, WantInstanceMethods, WantKind, SelIdents, CurContext,
Selectors, AllowSameLength, Results, false, IsRootClass);
// Add methods in categories.
- for (auto *CatDecl : IFace->known_categories()) {
+ for (ObjCCategoryDecl *CatDecl : IFace->known_categories()) {
AddObjCMethods(CatDecl, WantInstanceMethods, WantKind, SelIdents,
CurContext, Selectors, AllowSameLength, Results,
InOriginalClass, IsRootClass);
    // Add a category's protocol methods.
- const ObjCList<ObjCProtocolDecl> &Protocols
- = CatDecl->getReferencedProtocols();
+ const ObjCList<ObjCProtocolDecl> &Protocols =
+ CatDecl->getReferencedProtocols();
for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
E = Protocols.end();
I != E; ++I)
@@ -5596,13 +5890,12 @@ static void AddObjCMethods(ObjCContainerDecl *Container,
IsRootClass);
}
-
void Sema::CodeCompleteObjCPropertyGetter(Scope *S) {
// Try to find the interface where getters might live.
ObjCInterfaceDecl *Class = dyn_cast_or_null<ObjCInterfaceDecl>(CurContext);
if (!Class) {
- if (ObjCCategoryDecl *Category
- = dyn_cast_or_null<ObjCCategoryDecl>(CurContext))
+ if (ObjCCategoryDecl *Category =
+ dyn_cast_or_null<ObjCCategoryDecl>(CurContext))
Class = Category->getClassInterface();
if (!Class)
@@ -5625,11 +5918,10 @@ void Sema::CodeCompleteObjCPropertyGetter(Scope *S) {
void Sema::CodeCompleteObjCPropertySetter(Scope *S) {
// Try to find the interface where setters might live.
- ObjCInterfaceDecl *Class
- = dyn_cast_or_null<ObjCInterfaceDecl>(CurContext);
+ ObjCInterfaceDecl *Class = dyn_cast_or_null<ObjCInterfaceDecl>(CurContext);
if (!Class) {
- if (ObjCCategoryDecl *Category
- = dyn_cast_or_null<ObjCCategoryDecl>(CurContext))
+ if (ObjCCategoryDecl *Category =
+ dyn_cast_or_null<ObjCCategoryDecl>(CurContext))
Class = Category->getClassInterface();
if (!Class)
@@ -5643,8 +5935,8 @@ void Sema::CodeCompleteObjCPropertySetter(Scope *S) {
Results.EnterNewScope();
VisitedSelectorSet Selectors;
- AddObjCMethods(Class, true, MK_OneArgSelector, None, CurContext,
- Selectors, /*AllowSameLength=*/true, Results);
+ AddObjCMethods(Class, true, MK_OneArgSelector, None, CurContext, Selectors,
+ /*AllowSameLength=*/true, Results);
Results.ExitScope();
HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
@@ -5675,9 +5967,9 @@ void Sema::CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
if ((DS.getObjCDeclQualifier() &
(ObjCDeclSpec::DQ_Bycopy | ObjCDeclSpec::DQ_Byref |
ObjCDeclSpec::DQ_Oneway)) == 0) {
- Results.AddResult("bycopy");
- Results.AddResult("byref");
- Results.AddResult("oneway");
+ Results.AddResult("bycopy");
+ Results.AddResult("byref");
+ Results.AddResult("oneway");
}
if ((DS.getObjCDeclQualifier() & ObjCDeclSpec::DQ_CSNullability) == 0) {
Results.AddResult("nonnull");
@@ -5733,7 +6025,7 @@ void Sema::CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
/// common uses of Objective-C. This routine returns that class type,
/// or NULL if no better result could be determined.
static ObjCInterfaceDecl *GetAssumedMessageSendExprType(Expr *E) {
- ObjCMessageExpr *Msg = dyn_cast_or_null<ObjCMessageExpr>(E);
+ auto *Msg = dyn_cast_or_null<ObjCMessageExpr>(E);
if (!Msg)
return nullptr;
@@ -5753,8 +6045,8 @@ static ObjCInterfaceDecl *GetAssumedMessageSendExprType(Expr *E) {
ObjCInterfaceDecl *IFace = nullptr;
switch (Msg->getReceiverKind()) {
case ObjCMessageExpr::Class:
- if (const ObjCObjectType *ObjType
- = Msg->getClassReceiver()->getAs<ObjCObjectType>())
+ if (const ObjCObjectType *ObjType =
+ Msg->getClassReceiver()->getAs<ObjCObjectType>())
IFace = ObjType->getInterface();
break;
@@ -5776,27 +6068,27 @@ static ObjCInterfaceDecl *GetAssumedMessageSendExprType(Expr *E) {
ObjCInterfaceDecl *Super = IFace->getSuperClass();
if (Method->isInstanceMethod())
return llvm::StringSwitch<ObjCInterfaceDecl *>(Id->getName())
- .Case("retain", IFace)
- .Case("strong", IFace)
- .Case("autorelease", IFace)
- .Case("copy", IFace)
- .Case("copyWithZone", IFace)
- .Case("mutableCopy", IFace)
- .Case("mutableCopyWithZone", IFace)
- .Case("awakeFromCoder", IFace)
- .Case("replacementObjectFromCoder", IFace)
+ .Case("retain", IFace)
+ .Case("strong", IFace)
+ .Case("autorelease", IFace)
+ .Case("copy", IFace)
+ .Case("copyWithZone", IFace)
+ .Case("mutableCopy", IFace)
+ .Case("mutableCopyWithZone", IFace)
+ .Case("awakeFromCoder", IFace)
+ .Case("replacementObjectFromCoder", IFace)
+ .Case("class", IFace)
+ .Case("classForCoder", IFace)
+ .Case("superclass", Super)
+ .Default(nullptr);
+
+ return llvm::StringSwitch<ObjCInterfaceDecl *>(Id->getName())
+ .Case("new", IFace)
+ .Case("alloc", IFace)
+ .Case("allocWithZone", IFace)
.Case("class", IFace)
- .Case("classForCoder", IFace)
.Case("superclass", Super)
.Default(nullptr);
-
- return llvm::StringSwitch<ObjCInterfaceDecl *>(Id->getName())
- .Case("new", IFace)
- .Case("alloc", IFace)
- .Case("allocWithZone", IFace)
- .Case("class", IFace)
- .Case("superclass", Super)
- .Default(nullptr);
}
// Add a special completion for a message send to "super", which fills in the
@@ -5815,10 +6107,10 @@ static ObjCInterfaceDecl *GetAssumedMessageSendExprType(Expr *E) {
///
/// \returns the Objective-C method declaration that would be invoked by
/// this "super" completion. If NULL, no completion was added.
-static ObjCMethodDecl *AddSuperSendCompletion(
- Sema &S, bool NeedSuperKeyword,
- ArrayRef<IdentifierInfo *> SelIdents,
- ResultBuilder &Results) {
+static ObjCMethodDecl *
+AddSuperSendCompletion(Sema &S, bool NeedSuperKeyword,
+ ArrayRef<IdentifierInfo *> SelIdents,
+ ResultBuilder &Results) {
ObjCMethodDecl *CurMethod = S.getCurMethodDecl();
if (!CurMethod)
return nullptr;
@@ -5838,7 +6130,7 @@ static ObjCMethodDecl *AddSuperSendCompletion(
if (!SuperMethod) {
for (const auto *Cat : Class->known_categories()) {
if ((SuperMethod = Cat->getMethod(CurMethod->getSelector(),
- CurMethod->isInstanceMethod())))
+ CurMethod->isInstanceMethod())))
break;
}
}
@@ -5853,8 +6145,8 @@ static ObjCMethodDecl *AddSuperSendCompletion(
return nullptr;
for (ObjCMethodDecl::param_iterator CurP = CurMethod->param_begin(),
- CurPEnd = CurMethod->param_end(),
- SuperP = SuperMethod->param_begin();
+ CurPEnd = CurMethod->param_end(),
+ SuperP = SuperMethod->param_begin();
CurP != CurPEnd; ++CurP, ++SuperP) {
// Make sure the parameter types are compatible.
if (!S.Context.hasSameUnqualifiedType((*CurP)->getType(),
@@ -5872,8 +6164,7 @@ static ObjCMethodDecl *AddSuperSendCompletion(
// Give this completion a return type.
AddResultTypeChunk(S.Context, getCompletionPrintingPolicy(S), SuperMethod,
- Results.getCompletionContext().getBaseType(),
- Builder);
+ Results.getCompletionContext().getBaseType(), Builder);
// If we need the "super" keyword, add it (plus some spacing).
if (NeedSuperKeyword) {
@@ -5884,11 +6175,11 @@ static ObjCMethodDecl *AddSuperSendCompletion(
Selector Sel = CurMethod->getSelector();
if (Sel.isUnarySelector()) {
if (NeedSuperKeyword)
- Builder.AddTextChunk(Builder.getAllocator().CopyString(
- Sel.getNameForSlot(0)));
+ Builder.AddTextChunk(
+ Builder.getAllocator().CopyString(Sel.getNameForSlot(0)));
else
- Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
- Sel.getNameForSlot(0)));
+ Builder.AddTypedTextChunk(
+ Builder.getAllocator().CopyString(Sel.getNameForSlot(0)));
} else {
ObjCMethodDecl::param_iterator CurP = CurMethod->param_begin();
for (unsigned I = 0, N = Sel.getNumArgs(); I != N; ++I, ++CurP) {
@@ -5897,20 +6188,17 @@ static ObjCMethodDecl *AddSuperSendCompletion(
if (I < SelIdents.size())
Builder.AddInformativeChunk(
- Builder.getAllocator().CopyString(
- Sel.getNameForSlot(I) + ":"));
+ Builder.getAllocator().CopyString(Sel.getNameForSlot(I) + ":"));
else if (NeedSuperKeyword || I > SelIdents.size()) {
Builder.AddTextChunk(
- Builder.getAllocator().CopyString(
- Sel.getNameForSlot(I) + ":"));
+ Builder.getAllocator().CopyString(Sel.getNameForSlot(I) + ":"));
Builder.AddPlaceholderChunk(Builder.getAllocator().CopyString(
- (*CurP)->getIdentifier()->getName()));
+ (*CurP)->getIdentifier()->getName()));
} else {
Builder.AddTypedTextChunk(
- Builder.getAllocator().CopyString(
- Sel.getNameForSlot(I) + ":"));
+ Builder.getAllocator().CopyString(Sel.getNameForSlot(I) + ":"));
Builder.AddPlaceholderChunk(Builder.getAllocator().CopyString(
- (*CurP)->getIdentifier()->getName()));
+ (*CurP)->getIdentifier()->getName()));
}
}
}
@@ -5922,12 +6210,13 @@ static ObjCMethodDecl *AddSuperSendCompletion(
void Sema::CodeCompleteObjCMessageReceiver(Scope *S) {
typedef CodeCompletionResult Result;
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
- CodeCompleter->getCodeCompletionTUInfo(),
- CodeCompletionContext::CCC_ObjCMessageReceiver,
- getLangOpts().CPlusPlus11
- ? &ResultBuilder::IsObjCMessageReceiverOrLambdaCapture
- : &ResultBuilder::IsObjCMessageReceiver);
+ ResultBuilder Results(
+ *this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_ObjCMessageReceiver,
+ getLangOpts().CPlusPlus11
+ ? &ResultBuilder::IsObjCMessageReceiverOrLambdaCapture
+ : &ResultBuilder::IsObjCMessageReceiver);
CodeCompletionDeclConsumer Consumer(Results, CurContext);
Results.EnterNewScope();
@@ -5976,8 +6265,7 @@ void Sema::CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
// send [super ...] is actually calling an instance method on the
// current object.
return CodeCompleteObjCInstanceMessage(S, nullptr, SelIdents,
- AtArgumentExpression,
- CDecl);
+ AtArgumentExpression, CDecl);
}
// Fall through to send to the superclass in CDecl.
@@ -5985,13 +6273,12 @@ void Sema::CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
// "super" may be the name of a type or variable. Figure out which
// it is.
IdentifierInfo *Super = getSuperIdentifier();
- NamedDecl *ND = LookupSingleName(S, Super, SuperLoc,
- LookupOrdinaryName);
+ NamedDecl *ND = LookupSingleName(S, Super, SuperLoc, LookupOrdinaryName);
if ((CDecl = dyn_cast_or_null<ObjCInterfaceDecl>(ND))) {
// "super" names an interface. Use it.
} else if (TypeDecl *TD = dyn_cast_or_null<TypeDecl>(ND)) {
- if (const ObjCObjectType *Iface
- = Context.getTypeDeclType(TD)->getAs<ObjCObjectType>())
+ if (const ObjCObjectType *Iface =
+ Context.getTypeDeclType(TD)->getAs<ObjCObjectType>())
CDecl = Iface->getInterface();
} else if (ND && isa<UnresolvedUsingTypenameDecl>(ND)) {
// "super" names an unresolved type; we can't be more specific.
@@ -6001,11 +6288,10 @@ void Sema::CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
SourceLocation TemplateKWLoc;
UnqualifiedId id;
id.setIdentifier(Super, SuperLoc);
- ExprResult SuperExpr = ActOnIdExpression(S, SS, TemplateKWLoc, id,
- false, false);
+ ExprResult SuperExpr =
+ ActOnIdExpression(S, SS, TemplateKWLoc, id, false, false);
return CodeCompleteObjCInstanceMessage(S, (Expr *)SuperExpr.get(),
- SelIdents,
- AtArgumentExpression);
+ SelIdents, AtArgumentExpression);
}
// Fall through
@@ -6036,8 +6322,8 @@ static QualType getPreferredArgumentTypeForMessageSend(ResultBuilder &Results,
if (R.Priority <= BestPriority) {
const ObjCMethodDecl *Method = cast<ObjCMethodDecl>(R.Declaration);
if (NumSelIdents <= Method->param_size()) {
- QualType MyPreferredType = Method->parameters()[NumSelIdents - 1]
- ->getType();
+ QualType MyPreferredType =
+ Method->parameters()[NumSelIdents - 1]->getType();
if (R.Priority < BestPriority || PreferredType.isNull()) {
BestPriority = R.Priority;
PreferredType = MyPreferredType;
@@ -6056,8 +6342,7 @@ static QualType getPreferredArgumentTypeForMessageSend(ResultBuilder &Results,
static void AddClassMessageCompletions(Sema &SemaRef, Scope *S,
ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
- bool AtArgumentExpression,
- bool IsSuper,
+ bool AtArgumentExpression, bool IsSuper,
ResultBuilder &Results) {
typedef CodeCompletionResult Result;
ObjCInterfaceDecl *CDecl = nullptr;
@@ -6078,8 +6363,8 @@ static void AddClassMessageCompletions(Sema &SemaRef, Scope *S,
// If this is a send-to-super, try to add the special "super" send
// completion.
if (IsSuper) {
- if (ObjCMethodDecl *SuperMethod
- = AddSuperSendCompletion(SemaRef, false, SelIdents, Results))
+ if (ObjCMethodDecl *SuperMethod =
+ AddSuperSendCompletion(SemaRef, false, SelIdents, Results))
Results.Ignore(SuperMethod);
}
@@ -6090,9 +6375,8 @@ static void AddClassMessageCompletions(Sema &SemaRef, Scope *S,
VisitedSelectorSet Selectors;
if (CDecl)
- AddObjCMethods(CDecl, false, MK_Any, SelIdents,
- SemaRef.CurContext, Selectors, AtArgumentExpression,
- Results);
+ AddObjCMethods(CDecl, false, MK_Any, SelIdents, SemaRef.CurContext,
+ Selectors, AtArgumentExpression, Results);
else {
// We're messaging "id" as a type; provide all class/factory methods.
@@ -6111,11 +6395,10 @@ static void AddClassMessageCompletions(Sema &SemaRef, Scope *S,
}
for (Sema::GlobalMethodPool::iterator M = SemaRef.MethodPool.begin(),
- MEnd = SemaRef.MethodPool.end();
+ MEnd = SemaRef.MethodPool.end();
M != MEnd; ++M) {
for (ObjCMethodList *MethList = &M->second.second;
- MethList && MethList->getMethod();
- MethList = MethList->getNext()) {
+ MethList && MethList->getMethod(); MethList = MethList->getNext()) {
if (!isAcceptableObjCMethod(MethList->getMethod(), MK_Any, SelIdents))
continue;
@@ -6138,10 +6421,11 @@ void Sema::CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
QualType T = this->GetTypeFromParser(Receiver);
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
- CodeCompleter->getCodeCompletionTUInfo(),
- CodeCompletionContext(CodeCompletionContext::CCC_ObjCClassMessage,
- T, SelIdents));
+ ResultBuilder Results(
+ *this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext(CodeCompletionContext::CCC_ObjCClassMessage, T,
+ SelIdents));
AddClassMessageCompletions(*this, S, Receiver, SelIdents,
AtArgumentExpression, IsSuper, Results);
@@ -6152,8 +6436,8 @@ void Sema::CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
// code-complete the expression using the corresponding parameter type as
// our preferred type, improving completion results.
if (AtArgumentExpression) {
- QualType PreferredType = getPreferredArgumentTypeForMessageSend(Results,
- SelIdents.size());
+ QualType PreferredType =
+ getPreferredArgumentTypeForMessageSend(Results, SelIdents.size());
if (PreferredType.isNull())
CodeCompleteOrdinaryName(S, PCC_Expression);
else
@@ -6161,8 +6445,7 @@ void Sema::CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
return;
}
- HandleCodeCompleteResults(this, CodeCompleter,
- Results.getCompletionContext(),
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
Results.data(), Results.size());
}
@@ -6182,10 +6465,11 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
return;
RecExpr = Conv.get();
}
- QualType ReceiverType = RecExpr? RecExpr->getType()
- : Super? Context.getObjCObjectPointerType(
+ QualType ReceiverType = RecExpr
+ ? RecExpr->getType()
+ : Super ? Context.getObjCObjectPointerType(
Context.getObjCInterfaceType(Super))
- : Context.getObjCIdType();
+ : Context.getObjCIdType();
// If we're messaging an expression with type "id" or "Class", check
// whether we know something special about the receiver that allows
@@ -6193,13 +6477,12 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
if (ReceiverType->isObjCIdType() || ReceiverType->isObjCClassType()) {
if (ObjCInterfaceDecl *IFace = GetAssumedMessageSendExprType(RecExpr)) {
if (ReceiverType->isObjCClassType())
- return CodeCompleteObjCClassMessage(S,
- ParsedType::make(Context.getObjCInterfaceType(IFace)),
- SelIdents,
- AtArgumentExpression, Super);
+ return CodeCompleteObjCClassMessage(
+ S, ParsedType::make(Context.getObjCInterfaceType(IFace)), SelIdents,
+ AtArgumentExpression, Super);
- ReceiverType = Context.getObjCObjectPointerType(
- Context.getObjCInterfaceType(IFace));
+ ReceiverType =
+ Context.getObjCObjectPointerType(Context.getObjCInterfaceType(IFace));
}
} else if (RecExpr && getLangOpts().CPlusPlus) {
ExprResult Conv = PerformContextuallyConvertToObjCPointer(RecExpr);
@@ -6210,18 +6493,19 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
}
// Build the set of methods we can see.
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
- CodeCompleter->getCodeCompletionTUInfo(),
- CodeCompletionContext(CodeCompletionContext::CCC_ObjCInstanceMessage,
- ReceiverType, SelIdents));
+ ResultBuilder Results(
+ *this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext(CodeCompletionContext::CCC_ObjCInstanceMessage,
+ ReceiverType, SelIdents));
Results.EnterNewScope();
// If this is a send-to-super, try to add the special "super" send
// completion.
if (Super) {
- if (ObjCMethodDecl *SuperMethod
- = AddSuperSendCompletion(*this, false, SelIdents, Results))
+ if (ObjCMethodDecl *SuperMethod =
+ AddSuperSendCompletion(*this, false, SelIdents, Results))
Results.Ignore(SuperMethod);
}
@@ -6240,30 +6524,29 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ReceiverType->isObjCQualifiedClassType()) {
if (ObjCMethodDecl *CurMethod = getCurMethodDecl()) {
if (ObjCInterfaceDecl *ClassDecl = CurMethod->getClassInterface())
- AddObjCMethods(ClassDecl, false, MK_Any, SelIdents,
- CurContext, Selectors, AtArgumentExpression, Results);
+ AddObjCMethods(ClassDecl, false, MK_Any, SelIdents, CurContext,
+ Selectors, AtArgumentExpression, Results);
}
}
// Handle messages to a qualified ID ("id<foo>").
- else if (const ObjCObjectPointerType *QualID
- = ReceiverType->getAsObjCQualifiedIdType()) {
+ else if (const ObjCObjectPointerType *QualID =
+ ReceiverType->getAsObjCQualifiedIdType()) {
// Search protocols for instance methods.
for (auto *I : QualID->quals())
- AddObjCMethods(I, true, MK_Any, SelIdents, CurContext,
- Selectors, AtArgumentExpression, Results);
+ AddObjCMethods(I, true, MK_Any, SelIdents, CurContext, Selectors,
+ AtArgumentExpression, Results);
}
// Handle messages to a pointer to interface type.
- else if (const ObjCObjectPointerType *IFacePtr
- = ReceiverType->getAsObjCInterfacePointerType()) {
+ else if (const ObjCObjectPointerType *IFacePtr =
+ ReceiverType->getAsObjCInterfacePointerType()) {
// Search the class, its superclasses, etc., for instance methods.
AddObjCMethods(IFacePtr->getInterfaceDecl(), true, MK_Any, SelIdents,
- CurContext, Selectors, AtArgumentExpression,
- Results);
+ CurContext, Selectors, AtArgumentExpression, Results);
// Search protocols for instance methods.
for (auto *I : IFacePtr->quals())
- AddObjCMethods(I, true, MK_Any, SelIdents, CurContext,
- Selectors, AtArgumentExpression, Results);
+ AddObjCMethods(I, true, MK_Any, SelIdents, CurContext, Selectors,
+ AtArgumentExpression, Results);
}
// Handle messages to "id".
else if (ReceiverType->isObjCIdType()) {
@@ -6287,8 +6570,7 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
MEnd = MethodPool.end();
M != MEnd; ++M) {
for (ObjCMethodList *MethList = &M->second.first;
- MethList && MethList->getMethod();
- MethList = MethList->getNext()) {
+ MethList && MethList->getMethod(); MethList = MethList->getNext()) {
if (!isAcceptableObjCMethod(MethList->getMethod(), MK_Any, SelIdents))
continue;
@@ -6305,15 +6587,14 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
}
Results.ExitScope();
-
// If we're actually at the argument expression (rather than prior to the
// selector), we're actually performing code completion for an expression.
// Determine whether we have a single, best method. If so, we can
// code-complete the expression using the corresponding parameter type as
// our preferred type, improving completion results.
if (AtArgumentExpression) {
- QualType PreferredType = getPreferredArgumentTypeForMessageSend(Results,
- SelIdents.size());
+ QualType PreferredType =
+ getPreferredArgumentTypeForMessageSend(Results, SelIdents.size());
if (PreferredType.isNull())
CodeCompleteOrdinaryName(S, PCC_Expression);
else
@@ -6321,9 +6602,8 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
return;
}
- HandleCodeCompleteResults(this, CodeCompleter,
- Results.getCompletionContext(),
- Results.data(),Results.size());
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
}
void Sema::CodeCompleteObjCForCollection(Scope *S,
@@ -6347,8 +6627,8 @@ void Sema::CodeCompleteObjCSelector(Scope *S,
// If we have an external source, load the entire class method
// pool from the AST file.
if (ExternalSource) {
- for (uint32_t I = 0, N = ExternalSource->GetNumExternalSelectors();
- I != N; ++I) {
+ for (uint32_t I = 0, N = ExternalSource->GetNumExternalSelectors(); I != N;
+ ++I) {
Selector Sel = ExternalSource->GetExternalSelector(I);
if (Sel.isNull() || MethodPool.count(Sel))
continue;
@@ -6362,7 +6642,7 @@ void Sema::CodeCompleteObjCSelector(Scope *S,
CodeCompletionContext::CCC_SelectorName);
Results.EnterNewScope();
for (GlobalMethodPool::iterator M = MethodPool.begin(),
- MEnd = MethodPool.end();
+ MEnd = MethodPool.end();
M != MEnd; ++M) {
Selector Sel = M->first;
@@ -6372,8 +6652,8 @@ void Sema::CodeCompleteObjCSelector(Scope *S,
CodeCompletionBuilder Builder(Results.getAllocator(),
Results.getCodeCompletionTUInfo());
if (Sel.isUnarySelector()) {
- Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
- Sel.getNameForSlot(0)));
+ Builder.AddTypedTextChunk(
+ Builder.getAllocator().CopyString(Sel.getNameForSlot(0)));
Results.AddResult(Builder.TakeString());
continue;
}
@@ -6382,8 +6662,8 @@ void Sema::CodeCompleteObjCSelector(Scope *S,
for (unsigned I = 0, N = Sel.getNumArgs(); I != N; ++I) {
if (I == SelIdents.size()) {
if (!Accumulator.empty()) {
- Builder.AddInformativeChunk(Builder.getAllocator().CopyString(
- Accumulator));
+ Builder.AddInformativeChunk(
+ Builder.getAllocator().CopyString(Accumulator));
Accumulator.clear();
}
}
@@ -6391,7 +6671,7 @@ void Sema::CodeCompleteObjCSelector(Scope *S,
Accumulator += Sel.getNameForSlot(I);
Accumulator += ':';
}
- Builder.AddTypedTextChunk(Builder.getAllocator().CopyString( Accumulator));
+ Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(Accumulator));
Results.AddResult(Builder.TakeString());
}
Results.ExitScope();
@@ -6411,13 +6691,14 @@ static void AddProtocolResults(DeclContext *Ctx, DeclContext *CurContext,
// Record any protocols we find.
if (const auto *Proto = dyn_cast<ObjCProtocolDecl>(D))
if (!OnlyForwardDeclarations || !Proto->hasDefinition())
- Results.AddResult(Result(Proto, Results.getBasePriority(Proto),nullptr),
- CurContext, nullptr, false);
+ Results.AddResult(
+ Result(Proto, Results.getBasePriority(Proto), nullptr), CurContext,
+ nullptr, false);
}
}
void Sema::CodeCompleteObjCProtocolReferences(
- ArrayRef<IdentifierLocPair> Protocols) {
+ ArrayRef<IdentifierLocPair> Protocols) {
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_ObjCProtocolName);
@@ -6429,8 +6710,7 @@ void Sema::CodeCompleteObjCProtocolReferences(
// already seen.
// FIXME: This doesn't work when caching code-completion results.
for (const IdentifierLocPair &Pair : Protocols)
- if (ObjCProtocolDecl *Protocol = LookupProtocol(Pair.first,
- Pair.second))
+ if (ObjCProtocolDecl *Protocol = LookupProtocol(Pair.first, Pair.second))
Results.Ignore(Protocol);
// Add all protocols.
@@ -6476,8 +6756,9 @@ static void AddInterfaceResults(DeclContext *Ctx, DeclContext *CurContext,
if (const auto *Class = dyn_cast<ObjCInterfaceDecl>(D))
if ((!OnlyForwardDeclarations || !Class->hasDefinition()) &&
(!OnlyUnimplemented || !Class->getImplementation()))
- Results.AddResult(Result(Class, Results.getBasePriority(Class),nullptr),
- CurContext, nullptr, false);
+ Results.AddResult(
+ Result(Class, Results.getBasePriority(Class), nullptr), CurContext,
+ nullptr, false);
}
}
@@ -6507,8 +6788,8 @@ void Sema::CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName,
Results.EnterNewScope();
// Make sure that we ignore the class we're currently defining.
- NamedDecl *CurClass
- = LookupSingleName(TUScope, ClassName, ClassNameLoc, LookupOrdinaryName);
+ NamedDecl *CurClass =
+ LookupSingleName(TUScope, ClassName, ClassNameLoc, LookupOrdinaryName);
if (CurClass && isa<ObjCInterfaceDecl>(CurClass))
Results.Ignore(CurClass);
@@ -6554,9 +6835,10 @@ void Sema::CodeCompleteObjCInterfaceCategory(Scope *S,
// Ignore any categories we find that have already been implemented by this
// interface.
llvm::SmallPtrSet<IdentifierInfo *, 16> CategoryNames;
- NamedDecl *CurClass
- = LookupSingleName(TUScope, ClassName, ClassNameLoc, LookupOrdinaryName);
- if (ObjCInterfaceDecl *Class = dyn_cast_or_null<ObjCInterfaceDecl>(CurClass)){
+ NamedDecl *CurClass =
+ LookupSingleName(TUScope, ClassName, ClassNameLoc, LookupOrdinaryName);
+ if (ObjCInterfaceDecl *Class =
+ dyn_cast_or_null<ObjCInterfaceDecl>(CurClass)) {
for (const auto *Cat : Class->visible_categories())
CategoryNames.insert(Cat->getIdentifier());
}
@@ -6567,9 +6849,9 @@ void Sema::CodeCompleteObjCInterfaceCategory(Scope *S,
for (const auto *D : TU->decls())
if (const auto *Category = dyn_cast<ObjCCategoryDecl>(D))
if (CategoryNames.insert(Category->getIdentifier()).second)
- Results.AddResult(Result(Category, Results.getBasePriority(Category),
- nullptr),
- CurContext, nullptr, false);
+ Results.AddResult(
+ Result(Category, Results.getBasePriority(Category), nullptr),
+ CurContext, nullptr, false);
Results.ExitScope();
HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
@@ -6584,8 +6866,8 @@ void Sema::CodeCompleteObjCImplementationCategory(Scope *S,
// Find the corresponding interface. If we couldn't find the interface, the
// program itself is ill-formed. However, we'll try to be helpful still by
// providing the list of all of the categories we know about.
- NamedDecl *CurClass
- = LookupSingleName(TUScope, ClassName, ClassNameLoc, LookupOrdinaryName);
+ NamedDecl *CurClass =
+ LookupSingleName(TUScope, ClassName, ClassNameLoc, LookupOrdinaryName);
ObjCInterfaceDecl *Class = dyn_cast_or_null<ObjCInterfaceDecl>(CurClass);
if (!Class)
return CodeCompleteObjCInterfaceCategory(S, ClassName, ClassNameLoc);
@@ -6620,15 +6902,13 @@ void Sema::CodeCompleteObjCImplementationCategory(Scope *S,
void Sema::CodeCompleteObjCPropertyDefinition(Scope *S) {
CodeCompletionContext CCContext(CodeCompletionContext::CCC_Other);
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
- CodeCompleter->getCodeCompletionTUInfo(),
- CCContext);
+ CodeCompleter->getCodeCompletionTUInfo(), CCContext);
// Figure out where this @synthesize lives.
- ObjCContainerDecl *Container
- = dyn_cast_or_null<ObjCContainerDecl>(CurContext);
- if (!Container ||
- (!isa<ObjCImplementationDecl>(Container) &&
- !isa<ObjCCategoryImplDecl>(Container)))
+ ObjCContainerDecl *Container =
+ dyn_cast_or_null<ObjCContainerDecl>(CurContext);
+ if (!Container || (!isa<ObjCImplementationDecl>(Container) &&
+ !isa<ObjCCategoryImplDecl>(Container)))
return;
// Ignore any properties that have already been implemented.
@@ -6640,8 +6920,8 @@ void Sema::CodeCompleteObjCPropertyDefinition(Scope *S) {
// Add any properties that we find.
AddedPropertiesSet AddedProperties;
Results.EnterNewScope();
- if (ObjCImplementationDecl *ClassImpl
- = dyn_cast<ObjCImplementationDecl>(Container))
+ if (ObjCImplementationDecl *ClassImpl =
+ dyn_cast<ObjCImplementationDecl>(Container))
AddObjCProperties(CCContext, ClassImpl->getClassInterface(), false,
/*AllowNullaryMethods=*/false, CurContext,
AddedProperties, Results);
@@ -6656,37 +6936,37 @@ void Sema::CodeCompleteObjCPropertyDefinition(Scope *S) {
Results.data(), Results.size());
}
-void Sema::CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
- IdentifierInfo *PropertyName) {
+void Sema::CodeCompleteObjCPropertySynthesizeIvar(
+ Scope *S, IdentifierInfo *PropertyName) {
typedef CodeCompletionResult Result;
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
// Figure out where this @synthesize lives.
- ObjCContainerDecl *Container
- = dyn_cast_or_null<ObjCContainerDecl>(CurContext);
- if (!Container ||
- (!isa<ObjCImplementationDecl>(Container) &&
- !isa<ObjCCategoryImplDecl>(Container)))
+ ObjCContainerDecl *Container =
+ dyn_cast_or_null<ObjCContainerDecl>(CurContext);
+ if (!Container || (!isa<ObjCImplementationDecl>(Container) &&
+ !isa<ObjCCategoryImplDecl>(Container)))
return;
// Figure out which interface we're looking into.
ObjCInterfaceDecl *Class = nullptr;
- if (ObjCImplementationDecl *ClassImpl
- = dyn_cast<ObjCImplementationDecl>(Container))
+ if (ObjCImplementationDecl *ClassImpl =
+ dyn_cast<ObjCImplementationDecl>(Container))
Class = ClassImpl->getClassInterface();
else
- Class = cast<ObjCCategoryImplDecl>(Container)->getCategoryDecl()
- ->getClassInterface();
+ Class = cast<ObjCCategoryImplDecl>(Container)
+ ->getCategoryDecl()
+ ->getClassInterface();
// Determine the type of the property we're synthesizing.
QualType PropertyType = Context.getObjCIdType();
if (Class) {
if (ObjCPropertyDecl *Property = Class->FindPropertyDeclaration(
PropertyName, ObjCPropertyQueryKind::OBJC_PR_query_instance)) {
- PropertyType
- = Property->getType().getNonReferenceType().getUnqualifiedType();
+ PropertyType =
+ Property->getType().getNonReferenceType().getUnqualifiedType();
// Give preference to ivars
Results.setPreferredType(PropertyType);
@@ -6701,7 +6981,7 @@ void Sema::CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
NameWithPrefix += PropertyName->getName();
std::string NameWithSuffix = PropertyName->getName().str();
NameWithSuffix += '_';
- for(; Class; Class = Class->getSuperClass()) {
+ for (; Class; Class = Class->getSuperClass()) {
for (ObjCIvarDecl *Ivar = Class->all_declared_ivar_begin(); Ivar;
Ivar = Ivar->getNextIvar()) {
Results.AddResult(Result(Ivar, Results.getBasePriority(Ivar), nullptr),
@@ -6717,8 +6997,8 @@ void Sema::CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
// Reduce the priority of this result by one, to give it a slight
// advantage over other results whose names don't match so closely.
if (Results.size() &&
- Results.data()[Results.size() - 1].Kind
- == CodeCompletionResult::RK_Declaration &&
+ Results.data()[Results.size() - 1].Kind ==
+ CodeCompletionResult::RK_Declaration &&
Results.data()[Results.size() - 1].Declaration == Ivar)
Results.data()[Results.size() - 1].Priority--;
}
@@ -6732,14 +7012,14 @@ void Sema::CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
typedef CodeCompletionResult Result;
CodeCompletionAllocator &Allocator = Results.getAllocator();
CodeCompletionBuilder Builder(Allocator, Results.getCodeCompletionTUInfo(),
- Priority,CXAvailability_Available);
+ Priority, CXAvailability_Available);
PrintingPolicy Policy = getCompletionPrintingPolicy(*this);
- Builder.AddResultTypeChunk(GetCompletionTypeString(PropertyType, Context,
- Policy, Allocator));
+ Builder.AddResultTypeChunk(
+ GetCompletionTypeString(PropertyType, Context, Policy, Allocator));
Builder.AddTypedTextChunk(Allocator.CopyString(NameWithPrefix));
- Results.AddResult(Result(Builder.TakeString(), Priority,
- CXCursor_ObjCIvarDecl));
+ Results.AddResult(
+ Result(Builder.TakeString(), Priority, CXCursor_ObjCIvarDecl));
}
Results.ExitScope();
@@ -6750,8 +7030,9 @@ void Sema::CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
// Mapping from selectors to the methods that implement that selector, along
// with the "in original class" flag.
-typedef llvm::DenseMap<
- Selector, llvm::PointerIntPair<ObjCMethodDecl *, 1, bool> > KnownMethodsMap;
+typedef llvm::DenseMap<Selector,
+ llvm::PointerIntPair<ObjCMethodDecl *, 1, bool>>
+ KnownMethodsMap;
/// Find all of the methods that reside in the given container
/// (and its superclasses, protocols, etc.) that meet the given
@@ -6771,8 +7052,8 @@ static void FindImplementableMethods(ASTContext &Context,
IFace = IFace->getDefinition();
Container = IFace;
- const ObjCList<ObjCProtocolDecl> &Protocols
- = IFace->getReferencedProtocols();
+ const ObjCList<ObjCProtocolDecl> &Protocols =
+ IFace->getReferencedProtocols();
for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
E = Protocols.end();
I != E; ++I)
@@ -6788,14 +7069,14 @@ static void FindImplementableMethods(ASTContext &Context,
// Visit the superclass.
if (IFace->getSuperClass())
FindImplementableMethods(Context, IFace->getSuperClass(),
- WantInstanceMethods, ReturnType,
- KnownMethods, false);
+ WantInstanceMethods, ReturnType, KnownMethods,
+ false);
}
if (ObjCCategoryDecl *Category = dyn_cast<ObjCCategoryDecl>(Container)) {
// Recurse into protocols.
- const ObjCList<ObjCProtocolDecl> &Protocols
- = Category->getReferencedProtocols();
+ const ObjCList<ObjCProtocolDecl> &Protocols =
+ Category->getReferencedProtocols();
for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
E = Protocols.end();
I != E; ++I)
@@ -6817,10 +7098,10 @@ static void FindImplementableMethods(ASTContext &Context,
Container = Protocol;
// Recurse into protocols.
- const ObjCList<ObjCProtocolDecl> &Protocols
- = Protocol->getReferencedProtocols();
+ const ObjCList<ObjCProtocolDecl> &Protocols =
+ Protocol->getReferencedProtocols();
for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
- E = Protocols.end();
+ E = Protocols.end();
I != E; ++I)
FindImplementableMethods(Context, *I, WantInstanceMethods, ReturnType,
KnownMethods, false);
@@ -6843,8 +7124,7 @@ static void FindImplementableMethods(ASTContext &Context,
/// Add the parenthesized return or parameter type chunk to a code
/// completion string.
-static void AddObjCPassingTypeChunk(QualType Type,
- unsigned ObjCDeclQuals,
+static void AddObjCPassingTypeChunk(QualType Type, unsigned ObjCDeclQuals,
ASTContext &Context,
const PrintingPolicy &Policy,
CodeCompletionBuilder &Builder) {
@@ -6852,15 +7132,14 @@ static void AddObjCPassingTypeChunk(QualType Type,
std::string Quals = formatObjCParamQualifiers(ObjCDeclQuals, Type);
if (!Quals.empty())
Builder.AddTextChunk(Builder.getAllocator().CopyString(Quals));
- Builder.AddTextChunk(GetCompletionTypeString(Type, Context, Policy,
- Builder.getAllocator()));
+ Builder.AddTextChunk(
+ GetCompletionTypeString(Type, Context, Policy, Builder.getAllocator()));
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
/// Determine whether the given class is or inherits from a class by
/// the given name.
-static bool InheritsFromClassNamed(ObjCInterfaceDecl *Class,
- StringRef Name) {
+static bool InheritsFromClassNamed(ObjCInterfaceDecl *Class, StringRef Name) {
if (!Class)
return false;
@@ -6874,8 +7153,7 @@ static bool InheritsFromClassNamed(ObjCInterfaceDecl *Class,
/// Key-Value Observing (KVO).
static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
bool IsInstanceMethod,
- QualType ReturnType,
- ASTContext &Context,
+ QualType ReturnType, ASTContext &Context,
VisitedSelectorSet &KnownSelectors,
ResultBuilder &Results) {
IdentifierInfo *PropName = Property->getIdentifier();
@@ -6900,7 +7178,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
const char *CopiedKey;
KeyHolder(CodeCompletionAllocator &Allocator, StringRef Key)
- : Allocator(Allocator), Key(Key), CopiedKey(nullptr) {}
+ : Allocator(Allocator), Key(Key), CopiedKey(nullptr) {}
operator const char *() {
if (CopiedKey)
@@ -6915,19 +7193,19 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
if (!UpperKey.empty())
UpperKey[0] = toUppercase(UpperKey[0]);
- bool ReturnTypeMatchesProperty = ReturnType.isNull() ||
- Context.hasSameUnqualifiedType(ReturnType.getNonReferenceType(),
- Property->getType());
- bool ReturnTypeMatchesVoid
- = ReturnType.isNull() || ReturnType->isVoidType();
+ bool ReturnTypeMatchesProperty =
+ ReturnType.isNull() ||
+ Context.hasSameUnqualifiedType(ReturnType.getNonReferenceType(),
+ Property->getType());
+ bool ReturnTypeMatchesVoid = ReturnType.isNull() || ReturnType->isVoidType();
// Add the normal accessor -(type)key.
if (IsInstanceMethod &&
KnownSelectors.insert(Selectors.getNullarySelector(PropName)).second &&
ReturnTypeMatchesProperty && !Property->getGetterMethodDecl()) {
if (ReturnType.isNull())
- AddObjCPassingTypeChunk(Property->getType(), /*Quals=*/0,
- Context, Policy, Builder);
+ AddObjCPassingTypeChunk(Property->getType(), /*Quals=*/0, Context, Policy,
+ Builder);
Builder.AddTypedTextChunk(Key);
Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
@@ -6939,9 +7217,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
if (IsInstanceMethod &&
((!ReturnType.isNull() &&
(ReturnType->isIntegerType() || ReturnType->isBooleanType())) ||
- (ReturnType.isNull() &&
- (Property->getType()->isIntegerType() ||
- Property->getType()->isBooleanType())))) {
+ (ReturnType.isNull() && (Property->getType()->isIntegerType() ||
+ Property->getType()->isBooleanType())))) {
std::string SelectorName = (Twine("is") + UpperKey).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))
@@ -6952,8 +7229,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
- Builder.AddTypedTextChunk(
- Allocator.CopyString(SelectorId->getName()));
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorId->getName()));
Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
CXCursor_ObjCInstanceMethodDecl));
}
@@ -6971,11 +7247,10 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
- Builder.AddTypedTextChunk(
- Allocator.CopyString(SelectorId->getName()));
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorId->getName()));
Builder.AddTypedTextChunk(":");
- AddObjCPassingTypeChunk(Property->getType(), /*Quals=*/0,
- Context, Policy, Builder);
+ AddObjCPassingTypeChunk(Property->getType(), /*Quals=*/0, Context, Policy,
+ Builder);
Builder.AddTextChunk(Key);
Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
CXCursor_ObjCInstanceMethodDecl));
@@ -6987,8 +7262,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
unsigned IndexedSetterPriority = CCP_CodePattern;
unsigned UnorderedGetterPriority = CCP_CodePattern;
unsigned UnorderedSetterPriority = CCP_CodePattern;
- if (const ObjCObjectPointerType *ObjCPointer
- = Property->getType()->getAs<ObjCObjectPointerType>()) {
+ if (const auto *ObjCPointer =
+ Property->getType()->getAs<ObjCObjectPointerType>()) {
if (ObjCInterfaceDecl *IFace = ObjCPointer->getInterfaceDecl()) {
// If this interface type is not provably derived from a known
// collection, penalize the corresponding completions.
@@ -7024,12 +7299,11 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
- Builder.AddTypedTextChunk(
- Allocator.CopyString(SelectorId->getName()));
- Results.AddResult(Result(Builder.TakeString(),
- std::min(IndexedGetterPriority,
- UnorderedGetterPriority),
- CXCursor_ObjCInstanceMethodDecl));
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorId->getName()));
+ Results.AddResult(
+ Result(Builder.TakeString(),
+ std::min(IndexedGetterPriority, UnorderedGetterPriority),
+ CXCursor_ObjCInstanceMethodDecl));
}
}
@@ -7037,8 +7311,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
// Add -(id)objectInKeyAtIndex:(NSUInteger)index
if (IsInstanceMethod &&
(ReturnType.isNull() || ReturnType->isObjCObjectPointerType())) {
- std::string SelectorName
- = (Twine("objectIn") + UpperKey + "AtIndex").str();
+ std::string SelectorName = (Twine("objectIn") + UpperKey + "AtIndex").str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
@@ -7062,10 +7335,10 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
(ReturnType.isNull() ||
(ReturnType->isObjCObjectPointerType() &&
ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl() &&
- ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl()
- ->getName() == "NSArray"))) {
- std::string SelectorName
- = (Twine(Property->getName()) + "AtIndexes").str();
+ ReturnType->getAs<ObjCObjectPointerType>()
+ ->getInterfaceDecl()
+ ->getName() == "NSArray"))) {
+ std::string SelectorName = (Twine(Property->getName()) + "AtIndexes").str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
@@ -7087,10 +7360,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
// Add -(void)getKey:(type **)buffer range:(NSRange)inRange
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName = (Twine("get") + UpperKey).str();
- IdentifierInfo *SelectorIds[2] = {
- &Context.Idents.get(SelectorName),
- &Context.Idents.get("range")
- };
+ IdentifierInfo *SelectorIds[2] = {&Context.Idents.get(SelectorName),
+ &Context.Idents.get("range")};
if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) {
if (ReturnType.isNull()) {
@@ -7121,10 +7392,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
// - (void)insertObject:(type *)object inKeyAtIndex:(NSUInteger)index
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName = (Twine("in") + UpperKey + "AtIndex").str();
- IdentifierInfo *SelectorIds[2] = {
- &Context.Idents.get("insertObject"),
- &Context.Idents.get(SelectorName)
- };
+ IdentifierInfo *SelectorIds[2] = {&Context.Idents.get("insertObject"),
+ &Context.Idents.get(SelectorName)};
if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) {
if (ReturnType.isNull()) {
@@ -7153,10 +7422,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
// - (void)insertKey:(NSArray *)array atIndexes:(NSIndexSet *)indexes
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName = (Twine("insert") + UpperKey).str();
- IdentifierInfo *SelectorIds[2] = {
- &Context.Idents.get(SelectorName),
- &Context.Idents.get("atIndexes")
- };
+ IdentifierInfo *SelectorIds[2] = {&Context.Idents.get(SelectorName),
+ &Context.Idents.get("atIndexes")};
if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) {
if (ReturnType.isNull()) {
@@ -7183,8 +7450,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
// -(void)removeObjectFromKeyAtIndex:(NSUInteger)index
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
- std::string SelectorName
- = (Twine("removeObjectFrom") + UpperKey + "AtIndex").str();
+ std::string SelectorName =
+ (Twine("removeObjectFrom") + UpperKey + "AtIndex").str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
@@ -7205,8 +7472,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
// -(void)removeKeyAtIndexes:(NSIndexSet *)indexes
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
- std::string SelectorName
- = (Twine("remove") + UpperKey + "AtIndexes").str();
+ std::string SelectorName = (Twine("remove") + UpperKey + "AtIndexes").str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
@@ -7227,12 +7493,10 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
// - (void)replaceObjectInKeyAtIndex:(NSUInteger)index withObject:(id)object
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
- std::string SelectorName
- = (Twine("replaceObjectIn") + UpperKey + "AtIndex").str();
- IdentifierInfo *SelectorIds[2] = {
- &Context.Idents.get(SelectorName),
- &Context.Idents.get("withObject")
- };
+ std::string SelectorName =
+ (Twine("replaceObjectIn") + UpperKey + "AtIndex").str();
+ IdentifierInfo *SelectorIds[2] = {&Context.Idents.get(SelectorName),
+ &Context.Idents.get("withObject")};
if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) {
if (ReturnType.isNull()) {
@@ -7259,13 +7523,11 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
// - (void)replaceKeyAtIndexes:(NSIndexSet *)indexes withKey:(NSArray *)array
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
- std::string SelectorName1
- = (Twine("replace") + UpperKey + "AtIndexes").str();
+ std::string SelectorName1 =
+ (Twine("replace") + UpperKey + "AtIndexes").str();
std::string SelectorName2 = (Twine("with") + UpperKey).str();
- IdentifierInfo *SelectorIds[2] = {
- &Context.Idents.get(SelectorName1),
- &Context.Idents.get(SelectorName2)
- };
+ IdentifierInfo *SelectorIds[2] = {&Context.Idents.get(SelectorName1),
+ &Context.Idents.get(SelectorName2)};
if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) {
if (ReturnType.isNull()) {
@@ -7296,8 +7558,9 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
(ReturnType.isNull() ||
(ReturnType->isObjCObjectPointerType() &&
ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl() &&
- ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl()
- ->getName() == "NSEnumerator"))) {
+ ReturnType->getAs<ObjCObjectPointerType>()
+ ->getInterfaceDecl()
+ ->getName() == "NSEnumerator"))) {
std::string SelectorName = (Twine("enumeratorOf") + UpperKey).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))
@@ -7310,7 +7573,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName));
Results.AddResult(Result(Builder.TakeString(), UnorderedGetterPriority,
- CXCursor_ObjCInstanceMethodDecl));
+ CXCursor_ObjCInstanceMethodDecl));
}
}
@@ -7333,9 +7596,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Builder.AddPlaceholderChunk("object-type");
Builder.AddTextChunk(" *");
} else {
- Builder.AddTextChunk(GetCompletionTypeString(ReturnType, Context,
- Policy,
- Builder.getAllocator()));
+ Builder.AddTextChunk(GetCompletionTypeString(
+ ReturnType, Context, Policy, Builder.getAllocator()));
}
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("object");
@@ -7347,8 +7609,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
// Mutable unordered accessors
// - (void)addKeyObject:(type *)object
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
- std::string SelectorName
- = (Twine("add") + UpperKey + Twine("Object")).str();
+ std::string SelectorName =
+ (Twine("add") + UpperKey + Twine("Object")).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
@@ -7391,8 +7653,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
// - (void)removeKeyObject:(type *)object
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
- std::string SelectorName
- = (Twine("remove") + UpperKey + Twine("Object")).str();
+ std::string SelectorName =
+ (Twine("remove") + UpperKey + Twine("Object")).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
@@ -7460,10 +7722,11 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
(ReturnType.isNull() ||
(ReturnType->isObjCObjectPointerType() &&
ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl() &&
- ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl()
- ->getName() == "NSSet"))) {
- std::string SelectorName
- = (Twine("keyPathsForValuesAffecting") + UpperKey).str();
+ ReturnType->getAs<ObjCObjectPointerType>()
+ ->getInterfaceDecl()
+ ->getName() == "NSSet"))) {
+ std::string SelectorName =
+ (Twine("keyPathsForValuesAffecting") + UpperKey).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))
.second) {
@@ -7475,17 +7738,16 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName));
Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
- CXCursor_ObjCClassMethodDecl));
+ CXCursor_ObjCClassMethodDecl));
}
}
// + (BOOL)automaticallyNotifiesObserversForKey
if (!IsInstanceMethod &&
- (ReturnType.isNull() ||
- ReturnType->isIntegerType() ||
+ (ReturnType.isNull() || ReturnType->isIntegerType() ||
ReturnType->isBooleanType())) {
- std::string SelectorName
- = (Twine("automaticallyNotifiesObserversOf") + UpperKey).str();
+ std::string SelectorName =
+ (Twine("automaticallyNotifiesObserversOf") + UpperKey).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))
.second) {
@@ -7497,7 +7759,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName));
Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
- CXCursor_ObjCClassMethodDecl));
+ CXCursor_ObjCClassMethodDecl));
}
}
}
@@ -7509,8 +7771,8 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
QualType ReturnType = GetTypeFromParser(ReturnTy);
Decl *IDecl = nullptr;
if (CurContext->isObjCContainer()) {
- ObjCContainerDecl *OCD = dyn_cast<ObjCContainerDecl>(CurContext);
- IDecl = OCD;
+ ObjCContainerDecl *OCD = dyn_cast<ObjCContainerDecl>(CurContext);
+ IDecl = OCD;
}
// Determine where we should start searching for methods.
ObjCContainerDecl *SearchDecl = nullptr;
@@ -7519,8 +7781,8 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
if (ObjCImplementationDecl *Impl = dyn_cast<ObjCImplementationDecl>(D)) {
SearchDecl = Impl->getClassInterface();
IsInImplementation = true;
- } else if (ObjCCategoryImplDecl *CatImpl
- = dyn_cast<ObjCCategoryImplDecl>(D)) {
+ } else if (ObjCCategoryImplDecl *CatImpl =
+ dyn_cast<ObjCCategoryImplDecl>(D)) {
SearchDecl = CatImpl->getCategoryDecl();
IsInImplementation = true;
} else
@@ -7534,15 +7796,14 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
if (!SearchDecl) {
HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_Other,
- nullptr, 0);
+ CodeCompletionContext::CCC_Other, nullptr, 0);
return;
}
// Find all of the methods that we could declare/implement here.
KnownMethodsMap KnownMethods;
- FindImplementableMethods(Context, SearchDecl, IsInstanceMethod,
- ReturnType, KnownMethods);
+ FindImplementableMethods(Context, SearchDecl, IsInstanceMethod, ReturnType,
+ KnownMethods);
// Add declarations or definitions for each of the known methods.
typedef CodeCompletionResult Result;
@@ -7552,7 +7813,7 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
Results.EnterNewScope();
PrintingPolicy Policy = getCompletionPrintingPolicy(*this);
for (KnownMethodsMap::iterator M = KnownMethods.begin(),
- MEnd = KnownMethods.end();
+ MEnd = KnownMethods.end();
M != MEnd; ++M) {
ObjCMethodDecl *Method = M->second.getPointer();
CodeCompletionBuilder Builder(Results.getAllocator(),
@@ -7569,21 +7830,20 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
if (ReturnType.isNull()) {
QualType ResTy = Method->getSendResultType().stripObjCKindOfType(Context);
AttributedType::stripOuterNullability(ResTy);
- AddObjCPassingTypeChunk(ResTy,
- Method->getObjCDeclQualifier(), Context, Policy,
- Builder);
+ AddObjCPassingTypeChunk(ResTy, Method->getObjCDeclQualifier(), Context,
+ Policy, Builder);
}
Selector Sel = Method->getSelector();
// Add the first part of the selector to the pattern.
- Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
- Sel.getNameForSlot(0)));
+ Builder.AddTypedTextChunk(
+ Builder.getAllocator().CopyString(Sel.getNameForSlot(0)));
// Add parameters to the pattern.
unsigned I = 0;
for (ObjCMethodDecl::param_iterator P = Method->param_begin(),
- PEnd = Method->param_end();
+ PEnd = Method->param_end();
P != PEnd; (void)++P, ++I) {
// Add the part of the selector name.
if (I == 0)
@@ -7591,7 +7851,7 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
else if (I < Sel.getNumArgs()) {
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddTypedTextChunk(
- Builder.getAllocator().CopyString(Sel.getNameForSlot(I) + ":"));
+ Builder.getAllocator().CopyString(Sel.getNameForSlot(I) + ":"));
} else
break;
@@ -7601,16 +7861,14 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParamType = (*P)->getType();
else
ParamType = (*P)->getOriginalType();
- ParamType = ParamType.substObjCTypeArgs(Context, {},
- ObjCSubstitutionContext::Parameter);
+ ParamType = ParamType.substObjCTypeArgs(
+ Context, {}, ObjCSubstitutionContext::Parameter);
AttributedType::stripOuterNullability(ParamType);
- AddObjCPassingTypeChunk(ParamType,
- (*P)->getObjCDeclQualifier(),
- Context, Policy,
- Builder);
+ AddObjCPassingTypeChunk(ParamType, (*P)->getObjCDeclQualifier(), Context,
+ Policy, Builder);
if (IdentifierInfo *Id = (*P)->getIdentifier())
- Builder.AddTextChunk(Builder.getAllocator().CopyString( Id->getName()));
+ Builder.AddTextChunk(Builder.getAllocator().CopyString(Id->getName()));
}
if (Method->isVariadic()) {
@@ -7638,25 +7896,24 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
}
unsigned Priority = CCP_CodePattern;
+ auto R = Result(Builder.TakeString(), Method, Priority);
if (!M->second.getInt())
- Priority += CCD_InBaseClass;
-
- Results.AddResult(Result(Builder.TakeString(), Method, Priority));
+ setInBaseClass(R);
+ Results.AddResult(std::move(R));
}
// Add Key-Value-Coding and Key-Value-Observing accessor methods for all of
// the properties in this class and its categories.
- if (Context.getLangOpts().ObjC2) {
+ if (Context.getLangOpts().ObjC) {
SmallVector<ObjCContainerDecl *, 4> Containers;
Containers.push_back(SearchDecl);
VisitedSelectorSet KnownSelectors;
for (KnownMethodsMap::iterator M = KnownMethods.begin(),
- MEnd = KnownMethods.end();
+ MEnd = KnownMethods.end();
M != MEnd; ++M)
KnownSelectors.insert(M->first);
-
ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(SearchDecl);
if (!IFace)
if (ObjCCategoryDecl *Category = dyn_cast<ObjCCategoryDecl>(SearchDecl))
@@ -7680,16 +7937,14 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
Results.data(), Results.size());
}
-void Sema::CodeCompleteObjCMethodDeclSelector(Scope *S,
- bool IsInstanceMethod,
- bool AtParameterName,
- ParsedType ReturnTy,
- ArrayRef<IdentifierInfo *> SelIdents) {
+void Sema::CodeCompleteObjCMethodDeclSelector(
+ Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnTy,
+ ArrayRef<IdentifierInfo *> SelIdents) {
// If we have an external source, load the entire class method
// pool from the AST file.
if (ExternalSource) {
- for (uint32_t I = 0, N = ExternalSource->GetNumExternalSelectors();
- I != N; ++I) {
+ for (uint32_t I = 0, N = ExternalSource->GetNumExternalSelectors(); I != N;
+ ++I) {
Selector Sel = ExternalSource->GetExternalSelector(I);
if (Sel.isNull() || MethodPool.count(Sel))
continue;
@@ -7711,10 +7966,9 @@ void Sema::CodeCompleteObjCMethodDeclSelector(Scope *S,
for (GlobalMethodPool::iterator M = MethodPool.begin(),
MEnd = MethodPool.end();
M != MEnd; ++M) {
- for (ObjCMethodList *MethList = IsInstanceMethod ? &M->second.first :
- &M->second.second;
- MethList && MethList->getMethod();
- MethList = MethList->getNext()) {
+ for (ObjCMethodList *MethList = IsInstanceMethod ? &M->second.first
+ : &M->second.second;
+ MethList && MethList->getMethod(); MethList = MethList->getNext()) {
if (!isAcceptableObjCMethod(MethList->getMethod(), MK_Any, SelIdents))
continue;
@@ -7729,7 +7983,7 @@ void Sema::CodeCompleteObjCMethodDeclSelector(Scope *S,
CodeCompletionBuilder Builder(Results.getAllocator(),
Results.getCodeCompletionTUInfo());
Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
- Param->getIdentifier()->getName()));
+ Param->getIdentifier()->getName()));
Results.AddResult(Builder.TakeString());
}
}
@@ -7875,7 +8129,7 @@ void Sema::CodeCompletePreprocessorDirective(bool InConditional) {
Builder.AddPlaceholderChunk("arguments");
Results.AddResult(Builder.TakeString());
- if (getLangOpts().ObjC1) {
+ if (getLangOpts().ObjC) {
// #import "header"
Builder.AddTypedTextChunk("import");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
@@ -7927,29 +8181,27 @@ void Sema::CodeCompletePreprocessorDirective(bool InConditional) {
}
void Sema::CodeCompleteInPreprocessorConditionalExclusion(Scope *S) {
- CodeCompleteOrdinaryName(S,
- S->getFnParent()? Sema::PCC_RecoveryInFunction
- : Sema::PCC_Namespace);
+ CodeCompleteOrdinaryName(S, S->getFnParent() ? Sema::PCC_RecoveryInFunction
+ : Sema::PCC_Namespace);
}
void Sema::CodeCompletePreprocessorMacroName(bool IsDefinition) {
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
- IsDefinition? CodeCompletionContext::CCC_MacroName
- : CodeCompletionContext::CCC_MacroNameUse);
+ IsDefinition ? CodeCompletionContext::CCC_MacroName
+ : CodeCompletionContext::CCC_MacroNameUse);
if (!IsDefinition && (!CodeCompleter || CodeCompleter->includeMacros())) {
// Add just the names of macros, not their arguments.
CodeCompletionBuilder Builder(Results.getAllocator(),
Results.getCodeCompletionTUInfo());
Results.EnterNewScope();
for (Preprocessor::macro_iterator M = PP.macro_begin(),
- MEnd = PP.macro_end();
+ MEnd = PP.macro_end();
M != MEnd; ++M) {
- Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
- M->first->getName()));
- Results.AddResult(CodeCompletionResult(Builder.TakeString(),
- CCP_CodePattern,
- CXCursor_MacroDefinition));
+ Builder.AddTypedTextChunk(
+ Builder.getAllocator().CopyString(M->first->getName()));
+ Results.AddResult(CodeCompletionResult(
+ Builder.TakeString(), CCP_CodePattern, CXCursor_MacroDefinition));
}
Results.ExitScope();
} else if (IsDefinition) {
@@ -7970,7 +8222,7 @@ void Sema::CodeCompletePreprocessorExpression() {
CodeCompleter ? CodeCompleter->loadExternal() : false,
true);
- // defined (<macro>)
+ // defined (<macro>)
Results.EnterNewScope();
CodeCompletionBuilder Builder(Results.getAllocator(),
Results.getCodeCompletionTUInfo());
@@ -8041,7 +8293,7 @@ void Sema::CodeCompleteIncludedFile(llvm::StringRef Dir, bool Angled) {
std::error_code EC;
unsigned Count = 0;
for (auto It = FS->dir_begin(Dir, EC);
- !EC && It != vfs::directory_iterator(); It.increment(EC)) {
+ !EC && It != llvm::vfs::directory_iterator(); It.increment(EC)) {
if (++Count == 2500) // If we happen to hit a huge directory,
break; // bail out early so we're not too slow.
StringRef Filename = llvm::sys::path::filename(It->path());
@@ -8070,7 +8322,6 @@ void Sema::CodeCompleteIncludedFile(llvm::StringRef Dir, bool Angled) {
// Helper: adds results relative to IncludeDir, if possible.
auto AddFilesFromDirLookup = [&](const DirectoryLookup &IncludeDir,
bool IsSystem) {
- llvm::SmallString<128> Dir;
switch (IncludeDir.getLookupType()) {
case DirectoryLookup::LT_HeaderMap:
// header maps are not (currently) enumerable.
@@ -8098,7 +8349,7 @@ void Sema::CodeCompleteIncludedFile(llvm::StringRef Dir, bool Angled) {
AddFilesFromDirLookup(D, false);
}
for (const auto &D : make_range(S.angled_dir_begin(), S.angled_dir_end()))
- AddFilesFromDirLookup(D, true);
+ AddFilesFromDirLookup(D, false);
for (const auto &D : make_range(S.system_dir_begin(), S.system_dir_end()))
AddFilesFromDirLookup(D, true);
@@ -8108,8 +8359,8 @@ void Sema::CodeCompleteIncludedFile(llvm::StringRef Dir, bool Angled) {
void Sema::CodeCompleteNaturalLanguage() {
HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_NaturalLanguage,
- nullptr, 0);
+ CodeCompletionContext::CCC_NaturalLanguage, nullptr,
+ 0);
}
void Sema::CodeCompleteAvailabilityPlatformName() {
@@ -8128,9 +8379,9 @@ void Sema::CodeCompleteAvailabilityPlatformName() {
Results.data(), Results.size());
}
-void Sema::GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
- CodeCompletionTUInfo &CCTUInfo,
- SmallVectorImpl<CodeCompletionResult> &Results) {
+void Sema::GatherGlobalCodeCompletions(
+ CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo,
+ SmallVectorImpl<CodeCompletionResult> &Results) {
ResultBuilder Builder(*this, Allocator, CCTUInfo,
CodeCompletionContext::CCC_Recovery);
if (!CodeCompleter || CodeCompleter->includeGlobals()) {
@@ -8147,6 +8398,6 @@ void Sema::GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
true);
Results.clear();
- Results.insert(Results.end(),
- Builder.data(), Builder.data() + Builder.size());
+ Results.insert(Results.end(), Builder.data(),
+ Builder.data() + Builder.size());
}
diff --git a/lib/Sema/SemaCoroutine.cpp b/lib/Sema/SemaCoroutine.cpp
index 826cd48a8a..cc79238470 100644
--- a/lib/Sema/SemaCoroutine.cpp
+++ b/lib/Sema/SemaCoroutine.cpp
@@ -565,8 +565,8 @@ VarDecl *Sema::buildCoroutinePromise(SourceLocation Loc) {
// Create an initialization sequence for the promise type using the
// constructor arguments, wrapped in a parenthesized list expression.
- Expr *PLE = new (Context) ParenListExpr(Context, FD->getLocation(),
- CtorArgExprs, FD->getLocation());
+ Expr *PLE = ParenListExpr::Create(Context, FD->getLocation(),
+ CtorArgExprs, FD->getLocation());
InitializedEntity Entity = InitializedEntity::InitializeVariable(VD);
InitializationKind Kind = InitializationKind::CreateForInit(
VD->getLocation(), /*DirectInit=*/true, PLE);
@@ -841,6 +841,19 @@ StmtResult Sema::BuildCoreturnStmt(SourceLocation Loc, Expr *E,
E = R.get();
}
+ // Move the return value if we can
+ if (E) {
+ auto NRVOCandidate = this->getCopyElisionCandidate(E->getType(), E, CES_AsIfByStdMove);
+ if (NRVOCandidate) {
+ InitializedEntity Entity =
+ InitializedEntity::InitializeResult(Loc, E->getType(), NRVOCandidate);
+ ExprResult MoveResult = this->PerformMoveOrCopyInitialization(
+ Entity, NRVOCandidate, E->getType(), E);
+ if (MoveResult.get())
+ E = MoveResult.get();
+ }
+ }
+
// FIXME: If the operand is a reference to a variable that's about to go out
// of scope, we should treat the operand as an xvalue for this overload
// resolution.
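The hunk above teaches BuildCoreturnStmt to consider the co_return operand for copy elision as if by std::move (CES_AsIfByStdMove), the same treatment plain return statements already get, so the promise's return_value() can bind the operand as an rvalue instead of forcing a copy. A minimal sketch of the user-visible effect; the task type below is invented purely for the example (the patch itself targets the Coroutines TS, but the sketch is written against C++20 <coroutine> for brevity):

#include <coroutine>
#include <string>
#include <utility>

// Invented minimal coroutine type, only to make the example self-contained.
template <typename T>
struct task {
  struct promise_type {
    T value;
    task get_return_object() {
      return {std::coroutine_handle<promise_type>::from_promise(*this)};
    }
    std::suspend_never initial_suspend() noexcept { return {}; }
    std::suspend_always final_suspend() noexcept { return {}; }
    void return_value(T v) { value = std::move(v); }  // receives the co_return operand
    void unhandled_exception() {}
  };
  std::coroutine_handle<promise_type> handle;
  ~task() { if (handle) handle.destroy(); }
};

task<std::string> make_greeting() {
  std::string s = "hello, coroutines";
  co_return s;  // previously copied into return_value(); with the change above, s is an
                // implicit move candidate, so it can be moved instead
}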
diff --git a/lib/Sema/SemaDecl.cpp b/lib/Sema/SemaDecl.cpp
index d02b971230..6dab332f4f 100644
--- a/lib/Sema/SemaDecl.cpp
+++ b/lib/Sema/SemaDecl.cpp
@@ -2111,7 +2111,7 @@ void Sema::MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
// Allow multiple definitions for ObjC built-in typedefs.
// FIXME: Verify the underlying types are equivalent!
- if (getLangOpts().ObjC1) {
+ if (getLangOpts().ObjC) {
const IdentifierInfo *TypeID = New->getIdentifier();
switch (TypeID->getLength()) {
default: break;
@@ -3192,7 +3192,12 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD,
if (RequiresAdjustment) {
const FunctionType *AdjustedType = New->getType()->getAs<FunctionType>();
AdjustedType = Context.adjustFunctionType(AdjustedType, NewTypeInfo);
- New->setType(QualType(AdjustedType, 0));
+
+ QualType AdjustedQT = QualType(AdjustedType, 0);
+ LangAS AS = Old->getType().getAddressSpace();
+ AdjustedQT = Context.getAddrSpaceQualType(AdjustedQT, AS);
+
+ New->setType(AdjustedQT);
NewQType = Context.getCanonicalType(New->getType());
NewType = cast<FunctionType>(NewQType);
}
@@ -5575,11 +5580,13 @@ static QualType TryToFixInvalidVariablyModifiedType(QualType T,
if (VLATy->getElementType()->isVariablyModifiedType())
return QualType();
- llvm::APSInt Res;
+ Expr::EvalResult Result;
if (!VLATy->getSizeExpr() ||
- !VLATy->getSizeExpr()->EvaluateAsInt(Res, Context))
+ !VLATy->getSizeExpr()->EvaluateAsInt(Result, Context))
return QualType();
+ llvm::APSInt Res = Result.Val.getInt();
+
// Check whether the array size is negative.
if (Res.isSigned() && Res.isNegative()) {
SizeIsNegative = true;
@@ -7352,19 +7359,23 @@ void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
return;
}
}
- // OpenCL v1.2 s6.5 - All program scope variables must be declared in the
+ // OpenCL C v1.2 s6.5 - All program scope variables must be declared in the
// __constant address space.
- // OpenCL v2.0 s6.5.1 - Variables defined at program scope and static
+ // OpenCL C v2.0 s6.5.1 - Variables defined at program scope and static
// variables inside a function can also be declared in the global
// address space.
+ // OpenCL C++ v1.0 s2.5 inherits rule from OpenCL C v2.0 and allows local
+ // address space additionally.
+ // FIXME: Add local AS for OpenCL C++.
if (NewVD->isFileVarDecl() || NewVD->isStaticLocal() ||
NewVD->hasExternalStorage()) {
if (!T->isSamplerT() &&
!(T.getAddressSpace() == LangAS::opencl_constant ||
(T.getAddressSpace() == LangAS::opencl_global &&
- getLangOpts().OpenCLVersion == 200))) {
+ (getLangOpts().OpenCLVersion == 200 ||
+ getLangOpts().OpenCLCPlusPlus)))) {
int Scope = NewVD->isStaticLocal() | NewVD->hasExternalStorage() << 1;
- if (getLangOpts().OpenCLVersion == 200)
+ if (getLangOpts().OpenCLVersion == 200 || getLangOpts().OpenCLCPlusPlus)
Diag(NewVD->getLocation(), diag::err_opencl_global_invalid_addr_space)
<< Scope << "global or constant";
else
@@ -8095,7 +8106,7 @@ static OpenCLParamType getOpenCLKernelParameterType(Sema &S, QualType PT) {
const Type *UnderlyingTy = PT->getPointeeOrArrayElementType();
// Call ourself to check an underlying type of an array. Since the
// getPointeeOrArrayElementType returns an innermost type which is not an
- // array, this recusive call only happens once.
+ // array, this recursive call only happens once.
return getOpenCLKernelParameterType(S, QualType(UnderlyingTy, 0));
}
@@ -9362,20 +9373,6 @@ bool Sema::shouldLinkDependentDeclWithPrevious(Decl *D, Decl *PrevDecl) {
PrevVD->getType());
}
-namespace MultiVersioning {
-enum Type { None, Target, CPUSpecific, CPUDispatch};
-} // MultiVersionType
-
-static MultiVersioning::Type
-getMultiVersionType(const FunctionDecl *FD) {
- if (FD->hasAttr<TargetAttr>())
- return MultiVersioning::Target;
- if (FD->hasAttr<CPUDispatchAttr>())
- return MultiVersioning::CPUDispatch;
- if (FD->hasAttr<CPUSpecificAttr>())
- return MultiVersioning::CPUSpecific;
- return MultiVersioning::None;
-}
/// Check the target attribute of the function for MultiVersion
/// validity.
///
@@ -9412,10 +9409,31 @@ static bool CheckMultiVersionValue(Sema &S, const FunctionDecl *FD) {
return false;
}
+static bool HasNonMultiVersionAttributes(const FunctionDecl *FD,
+ MultiVersionKind MVType) {
+ for (const Attr *A : FD->attrs()) {
+ switch (A->getKind()) {
+ case attr::CPUDispatch:
+ case attr::CPUSpecific:
+ if (MVType != MultiVersionKind::CPUDispatch &&
+ MVType != MultiVersionKind::CPUSpecific)
+ return true;
+ break;
+ case attr::Target:
+ if (MVType != MultiVersionKind::Target)
+ return true;
+ break;
+ default:
+ return true;
+ }
+ }
+ return false;
+}
+
static bool CheckMultiVersionAdditionalRules(Sema &S, const FunctionDecl *OldFD,
const FunctionDecl *NewFD,
bool CausesMV,
- MultiVersioning::Type MVType) {
+ MultiVersionKind MVType) {
enum DoesntSupport {
FuncTemplates = 0,
VirtFuncs = 1,
@@ -9436,8 +9454,8 @@ static bool CheckMultiVersionAdditionalRules(Sema &S, const FunctionDecl *OldFD,
};
bool IsCPUSpecificCPUDispatchMVType =
- MVType == MultiVersioning::CPUDispatch ||
- MVType == MultiVersioning::CPUSpecific;
+ MVType == MultiVersionKind::CPUDispatch ||
+ MVType == MultiVersionKind::CPUSpecific;
if (OldFD && !OldFD->getType()->getAs<FunctionProtoType>()) {
S.Diag(OldFD->getLocation(), diag::err_multiversion_noproto);
@@ -9457,15 +9475,14 @@ static bool CheckMultiVersionAdditionalRules(Sema &S, const FunctionDecl *OldFD,
// For now, disallow all other attributes. These should be opt-in, but
// an analysis of all of them is a future FIXME.
- if (CausesMV && OldFD &&
- std::distance(OldFD->attr_begin(), OldFD->attr_end()) != 1) {
+ if (CausesMV && OldFD && HasNonMultiVersionAttributes(OldFD, MVType)) {
S.Diag(OldFD->getLocation(), diag::err_multiversion_no_other_attrs)
<< IsCPUSpecificCPUDispatchMVType;
S.Diag(NewFD->getLocation(), diag::note_multiversioning_caused_here);
return true;
}
- if (std::distance(NewFD->attr_begin(), NewFD->attr_end()) != 1)
+ if (HasNonMultiVersionAttributes(NewFD, MVType))
return S.Diag(NewFD->getLocation(), diag::err_multiversion_no_other_attrs)
<< IsCPUSpecificCPUDispatchMVType;
@@ -9498,8 +9515,8 @@ static bool CheckMultiVersionAdditionalRules(Sema &S, const FunctionDecl *OldFD,
return S.Diag(NewFD->getLocation(), diag::err_multiversion_doesnt_support)
<< IsCPUSpecificCPUDispatchMVType << DefaultedFuncs;
- if (NewFD->isConstexpr() && (MVType == MultiVersioning::CPUDispatch ||
- MVType == MultiVersioning::CPUSpecific))
+ if (NewFD->isConstexpr() && (MVType == MultiVersionKind::CPUDispatch ||
+ MVType == MultiVersionKind::CPUSpecific))
return S.Diag(NewFD->getLocation(), diag::err_multiversion_doesnt_support)
<< IsCPUSpecificCPUDispatchMVType << ConstexprFuncs;
@@ -9563,19 +9580,19 @@ static bool CheckMultiVersionAdditionalRules(Sema &S, const FunctionDecl *OldFD,
///
/// Returns true if there was an error, false otherwise.
static bool CheckMultiVersionFirstFunction(Sema &S, FunctionDecl *FD,
- MultiVersioning::Type MVType,
+ MultiVersionKind MVType,
const TargetAttr *TA,
const CPUDispatchAttr *CPUDisp,
const CPUSpecificAttr *CPUSpec) {
- assert(MVType != MultiVersioning::None &&
+ assert(MVType != MultiVersionKind::None &&
"Function lacks multiversion attribute");
// Target only causes MV if it is default, otherwise this is a normal
// function.
- if (MVType == MultiVersioning::Target && !TA->isDefaultVersion())
+ if (MVType == MultiVersionKind::Target && !TA->isDefaultVersion())
return false;
- if (MVType == MultiVersioning::Target && CheckMultiVersionValue(S, FD)) {
+ if (MVType == MultiVersionKind::Target && CheckMultiVersionValue(S, FD)) {
FD->setInvalidDecl();
return true;
}
@@ -9589,6 +9606,15 @@ static bool CheckMultiVersionFirstFunction(Sema &S, FunctionDecl *FD,
return false;
}
+static bool PreviousDeclsHaveMultiVersionAttribute(const FunctionDecl *FD) {
+ for (const Decl *D = FD->getPreviousDecl(); D; D = D->getPreviousDecl()) {
+ if (D->getAsFunction()->getMultiVersionKind() != MultiVersionKind::None)
+ return true;
+ }
+
+ return false;
+}
+
static bool CheckTargetCausesMultiVersioning(
Sema &S, FunctionDecl *OldFD, FunctionDecl *NewFD, const TargetAttr *NewTA,
bool &Redeclaration, NamedDecl *&OldDecl, bool &MergeTypeWithPrevious,
@@ -9596,11 +9622,12 @@ static bool CheckTargetCausesMultiVersioning(
const auto *OldTA = OldFD->getAttr<TargetAttr>();
TargetAttr::ParsedTargetAttr NewParsed = NewTA->parse();
// Sort order doesn't matter, it just needs to be consistent.
- llvm::sort(NewParsed.Features.begin(), NewParsed.Features.end());
+ llvm::sort(NewParsed.Features);
// If the old decl is NOT MultiVersioned yet, and we don't cause that
// to change, this is a simple redeclaration.
- if (!OldTA || OldTA->getFeaturesStr() == NewTA->getFeaturesStr())
+ if (!NewTA->isDefaultVersion() &&
+ (!OldTA || OldTA->getFeaturesStr() == NewTA->getFeaturesStr()))
return false;
// Otherwise, this decl causes MultiVersioning.
@@ -9612,7 +9639,7 @@ static bool CheckTargetCausesMultiVersioning(
}
if (CheckMultiVersionAdditionalRules(S, OldFD, NewFD, true,
- MultiVersioning::Target)) {
+ MultiVersionKind::Target)) {
NewFD->setInvalidDecl();
return true;
}
@@ -9622,6 +9649,15 @@ static bool CheckTargetCausesMultiVersioning(
return true;
}
+ // If this is 'default', permit the forward declaration.
+ if (!OldFD->isMultiVersion() && !OldTA && NewTA->isDefaultVersion()) {
+ Redeclaration = true;
+ OldDecl = OldFD;
+ OldFD->setIsMultiVersion();
+ NewFD->setIsMultiVersion();
+ return false;
+ }
+
if (CheckMultiVersionValue(S, OldFD)) {
S.Diag(NewFD->getLocation(), diag::note_multiversioning_caused_here);
NewFD->setInvalidDecl();
@@ -9640,7 +9676,10 @@ static bool CheckTargetCausesMultiVersioning(
for (const auto *FD : OldFD->redecls()) {
const auto *CurTA = FD->getAttr<TargetAttr>();
- if (!CurTA || CurTA->isInherited()) {
+ // We allow forward declarations before ANY multiversioning attributes, but
+ // nothing after the fact.
+ if (PreviousDeclsHaveMultiVersionAttribute(FD) &&
+ (!CurTA || CurTA->isInherited())) {
S.Diag(FD->getLocation(), diag::err_multiversion_required_in_redecl)
<< 0;
S.Diag(NewFD->getLocation(), diag::note_multiversioning_caused_here);
@@ -9662,17 +9701,17 @@ static bool CheckTargetCausesMultiVersioning(
/// multiversioned declaration collection.
static bool CheckMultiVersionAdditionalDecl(
Sema &S, FunctionDecl *OldFD, FunctionDecl *NewFD,
- MultiVersioning::Type NewMVType, const TargetAttr *NewTA,
+ MultiVersionKind NewMVType, const TargetAttr *NewTA,
const CPUDispatchAttr *NewCPUDisp, const CPUSpecificAttr *NewCPUSpec,
bool &Redeclaration, NamedDecl *&OldDecl, bool &MergeTypeWithPrevious,
LookupResult &Previous) {
- MultiVersioning::Type OldMVType = getMultiVersionType(OldFD);
+ MultiVersionKind OldMVType = OldFD->getMultiVersionKind();
// Disallow mixing of multiversioning types.
- if ((OldMVType == MultiVersioning::Target &&
- NewMVType != MultiVersioning::Target) ||
- (NewMVType == MultiVersioning::Target &&
- OldMVType != MultiVersioning::Target)) {
+ if ((OldMVType == MultiVersionKind::Target &&
+ NewMVType != MultiVersionKind::Target) ||
+ (NewMVType == MultiVersionKind::Target &&
+ OldMVType != MultiVersionKind::Target)) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_types_mixed);
S.Diag(OldFD->getLocation(), diag::note_previous_declaration);
NewFD->setInvalidDecl();
@@ -9682,7 +9721,7 @@ static bool CheckMultiVersionAdditionalDecl(
TargetAttr::ParsedTargetAttr NewParsed;
if (NewTA) {
NewParsed = NewTA->parse();
- llvm::sort(NewParsed.Features.begin(), NewParsed.Features.end());
+ llvm::sort(NewParsed.Features);
}
bool UseMemberUsingDeclRules =
@@ -9697,7 +9736,7 @@ static bool CheckMultiVersionAdditionalDecl(
if (S.IsOverload(NewFD, CurFD, UseMemberUsingDeclRules))
continue;
- if (NewMVType == MultiVersioning::Target) {
+ if (NewMVType == MultiVersionKind::Target) {
const auto *CurTA = CurFD->getAttr<TargetAttr>();
if (CurTA->getFeaturesStr() == NewTA->getFeaturesStr()) {
NewFD->setIsMultiVersion();
@@ -9720,7 +9759,7 @@ static bool CheckMultiVersionAdditionalDecl(
// Handle CPUDispatch/CPUSpecific versions.
// Only 1 CPUDispatch function is allowed, this will make it go through
// the redeclaration errors.
- if (NewMVType == MultiVersioning::CPUDispatch &&
+ if (NewMVType == MultiVersionKind::CPUDispatch &&
CurFD->hasAttr<CPUDispatchAttr>()) {
if (CurCPUDisp->cpus_size() == NewCPUDisp->cpus_size() &&
std::equal(
@@ -9741,7 +9780,7 @@ static bool CheckMultiVersionAdditionalDecl(
NewFD->setInvalidDecl();
return true;
}
- if (NewMVType == MultiVersioning::CPUSpecific && CurCPUSpec) {
+ if (NewMVType == MultiVersionKind::CPUSpecific && CurCPUSpec) {
if (CurCPUSpec->cpus_size() == NewCPUSpec->cpus_size() &&
std::equal(
@@ -9777,7 +9816,7 @@ static bool CheckMultiVersionAdditionalDecl(
// Else, this is simply a non-redecl case. Checking the 'value' is only
// necessary in the Target case, since The CPUSpecific/Dispatch cases are
// handled in the attribute adding step.
- if (NewMVType == MultiVersioning::Target &&
+ if (NewMVType == MultiVersionKind::Target &&
CheckMultiVersionValue(S, NewFD)) {
NewFD->setInvalidDecl();
return true;
@@ -9788,6 +9827,15 @@ static bool CheckMultiVersionAdditionalDecl(
return true;
}
+ // Permit forward declarations in the case where these two are compatible.
+ if (!OldFD->isMultiVersion()) {
+ OldFD->setIsMultiVersion();
+ NewFD->setIsMultiVersion();
+ Redeclaration = true;
+ OldDecl = OldFD;
+ return false;
+ }
+
NewFD->setIsMultiVersion();
Redeclaration = false;
MergeTypeWithPrevious = false;
@@ -9819,14 +9867,14 @@ static bool CheckMultiVersionFunction(Sema &S, FunctionDecl *NewFD,
return true;
}
- MultiVersioning::Type MVType = getMultiVersionType(NewFD);
+ MultiVersionKind MVType = NewFD->getMultiVersionKind();
// Main isn't allowed to become a multiversion function, however it IS
// permitted to have 'main' be marked with the 'target' optimization hint.
if (NewFD->isMain()) {
- if ((MVType == MultiVersioning::Target && NewTA->isDefaultVersion()) ||
- MVType == MultiVersioning::CPUDispatch ||
- MVType == MultiVersioning::CPUSpecific) {
+ if ((MVType == MultiVersionKind::Target && NewTA->isDefaultVersion()) ||
+ MVType == MultiVersionKind::CPUDispatch ||
+ MVType == MultiVersionKind::CPUSpecific) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_not_allowed_on_main);
NewFD->setInvalidDecl();
return true;
@@ -9839,7 +9887,7 @@ static bool CheckMultiVersionFunction(Sema &S, FunctionDecl *NewFD,
NewFD->getDeclContext()->getRedeclContext()) {
// If there's no previous declaration, AND this isn't attempting to cause
// multiversioning, this isn't an error condition.
- if (MVType == MultiVersioning::None)
+ if (MVType == MultiVersionKind::None)
return false;
return CheckMultiVersionFirstFunction(S, NewFD, MVType, NewTA, NewCPUDisp,
NewCPUSpec);
@@ -9847,29 +9895,21 @@ static bool CheckMultiVersionFunction(Sema &S, FunctionDecl *NewFD,
FunctionDecl *OldFD = OldDecl->getAsFunction();
- if (!OldFD->isMultiVersion() && MVType == MultiVersioning::None)
+ if (!OldFD->isMultiVersion() && MVType == MultiVersionKind::None)
return false;
- if (OldFD->isMultiVersion() && MVType == MultiVersioning::None) {
+ if (OldFD->isMultiVersion() && MVType == MultiVersionKind::None) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_required_in_redecl)
- << (getMultiVersionType(OldFD) != MultiVersioning::Target);
+ << (OldFD->getMultiVersionKind() != MultiVersionKind::Target);
NewFD->setInvalidDecl();
return true;
}
// Handle the target potentially causes multiversioning case.
- if (!OldFD->isMultiVersion() && MVType == MultiVersioning::Target)
+ if (!OldFD->isMultiVersion() && MVType == MultiVersionKind::Target)
return CheckTargetCausesMultiVersioning(S, OldFD, NewFD, NewTA,
Redeclaration, OldDecl,
MergeTypeWithPrevious, Previous);
- // Previous declarations lack CPUDispatch/CPUSpecific.
- if (!OldFD->isMultiVersion()) {
- S.Diag(OldFD->getLocation(), diag::err_multiversion_required_in_redecl)
- << 1;
- S.Diag(NewFD->getLocation(), diag::note_multiversioning_caused_here);
- NewFD->setInvalidDecl();
- return true;
- }
// At this point, we have a multiversion function decl (in OldFD) AND an
// appropriate attribute in the current function decl. Resolve that these are
@@ -9982,7 +10022,7 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(NewFD);
if (!getLangOpts().CPlusPlus14 && MD && MD->isConstexpr() &&
!MD->isStatic() && !isa<CXXConstructorDecl>(MD) &&
- (MD->getTypeQualifiers() & Qualifiers::Const) == 0) {
+ !MD->getTypeQualifiers().hasConst()) {
CXXMethodDecl *OldMD = nullptr;
if (OldDecl)
OldMD = dyn_cast_or_null<CXXMethodDecl>(OldDecl->getAsFunction());
@@ -9990,7 +10030,7 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
const FunctionProtoType *FPT =
MD->getType()->castAs<FunctionProtoType>();
FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
- EPI.TypeQuals |= Qualifiers::Const;
+ EPI.TypeQuals.addConst();
MD->setType(Context.getFunctionType(FPT->getReturnType(),
FPT->getParamTypes(), EPI));
@@ -10022,11 +10062,17 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
if (FunctionTemplateDecl *OldTemplateDecl =
dyn_cast<FunctionTemplateDecl>(OldDecl)) {
auto *OldFD = OldTemplateDecl->getTemplatedDecl();
- NewFD->setPreviousDeclaration(OldFD);
- adjustDeclContextForDeclaratorDecl(NewFD, OldFD);
FunctionTemplateDecl *NewTemplateDecl
= NewFD->getDescribedFunctionTemplate();
assert(NewTemplateDecl && "Template/non-template mismatch");
+
+ // The call to MergeFunctionDecl above may have created some state in
+ // NewTemplateDecl that needs to be merged with OldTemplateDecl before we
+ // can add it as a redeclaration.
+ NewTemplateDecl->mergePrevDecl(OldTemplateDecl);
+
+ NewFD->setPreviousDeclaration(OldFD);
+ adjustDeclContextForDeclaratorDecl(NewFD, OldFD);
if (NewFD->isCXXClassMember()) {
NewFD->setAccess(OldTemplateDecl->getAccess());
NewTemplateDecl->setAccess(OldTemplateDecl->getAccess());
@@ -10479,7 +10525,7 @@ namespace {
Expr *Base = E;
bool ReferenceField = false;
- // Get the field memebers used.
+ // Get the field members used.
while (MemberExpr *ME = dyn_cast<MemberExpr>(Base)) {
FieldDecl *FD = dyn_cast<FieldDecl>(ME->getMemberDecl());
if (!FD)
@@ -11707,7 +11753,7 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
// In Objective-C, don't allow jumps past the implicit initialization of a
// local retaining variable.
- if (getLangOpts().ObjC1 &&
+ if (getLangOpts().ObjC &&
var->hasLocalStorage()) {
switch (var->getType().getObjCLifetime()) {
case Qualifiers::OCL_None:
@@ -11826,37 +11872,8 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
QualType type = var->getType();
if (type->isDependentType()) return;
- // __block variables might require us to capture a copy-initializer.
- if (var->hasAttr<BlocksAttr>()) {
- // It's currently invalid to ever have a __block variable with an
- // array type; should we diagnose that here?
-
- // Regardless, we don't want to ignore array nesting when
- // constructing this copy.
- if (type->isStructureOrClassType()) {
- EnterExpressionEvaluationContext scope(
- *this, ExpressionEvaluationContext::PotentiallyEvaluated);
- SourceLocation poi = var->getLocation();
- Expr *varRef =new (Context) DeclRefExpr(var, false, type, VK_LValue, poi);
- ExprResult result
- = PerformMoveOrCopyInitialization(
- InitializedEntity::InitializeBlock(poi, type, false),
- var, var->getType(), varRef, /*AllowNRVO=*/true);
- if (!result.isInvalid()) {
- result = MaybeCreateExprWithCleanups(result);
- Expr *init = result.getAs<Expr>();
- Context.setBlockVarCopyInit(var, init, canThrow(init));
- }
-
- // The destructor's exception spefication is needed when IRGen generates
- // block copy/destroy functions. Resolve it here.
- if (const CXXRecordDecl *RD = type->getAsCXXRecordDecl())
- if (CXXDestructorDecl *DD = RD->getDestructor()) {
- auto *FPT = DD->getType()->getAs<FunctionProtoType>();
- FPT = ResolveExceptionSpec(poi, FPT);
- }
- }
- }
+ if (var->hasAttr<BlocksAttr>())
+ getCurFunction()->addByrefBlockVar(var);
Expr *Init = var->getInit();
bool IsGlobal = GlobalStorage && !var->isStaticLocal();
@@ -11947,6 +11964,49 @@ static bool hasDependentAlignment(VarDecl *VD) {
return false;
}
+/// Check if VD needs to be dllexport/dllimport due to being in a
+/// dllexport/import function.
+void Sema::CheckStaticLocalForDllExport(VarDecl *VD) {
+ assert(VD->isStaticLocal());
+
+ auto *FD = dyn_cast_or_null<FunctionDecl>(VD->getParentFunctionOrMethod());
+
+ // Find outermost function when VD is in lambda function.
+ while (FD && !getDLLAttr(FD) &&
+ !FD->hasAttr<DLLExportStaticLocalAttr>() &&
+ !FD->hasAttr<DLLImportStaticLocalAttr>()) {
+ FD = dyn_cast_or_null<FunctionDecl>(FD->getParentFunctionOrMethod());
+ }
+
+ if (!FD)
+ return;
+
+ // Static locals inherit dll attributes from their function.
+ if (Attr *A = getDLLAttr(FD)) {
+ auto *NewAttr = cast<InheritableAttr>(A->clone(getASTContext()));
+ NewAttr->setInherited(true);
+ VD->addAttr(NewAttr);
+ } else if (Attr *A = FD->getAttr<DLLExportStaticLocalAttr>()) {
+ auto *NewAttr = ::new (getASTContext()) DLLExportAttr(A->getRange(),
+ getASTContext(),
+ A->getSpellingListIndex());
+ NewAttr->setInherited(true);
+ VD->addAttr(NewAttr);
+
+ // Export this function to enforce exporting this static variable even
+ // if it is not used in this compilation unit.
+ if (!FD->hasAttr<DLLExportAttr>())
+ FD->addAttr(NewAttr);
+
+ } else if (Attr *A = FD->getAttr<DLLImportStaticLocalAttr>()) {
+ auto *NewAttr = ::new (getASTContext()) DLLImportAttr(A->getRange(),
+ getASTContext(),
+ A->getSpellingListIndex());
+ NewAttr->setInherited(true);
+ VD->addAttr(NewAttr);
+ }
+}
+
/// FinalizeDeclaration - called by ParseDeclarationAfterDeclarator to perform
/// any semantic actions necessary after any initializer has been attached.
void Sema::FinalizeDeclaration(Decl *ThisDecl) {
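The new CheckStaticLocalForDllExport helper above walks out of enclosing lambdas until it finds a function carrying a DLL attribute (or one of the DLL{Export,Import}StaticLocal attributes) and copies that attribute onto the static local. A rough sketch of the situation the lambda walk appears to be aimed at, assuming a Windows target with Microsoft extensions; all names are invented for the example:

struct __declspec(dllexport) Widget {
  int next_id() {
    auto bump = [] {
      static int n = 0;  // declared inside the lambda's call operator, yet the walk above
      return ++n;        // reaches next_id(), whose inherited dllexport is copied onto n
    };
    return bump();
  }
};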
@@ -12000,14 +12060,9 @@ void Sema::FinalizeDeclaration(Decl *ThisDecl) {
}
if (VD->isStaticLocal()) {
- if (FunctionDecl *FD =
- dyn_cast_or_null<FunctionDecl>(VD->getParentFunctionOrMethod())) {
- // Static locals inherit dll attributes from their function.
- if (Attr *A = getDLLAttr(FD)) {
- auto *NewAttr = cast<InheritableAttr>(A->clone(getASTContext()));
- NewAttr->setInherited(true);
- VD->addAttr(NewAttr);
- }
+ CheckStaticLocalForDllExport(VD);
+
+ if (dyn_cast_or_null<FunctionDecl>(VD->getParentFunctionOrMethod())) {
// CUDA 8.0 E.3.9.4: Within the body of a __device__ or __global__
// function, only __shared__ variables or variables without any device
// memory qualifiers may be declared with static storage class.
@@ -12687,6 +12742,29 @@ Sema::CheckForFunctionRedefinition(FunctionDecl *FD,
}
}
}
+
+ if (!Definition)
+ // Similar to friend functions a friend function template may be a
+ // definition and do not have a body if it is instantiated in a class
+ // template.
+ if (FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate()) {
+ for (auto I : FTD->redecls()) {
+ auto D = cast<FunctionTemplateDecl>(I);
+ if (D != FTD) {
+ assert(!D->isThisDeclarationADefinition() &&
+ "More than one definition in redeclaration chain");
+ if (D->getFriendObjectKind() != Decl::FOK_None)
+ if (FunctionTemplateDecl *FT =
+ D->getInstantiatedFromMemberTemplate()) {
+ if (FT->isThisDeclarationADefinition()) {
+ Definition = D->getTemplatedDecl();
+ break;
+ }
+ }
+ }
+ }
+ }
+
if (!Definition)
return;
@@ -12778,6 +12856,7 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D,
// Parsing the function declaration failed in some way. Push on a fake scope
// anyway so we can try to parse the function body.
PushFunctionScope();
+ PushExpressionEvaluationContext(ExprEvalContexts.back().Context);
return D;
}
@@ -12788,6 +12867,11 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D,
else
FD = cast<FunctionDecl>(D);
+ // Do not push if it is a lambda because one is already pushed when building
+ // the lambda in ActOnStartOfLambdaDefinition().
+ if (!isLambdaCallOperator(FD))
+ PushExpressionEvaluationContext(ExprEvalContexts.back().Context);
+
// Check for defining attributes before the check for redefinition.
if (const auto *Attr = FD->getAttr<AliasAttr>()) {
Diag(Attr->getLocation(), diag::err_alias_is_definition) << FD << 0;
@@ -12996,6 +13080,21 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *D, Stmt *BodyArg) {
return ActOnFinishFunctionBody(D, BodyArg, false);
}
+/// RAII object that pops an ExpressionEvaluationContext when exiting a function
+/// body.
+class ExitFunctionBodyRAII {
+public:
+ ExitFunctionBodyRAII(Sema &S, bool IsLambda) : S(S), IsLambda(IsLambda) {}
+ ~ExitFunctionBodyRAII() {
+ if (!IsLambda)
+ S.PopExpressionEvaluationContext();
+ }
+
+private:
+ Sema &S;
+ bool IsLambda = false;
+};
+
Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
bool IsInstantiation) {
FunctionDecl *FD = dcl ? dcl->getAsFunction() : nullptr;
@@ -13006,6 +13105,11 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
if (getLangOpts().CoroutinesTS && getCurFunction()->isCoroutine())
CheckCompletedCoroutineBody(FD, Body);
+ // Do not call PopExpressionEvaluationContext() if it is a lambda because one
+ // is already popped when finishing the lambda in BuildLambdaExpr(). This is
+ // meant to pop the context added in ActOnStartOfFunctionDef().
+ ExitFunctionBodyRAII ExitRAII(*this, isLambdaCallOperator(FD));
+
if (FD) {
FD->setBody(Body);
FD->setWillHaveBody(false);
@@ -13060,7 +13164,7 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
if (!FD->isInvalidDecl()) {
// Don't diagnose unused parameters of defaulted or deleted functions.
- if (!FD->isDeleted() && !FD->isDefaulted())
+ if (!FD->isDeleted() && !FD->isDefaulted() && !FD->hasSkippedBody())
DiagnoseUnusedParameters(FD->parameters());
DiagnoseSizeOfParametersAndReturnValue(FD->parameters(),
FD->getReturnType(), FD);
@@ -13155,7 +13259,8 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
assert(MD == getCurMethodDecl() && "Method parsing confused");
MD->setBody(Body);
if (!MD->isInvalidDecl()) {
- DiagnoseUnusedParameters(MD->parameters());
+ if (!MD->hasSkippedBody())
+ DiagnoseUnusedParameters(MD->parameters());
DiagnoseSizeOfParametersAndReturnValue(MD->parameters(),
MD->getReturnType(), MD);
@@ -13745,76 +13850,106 @@ bool Sema::isAcceptableTagRedeclaration(const TagDecl *Previous,
// struct class-key shall be used to refer to a class (clause 9)
// declared using the class or struct class-key.
TagTypeKind OldTag = Previous->getTagKind();
- if (!isDefinition || !isClassCompatTagKind(NewTag))
- if (OldTag == NewTag)
+ if (OldTag != NewTag &&
+ !(isClassCompatTagKind(OldTag) && isClassCompatTagKind(NewTag)))
+ return false;
+
+ // Tags are compatible, but we might still want to warn on mismatched tags.
+ // Non-class tags can't be mismatched at this point.
+ if (!isClassCompatTagKind(NewTag))
+ return true;
+
+ // Declarations for which -Wmismatched-tags is disabled are entirely ignored
+ // by our warning analysis. We don't want to warn about mismatches with (eg)
+ // declarations in system headers that are designed to be specialized, but if
+ // a user asks us to warn, we should warn if their code contains mismatched
+ // declarations.
+ auto IsIgnoredLoc = [&](SourceLocation Loc) {
+ return getDiagnostics().isIgnored(diag::warn_struct_class_tag_mismatch,
+ Loc);
+ };
+ if (IsIgnoredLoc(NewTagLoc))
+ return true;
+
+ auto IsIgnored = [&](const TagDecl *Tag) {
+ return IsIgnoredLoc(Tag->getLocation());
+ };
+ while (IsIgnored(Previous)) {
+ Previous = Previous->getPreviousDecl();
+ if (!Previous)
return true;
+ OldTag = Previous->getTagKind();
+ }
- if (isClassCompatTagKind(OldTag) && isClassCompatTagKind(NewTag)) {
- // Warn about the struct/class tag mismatch.
- bool isTemplate = false;
- if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(Previous))
- isTemplate = Record->getDescribedClassTemplate();
+ bool isTemplate = false;
+ if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(Previous))
+ isTemplate = Record->getDescribedClassTemplate();
- if (inTemplateInstantiation()) {
+ if (inTemplateInstantiation()) {
+ if (OldTag != NewTag) {
// In a template instantiation, do not offer fix-its for tag mismatches
// since they usually mess up the template instead of fixing the problem.
Diag(NewTagLoc, diag::warn_struct_class_tag_mismatch)
<< getRedeclDiagFromTagKind(NewTag) << isTemplate << Name
<< getRedeclDiagFromTagKind(OldTag);
+ // FIXME: Note previous location?
+ }
+ return true;
+ }
+
+ if (isDefinition) {
+ // On definitions, check all previous tags and issue a fix-it for each
+ // one that doesn't match the current tag.
+ if (Previous->getDefinition()) {
+ // Don't suggest fix-its for redefinitions.
return true;
}
- if (isDefinition) {
- // On definitions, check previous tags and issue a fix-it for each
- // one that doesn't match the current tag.
- if (Previous->getDefinition()) {
- // Don't suggest fix-its for redefinitions.
- return true;
- }
+ bool previousMismatch = false;
+ for (const TagDecl *I : Previous->redecls()) {
+ if (I->getTagKind() != NewTag) {
+ // Ignore previous declarations for which the warning was disabled.
+ if (IsIgnored(I))
+ continue;
- bool previousMismatch = false;
- for (auto I : Previous->redecls()) {
- if (I->getTagKind() != NewTag) {
- if (!previousMismatch) {
- previousMismatch = true;
- Diag(NewTagLoc, diag::warn_struct_class_previous_tag_mismatch)
- << getRedeclDiagFromTagKind(NewTag) << isTemplate << Name
- << getRedeclDiagFromTagKind(I->getTagKind());
- }
- Diag(I->getInnerLocStart(), diag::note_struct_class_suggestion)
- << getRedeclDiagFromTagKind(NewTag)
- << FixItHint::CreateReplacement(I->getInnerLocStart(),
- TypeWithKeyword::getTagTypeKindName(NewTag));
+ if (!previousMismatch) {
+ previousMismatch = true;
+ Diag(NewTagLoc, diag::warn_struct_class_previous_tag_mismatch)
+ << getRedeclDiagFromTagKind(NewTag) << isTemplate << Name
+ << getRedeclDiagFromTagKind(I->getTagKind());
}
+ Diag(I->getInnerLocStart(), diag::note_struct_class_suggestion)
+ << getRedeclDiagFromTagKind(NewTag)
+ << FixItHint::CreateReplacement(I->getInnerLocStart(),
+ TypeWithKeyword::getTagTypeKindName(NewTag));
}
- return true;
- }
-
- // Check for a previous definition. If current tag and definition
- // are same type, do nothing. If no definition, but disagree with
- // with previous tag type, give a warning, but no fix-it.
- const TagDecl *Redecl = Previous->getDefinition() ?
- Previous->getDefinition() : Previous;
- if (Redecl->getTagKind() == NewTag) {
- return true;
}
+ return true;
+ }
+ // Identify the prevailing tag kind: this is the kind of the definition (if
+ // there is a non-ignored definition), or otherwise the kind of the prior
+ // (non-ignored) declaration.
+ const TagDecl *PrevDef = Previous->getDefinition();
+ if (PrevDef && IsIgnored(PrevDef))
+ PrevDef = nullptr;
+ const TagDecl *Redecl = PrevDef ? PrevDef : Previous;
+ if (Redecl->getTagKind() != NewTag) {
Diag(NewTagLoc, diag::warn_struct_class_tag_mismatch)
<< getRedeclDiagFromTagKind(NewTag) << isTemplate << Name
<< getRedeclDiagFromTagKind(OldTag);
Diag(Redecl->getLocation(), diag::note_previous_use);
// If there is a previous definition, suggest a fix-it.
- if (Previous->getDefinition()) {
- Diag(NewTagLoc, diag::note_struct_class_suggestion)
- << getRedeclDiagFromTagKind(Redecl->getTagKind())
- << FixItHint::CreateReplacement(SourceRange(NewTagLoc),
- TypeWithKeyword::getTagTypeKindName(Redecl->getTagKind()));
+ if (PrevDef) {
+ Diag(NewTagLoc, diag::note_struct_class_suggestion)
+ << getRedeclDiagFromTagKind(Redecl->getTagKind())
+ << FixItHint::CreateReplacement(SourceRange(NewTagLoc),
+ TypeWithKeyword::getTagTypeKindName(Redecl->getTagKind()));
}
-
- return true;
}
- return false;
+
+ return true;
}
/// Add a minimal nested name specifier fixit hint to allow lookup of a tag name
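The reworked isAcceptableTagRedeclaration above keeps struct and class interchangeable but restructures the -Wmismatched-tags analysis so that declarations where the warning is suppressed (for instance in system headers) are skipped both when deciding whether to warn and when choosing the prevailing tag kind. The basic case it still diagnoses, with invented names:

struct Thing;   // note + fix-it here: suggest spelling this 'class Thing;'
class Thing {   // -Wmismatched-tags: defined as a class here, previously declared as a struct
  int x = 0;
};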
@@ -14615,7 +14750,7 @@ CreateNewDecl:
// If this is an undefined enum, warn.
if (TUK != TUK_Definition && !Invalid) {
TagDecl *Def;
- if (IsFixed && (getLangOpts().CPlusPlus11 || getLangOpts().ObjC2) &&
+ if (IsFixed && (getLangOpts().CPlusPlus11 || getLangOpts().ObjC) &&
cast<EnumDecl>(New)->isFixed()) {
// C++0x: 7.2p2: opaque-enum-declaration.
// Conflicts are diagnosed above. Do nothing.
@@ -15090,22 +15225,6 @@ FieldDecl *Sema::HandleField(Scope *S, RecordDecl *Record,
}
}
- // TR 18037 does not allow fields to be declared with address spaces.
- if (T.getQualifiers().hasAddressSpace() ||
- T->isDependentAddressSpaceType() ||
- T->getBaseElementTypeUnsafe()->isDependentAddressSpaceType()) {
- Diag(Loc, diag::err_field_with_address_space);
- D.setInvalidType();
- }
-
- // OpenCL v1.2 s6.9b,r & OpenCL v2.0 s6.12.5 - The following types cannot be
- // used as structure or union field: image, sampler, event or block types.
- if (LangOpts.OpenCL && (T->isEventT() || T->isImageType() ||
- T->isSamplerT() || T->isBlockPointerType())) {
- Diag(Loc, diag::err_opencl_type_struct_or_union_field) << T;
- D.setInvalidType();
- }
-
DiagnoseFunctionSpecifiers(D.getDeclSpec());
if (D.getDeclSpec().isInlineSpecified())
@@ -15217,12 +15336,30 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
}
}
- // OpenCL v1.2 s6.9.c: bitfields are not supported.
- if (BitWidth && getLangOpts().OpenCL) {
- Diag(Loc, diag::err_opencl_bitfields);
+ // TR 18037 does not allow fields to be declared with address space
+ if (T.getQualifiers().hasAddressSpace() || T->isDependentAddressSpaceType() ||
+ T->getBaseElementTypeUnsafe()->isDependentAddressSpaceType()) {
+ Diag(Loc, diag::err_field_with_address_space);
+ Record->setInvalidDecl();
InvalidDecl = true;
}
+ if (LangOpts.OpenCL) {
+ // OpenCL v1.2 s6.9b,r & OpenCL v2.0 s6.12.5 - The following types cannot be
+ // used as structure or union field: image, sampler, event or block types.
+ if (T->isEventT() || T->isImageType() || T->isSamplerT() ||
+ T->isBlockPointerType()) {
+ Diag(Loc, diag::err_opencl_type_struct_or_union_field) << T;
+ Record->setInvalidDecl();
+ InvalidDecl = true;
+ }
+ // OpenCL v1.2 s6.9.c: bitfields are not supported.
+ if (BitWidth) {
+ Diag(Loc, diag::err_opencl_bitfields);
+ InvalidDecl = true;
+ }
+ }
+
// Anonymous bit-fields cannot be cv-qualified (CWG 2229).
if (!InvalidDecl && getLangOpts().CPlusPlus && !II && BitWidth &&
T.hasQualifiers()) {
@@ -15794,7 +15931,7 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
}
ObjCFieldLifetimeErrReported = true;
}
- } else if (getLangOpts().ObjC1 &&
+ } else if (getLangOpts().ObjC &&
getLangOpts().getGC() != LangOptions::NonGC &&
Record && !Record->hasObjectMember()) {
if (FD->getType()->isObjCObjectPointerType() ||
@@ -16292,8 +16429,10 @@ Decl *Sema::ActOnEnumConstant(Scope *S, Decl *theEnumDecl, Decl *lastEnumConst,
// Verify that there isn't already something declared with this name in this
// scope.
- NamedDecl *PrevDecl = LookupSingleName(S, Id, IdLoc, LookupOrdinaryName,
- ForVisibleRedeclaration);
+ LookupResult R(*this, Id, IdLoc, LookupOrdinaryName, ForVisibleRedeclaration);
+ LookupName(R, S);
+ NamedDecl *PrevDecl = R.getAsSingle<NamedDecl>();
+
if (PrevDecl && PrevDecl->isTemplateParameter()) {
// Maybe we will complain about the shadowed template parameter.
DiagnoseTemplateParameterShadow(IdLoc, PrevDecl);
@@ -16316,6 +16455,11 @@ Decl *Sema::ActOnEnumConstant(Scope *S, Decl *theEnumDecl, Decl *lastEnumConst,
return nullptr;
if (PrevDecl) {
+ if (!TheEnumDecl->isScoped() && isa<ValueDecl>(PrevDecl)) {
+ // Check for other kinds of shadowing not already handled.
+ CheckShadow(New, PrevDecl, R);
+ }
+
// When in C++, we may get a TagDecl with the same name; in this case the
// enum constant will 'hide' the tag.
assert((getLangOpts().CPlusPlus || !isa<TagDecl>(PrevDecl)) &&
@@ -16573,7 +16717,7 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
NumNegativeBits = std::max(NumNegativeBits,
(unsigned)InitVal.getMinSignedBits());
- // Keep track of whether every enum element has type int (very commmon).
+ // Keep track of whether every enum element has type int (very common).
if (AllElementsInt)
AllElementsInt = ECD->getType() == Context.IntTy;
}
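Several hunks above in SemaDecl.cpp replace the file-local MultiVersioning enum with the shared MultiVersionKind and relax the redeclaration checks so that attribute-less forward declarations are accepted as long as they appear before any multiversioned declarations, including when the first attributed declaration is the 'default' version. A sketch of the pattern these relaxed checks appear to admit on an x86 target; the function name is invented for the example:

int compute(int);                                                // plain forward declaration, no attribute
__attribute__((target("default"))) int compute(int);            // first attributed declaration: the 'default' version
__attribute__((target("default"))) int compute(int x) { return x + 1; }
__attribute__((target("avx2")))    int compute(int x) { return x * 2; }  // additional version triggers multiversioning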
diff --git a/lib/Sema/SemaDeclAttr.cpp b/lib/Sema/SemaDeclAttr.cpp
index 17b93e0add..78374b8089 100644
--- a/lib/Sema/SemaDeclAttr.cpp
+++ b/lib/Sema/SemaDeclAttr.cpp
@@ -392,9 +392,49 @@ bool Sema::checkStringLiteralArgumentAttr(const ParsedAttr &AL, unsigned ArgNum,
/// Applies the given attribute to the Decl without performing any
/// additional semantic checking.
template <typename AttrType>
+static void handleSimpleAttribute(Sema &S, Decl *D, SourceRange SR,
+ unsigned SpellingIndex) {
+ D->addAttr(::new (S.Context) AttrType(SR, S.Context, SpellingIndex));
+}
+
+template <typename AttrType>
static void handleSimpleAttribute(Sema &S, Decl *D, const ParsedAttr &AL) {
- D->addAttr(::new (S.Context) AttrType(AL.getRange(), S.Context,
- AL.getAttributeSpellingListIndex()));
+ handleSimpleAttribute<AttrType>(S, D, AL.getRange(),
+ AL.getAttributeSpellingListIndex());
+}
+
+
+template <typename... DiagnosticArgs>
+static const Sema::SemaDiagnosticBuilder&
+appendDiagnostics(const Sema::SemaDiagnosticBuilder &Bldr) {
+ return Bldr;
+}
+
+template <typename T, typename... DiagnosticArgs>
+static const Sema::SemaDiagnosticBuilder&
+appendDiagnostics(const Sema::SemaDiagnosticBuilder &Bldr, T &&ExtraArg,
+ DiagnosticArgs &&... ExtraArgs) {
+ return appendDiagnostics(Bldr << std::forward<T>(ExtraArg),
+ std::forward<DiagnosticArgs>(ExtraArgs)...);
+}
+
+/// Add an attribute {@code AttrType} to declaration {@code D},
+/// provided the given {@code Check} function returns {@code true}
+/// on type of {@code D}.
+/// If check does not pass, emit diagnostic {@code DiagID},
+/// passing in all parameters specified in {@code ExtraArgs}.
+template <typename AttrType, typename... DiagnosticArgs>
+static void
+handleSimpleAttributeWithCheck(Sema &S, ValueDecl *D, SourceRange SR,
+ unsigned SpellingIndex,
+ llvm::function_ref<bool(QualType)> Check,
+ unsigned DiagID, DiagnosticArgs... ExtraArgs) {
+ if (!Check(D->getType())) {
+ Sema::SemaDiagnosticBuilder DB = S.Diag(D->getBeginLoc(), DiagID);
+ appendDiagnostics(DB, std::forward<DiagnosticArgs>(ExtraArgs)...);
+ return;
+ }
+ handleSimpleAttribute<AttrType>(S, D, SR, SpellingIndex);
}
template <typename AttrType>
@@ -2410,6 +2450,15 @@ static void handleAvailabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (const auto *SE = dyn_cast_or_null<StringLiteral>(AL.getReplacementExpr()))
Replacement = SE->getString();
+ if (II->isStr("swift")) {
+ if (Introduced.isValid() || Obsoleted.isValid() ||
+ (!IsUnavailable && !Deprecated.isValid())) {
+ S.Diag(AL.getLoc(),
+ diag::warn_availability_swift_unavailable_deprecated_only);
+ return;
+ }
+ }
+
AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(ND, AL.getRange(), II,
false/*Implicit*/,
Introduced.Version,
@@ -4143,7 +4192,7 @@ static void handleSharedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
const auto *VD = cast<VarDecl>(D);
// extern __shared__ is only allowed on arrays with no length (e.g.
// "int x[]").
- if (!S.getLangOpts().CUDARelocatableDeviceCode && VD->hasExternalStorage() &&
+ if (!S.getLangOpts().GPURelocatableDeviceCode && VD->hasExternalStorage() &&
!isa<IncompleteArrayType>(VD->getType())) {
S.Diag(AL.getLoc(), diag::err_cuda_extern_shared) << VD;
return;
@@ -4282,6 +4331,11 @@ static void handleCallConvAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
AL.getAttributeSpellingListIndex()));
return;
}
+ case ParsedAttr::AT_AArch64VectorPcs:
+ D->addAttr(::new(S.Context)
+ AArch64VectorPcsAttr(AL.getRange(), S.Context,
+ AL.getAttributeSpellingListIndex()));
+ return;
case ParsedAttr::AT_IntelOclBicc:
D->addAttr(::new (S.Context)
IntelOclBiccAttr(AL.getRange(), S.Context,
@@ -4359,6 +4413,9 @@ bool Sema::CheckCallingConvAttr(const ParsedAttr &Attrs, CallingConv &CC,
case ParsedAttr::AT_VectorCall:
CC = CC_X86VectorCall;
break;
+ case ParsedAttr::AT_AArch64VectorPcs:
+ CC = CC_AArch64VectorCall;
+ break;
case ParsedAttr::AT_RegCall:
CC = CC_X86RegCall;
break;
@@ -4694,58 +4751,70 @@ static void handleXRayLogArgsAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
//===----------------------------------------------------------------------===//
// Checker-specific attribute handlers.
//===----------------------------------------------------------------------===//
-
static bool isValidSubjectOfNSReturnsRetainedAttribute(QualType QT) {
return QT->isDependentType() || QT->isObjCRetainableType();
}
-static bool isValidSubjectOfNSAttribute(Sema &S, QualType QT) {
+static bool isValidSubjectOfNSAttribute(QualType QT) {
return QT->isDependentType() || QT->isObjCObjectPointerType() ||
- S.Context.isObjCNSObjectType(QT);
+ QT->isObjCNSObjectType();
}
-static bool isValidSubjectOfCFAttribute(Sema &S, QualType QT) {
+static bool isValidSubjectOfCFAttribute(QualType QT) {
return QT->isDependentType() || QT->isPointerType() ||
- isValidSubjectOfNSAttribute(S, QT);
+ isValidSubjectOfNSAttribute(QT);
}
-static void handleNSConsumedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- S.AddNSConsumedAttr(AL.getRange(), D, AL.getAttributeSpellingListIndex(),
- AL.getKind() == ParsedAttr::AT_NSConsumed,
- /*template instantiation*/ false);
+static bool isValidSubjectOfOSAttribute(QualType QT) {
+ return QT->isDependentType() || QT->isPointerType();
}
-void Sema::AddNSConsumedAttr(SourceRange AttrRange, Decl *D,
- unsigned SpellingIndex, bool IsNSConsumed,
- bool IsTemplateInstantiation) {
- const auto *Param = cast<ParmVarDecl>(D);
- bool TypeOK;
-
- if (IsNSConsumed)
- TypeOK = isValidSubjectOfNSAttribute(*this, Param->getType());
- else
- TypeOK = isValidSubjectOfCFAttribute(*this, Param->getType());
+void Sema::AddXConsumedAttr(Decl *D, SourceRange SR, unsigned SpellingIndex,
+ RetainOwnershipKind K,
+ bool IsTemplateInstantiation) {
+ ValueDecl *VD = cast<ValueDecl>(D);
+ switch (K) {
+ case RetainOwnershipKind::OS:
+ handleSimpleAttributeWithCheck<OSConsumedAttr>(
+ *this, VD, SR, SpellingIndex, &isValidSubjectOfOSAttribute,
+ diag::warn_ns_attribute_wrong_parameter_type,
+ /*ExtraArgs=*/SR, "os_consumed", /*pointers*/ 1);
+ return;
+ case RetainOwnershipKind::NS:
+ handleSimpleAttributeWithCheck<NSConsumedAttr>(
+ *this, VD, SR, SpellingIndex, &isValidSubjectOfNSAttribute,
- if (!TypeOK) {
- // These attributes are normally just advisory, but in ARC, ns_consumed
- // is significant. Allow non-dependent code to contain inappropriate
- // attributes even in ARC, but require template instantiations to be
- // set up correctly.
- Diag(D->getBeginLoc(), (IsTemplateInstantiation && IsNSConsumed &&
- getLangOpts().ObjCAutoRefCount
- ? diag::err_ns_attribute_wrong_parameter_type
- : diag::warn_ns_attribute_wrong_parameter_type))
- << AttrRange << (IsNSConsumed ? "ns_consumed" : "cf_consumed")
- << (IsNSConsumed ? /*objc pointers*/ 0 : /*cf pointers*/ 1);
+ // These attributes are normally just advisory, but in ARC, ns_consumed
+ // is significant. Allow non-dependent code to contain inappropriate
+ // attributes even in ARC, but require template instantiations to be
+ // set up correctly.
+ ((IsTemplateInstantiation && getLangOpts().ObjCAutoRefCount)
+ ? diag::err_ns_attribute_wrong_parameter_type
+ : diag::warn_ns_attribute_wrong_parameter_type),
+ /*ExtraArgs=*/SR, "ns_consumed", /*objc pointers*/ 0);
+ return;
+ case RetainOwnershipKind::CF:
+ handleSimpleAttributeWithCheck<CFConsumedAttr>(
+ *this, VD, SR, SpellingIndex,
+ &isValidSubjectOfCFAttribute,
+ diag::warn_ns_attribute_wrong_parameter_type,
+ /*ExtraArgs=*/SR, "cf_consumed", /*pointers*/1);
return;
}
+}
- if (IsNSConsumed)
- D->addAttr(::new (Context)
- NSConsumedAttr(AttrRange, Context, SpellingIndex));
- else
- D->addAttr(::new (Context)
- CFConsumedAttr(AttrRange, Context, SpellingIndex));
+static Sema::RetainOwnershipKind
+parsedAttrToRetainOwnershipKind(const ParsedAttr &AL) {
+ switch (AL.getKind()) {
+ case ParsedAttr::AT_CFConsumed:
+ return Sema::RetainOwnershipKind::CF;
+ case ParsedAttr::AT_OSConsumed:
+ return Sema::RetainOwnershipKind::OS;
+ case ParsedAttr::AT_NSConsumed:
+ return Sema::RetainOwnershipKind::NS;
+ default:
+ llvm_unreachable("Wrong argument supplied");
+ }
}
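The consolidated AddXConsumedAttr above routes the ns_consumed, cf_consumed and os_consumed spellings through one path keyed by RetainOwnershipKind. A minimal user-level sketch of the spellings involved (illustrative only; OSObject is a placeholder type, not part of the patch):

    struct OSObject;                                           // placeholder
    void releaseCF(void *ref __attribute__((cf_consumed)));    // CF: any pointer parameter
    void releaseOS(OSObject *o __attribute__((os_consumed)));  // OS: any pointer parameter
    // ns_consumed additionally requires an Objective-C object pointer parameter.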
bool Sema::checkNSReturnsRetainedReturnType(SourceLocation Loc, QualType QT) {
@@ -4757,24 +4826,26 @@ bool Sema::checkNSReturnsRetainedReturnType(SourceLocation Loc, QualType QT) {
return true;
}
-static void handleNSReturnsRetainedAttr(Sema &S, Decl *D,
+static void handleXReturnsXRetainedAttr(Sema &S, Decl *D,
const ParsedAttr &AL) {
QualType ReturnType;
- if (const auto *MD = dyn_cast<ObjCMethodDecl>(D))
+ if (const auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
ReturnType = MD->getReturnType();
- else if (S.getLangOpts().ObjCAutoRefCount && hasDeclarator(D) &&
- (AL.getKind() == ParsedAttr::AT_NSReturnsRetained))
+ } else if (S.getLangOpts().ObjCAutoRefCount && hasDeclarator(D) &&
+ (AL.getKind() == ParsedAttr::AT_NSReturnsRetained)) {
return; // ignore: was handled as a type attribute
- else if (const auto *PD = dyn_cast<ObjCPropertyDecl>(D))
+ } else if (const auto *PD = dyn_cast<ObjCPropertyDecl>(D)) {
ReturnType = PD->getType();
- else if (const auto *FD = dyn_cast<FunctionDecl>(D))
+ } else if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
ReturnType = FD->getReturnType();
- else if (const auto *Param = dyn_cast<ParmVarDecl>(D)) {
+ } else if (const auto *Param = dyn_cast<ParmVarDecl>(D)) {
+ // Attributes on parameters are used for out-parameters,
+ // passed as pointers-to-pointers.
ReturnType = Param->getType()->getPointeeType();
if (ReturnType.isNull()) {
S.Diag(D->getBeginLoc(), diag::warn_ns_attribute_wrong_parameter_type)
- << AL << /*pointer-to-CF*/ 2 << AL.getRange();
+ << AL << /*pointer-to-CF-pointer*/ 2 << AL.getRange();
return;
}
} else if (AL.isUsedAsTypeAttr()) {
@@ -4786,6 +4857,8 @@ static void handleNSReturnsRetainedAttr(Sema &S, Decl *D,
case ParsedAttr::AT_NSReturnsRetained:
case ParsedAttr::AT_NSReturnsAutoreleased:
case ParsedAttr::AT_NSReturnsNotRetained:
+ case ParsedAttr::AT_OSReturnsRetained:
+ case ParsedAttr::AT_OSReturnsNotRetained:
ExpectedDeclKind = ExpectedFunctionOrMethod;
break;
@@ -4810,13 +4883,19 @@ static void handleNSReturnsRetainedAttr(Sema &S, Decl *D,
case ParsedAttr::AT_NSReturnsAutoreleased:
case ParsedAttr::AT_NSReturnsNotRetained:
- TypeOK = isValidSubjectOfNSAttribute(S, ReturnType);
+ TypeOK = isValidSubjectOfNSAttribute(ReturnType);
Cf = false;
break;
case ParsedAttr::AT_CFReturnsRetained:
case ParsedAttr::AT_CFReturnsNotRetained:
- TypeOK = isValidSubjectOfCFAttribute(S, ReturnType);
+ TypeOK = isValidSubjectOfCFAttribute(ReturnType);
+ Cf = true;
+ break;
+
+ case ParsedAttr::AT_OSReturnsRetained:
+ case ParsedAttr::AT_OSReturnsNotRetained:
+ TypeOK = isValidSubjectOfOSAttribute(ReturnType);
Cf = true;
break;
}
@@ -4849,24 +4928,25 @@ static void handleNSReturnsRetainedAttr(Sema &S, Decl *D,
default:
llvm_unreachable("invalid ownership attribute");
case ParsedAttr::AT_NSReturnsAutoreleased:
- D->addAttr(::new (S.Context) NSReturnsAutoreleasedAttr(
- AL.getRange(), S.Context, AL.getAttributeSpellingListIndex()));
+ handleSimpleAttribute<NSReturnsAutoreleasedAttr>(S, D, AL);
return;
case ParsedAttr::AT_CFReturnsNotRetained:
- D->addAttr(::new (S.Context) CFReturnsNotRetainedAttr(
- AL.getRange(), S.Context, AL.getAttributeSpellingListIndex()));
+ handleSimpleAttribute<CFReturnsNotRetainedAttr>(S, D, AL);
return;
case ParsedAttr::AT_NSReturnsNotRetained:
- D->addAttr(::new (S.Context) NSReturnsNotRetainedAttr(
- AL.getRange(), S.Context, AL.getAttributeSpellingListIndex()));
+ handleSimpleAttribute<NSReturnsNotRetainedAttr>(S, D, AL);
return;
case ParsedAttr::AT_CFReturnsRetained:
- D->addAttr(::new (S.Context) CFReturnsRetainedAttr(
- AL.getRange(), S.Context, AL.getAttributeSpellingListIndex()));
+ handleSimpleAttribute<CFReturnsRetainedAttr>(S, D, AL);
return;
case ParsedAttr::AT_NSReturnsRetained:
- D->addAttr(::new (S.Context) NSReturnsRetainedAttr(
- AL.getRange(), S.Context, AL.getAttributeSpellingListIndex()));
+ handleSimpleAttribute<NSReturnsRetainedAttr>(S, D, AL);
+ return;
+ case ParsedAttr::AT_OSReturnsRetained:
+ handleSimpleAttribute<OSReturnsRetainedAttr>(S, D, AL);
+ return;
+ case ParsedAttr::AT_OSReturnsNotRetained:
+ handleSimpleAttribute<OSReturnsNotRetainedAttr>(S, D, AL);
return;
};
}
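handleXReturnsXRetainedAttr now also accepts the os_returns_retained / os_returns_not_retained spellings, validated with isValidSubjectOfOSAttribute (any pointer return type). A hedged sketch; OSObject is again a stand-in:

    struct OSObject;
    __attribute__((os_returns_retained))     OSObject *createObject();  // caller owns the result
    __attribute__((os_returns_not_retained)) OSObject *currentObject(); // caller does not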
@@ -5834,10 +5914,8 @@ static void handleDeprecatedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
!S.checkStringLiteralArgumentAttr(AL, 1, Replacement))
return;
- if (!S.getLangOpts().CPlusPlus14)
- if (AL.isCXX11Attribute() &&
- !(AL.hasScope() && AL.getScopeName()->isStr("gnu")))
- S.Diag(AL.getLoc(), diag::ext_cxx14_attr) << AL;
+ if (!S.getLangOpts().CPlusPlus14 && AL.isCXX11Attribute() && !AL.isGNUScope())
+ S.Diag(AL.getLoc(), diag::ext_cxx14_attr) << AL;
D->addAttr(::new (S.Context)
DeprecatedAttr(AL.getRange(), S.Context, Str, Replacement,
@@ -6322,17 +6400,25 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
break;
case ParsedAttr::AT_CFConsumed:
case ParsedAttr::AT_NSConsumed:
- handleNSConsumedAttr(S, D, AL);
+ case ParsedAttr::AT_OSConsumed:
+ S.AddXConsumedAttr(D, AL.getRange(), AL.getAttributeSpellingListIndex(),
+ parsedAttrToRetainOwnershipKind(AL),
+ /*IsTemplateInstantiation=*/false);
break;
case ParsedAttr::AT_NSConsumesSelf:
handleSimpleAttribute<NSConsumesSelfAttr>(S, D, AL);
break;
+ case ParsedAttr::AT_OSConsumesThis:
+ handleSimpleAttribute<OSConsumesThisAttr>(S, D, AL);
+ break;
case ParsedAttr::AT_NSReturnsAutoreleased:
case ParsedAttr::AT_NSReturnsNotRetained:
- case ParsedAttr::AT_CFReturnsNotRetained:
case ParsedAttr::AT_NSReturnsRetained:
+ case ParsedAttr::AT_CFReturnsNotRetained:
case ParsedAttr::AT_CFReturnsRetained:
- handleNSReturnsRetainedAttr(S, D, AL);
+ case ParsedAttr::AT_OSReturnsNotRetained:
+ case ParsedAttr::AT_OSReturnsRetained:
+ handleXReturnsXRetainedAttr(S, D, AL);
break;
case ParsedAttr::AT_WorkGroupSizeHint:
handleWorkGroupSize<WorkGroupSizeHintAttr>(S, D, AL);
@@ -6358,6 +6444,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_Section:
handleSectionAttr(S, D, AL);
break;
+ case ParsedAttr::AT_SpeculativeLoadHardening:
+ handleSimpleAttribute<SpeculativeLoadHardeningAttr>(S, D, AL);
+ break;
case ParsedAttr::AT_CodeSeg:
handleCodeSegAttr(S, D, AL);
break;
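For reference, the new SpeculativeLoadHardening case corresponds to the function-level spelling sketched below (not taken from the patch):

    __attribute__((speculative_load_hardening))
    int lookup(const int *table, int untrusted_index);   // request SLH mitigation for this function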
@@ -6486,6 +6575,7 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_IntelOclBicc:
case ParsedAttr::AT_PreserveMost:
case ParsedAttr::AT_PreserveAll:
+ case ParsedAttr::AT_AArch64VectorPcs:
handleCallConvAttr(S, D, AL);
break;
case ParsedAttr::AT_Suppress:
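The AArch64VectorPcs case added above plugs the aarch64_vector_pcs spelling into the existing calling-convention handler; roughly:

    void saxpy(float *dst, const float *src, float a, int n)
        __attribute__((aarch64_vector_pcs));   // use the AArch64 vector procedure call standard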
@@ -6512,6 +6602,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_InternalLinkage:
handleInternalLinkageAttr(S, D, AL);
break;
+ case ParsedAttr::AT_ExcludeFromExplicitInstantiation:
+ handleSimpleAttribute<ExcludeFromExplicitInstantiationAttr>(S, D, AL);
+ break;
case ParsedAttr::AT_LTOVisibilityPublic:
handleSimpleAttribute<LTOVisibilityPublicAttr>(S, D, AL);
break;
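ExcludeFromExplicitInstantiation is handled as a simple attribute; the intended use (as in libc++) is to keep selected members out of extern/explicit instantiations. Illustrative sketch:

    template <class T>
    struct Widget {
      __attribute__((exclude_from_explicit_instantiation)) void debug_dump();
      void run();
    };
    extern template struct Widget<int>;   // run() is covered, debug_dump() is not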
@@ -7732,7 +7825,7 @@ public:
bool VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
SemaRef.Diag(E->getBeginLoc(), diag::warn_at_available_unchecked_use)
- << (!SemaRef.getLangOpts().ObjC1);
+ << (!SemaRef.getLangOpts().ObjC);
return true;
}
@@ -7787,8 +7880,8 @@ void DiagnoseUnguardedAvailability::DiagnoseDeclAvailability(
auto FixitDiag =
SemaRef.Diag(Range.getBegin(), diag::note_unguarded_available_silence)
<< Range << D
- << (SemaRef.getLangOpts().ObjC1 ? /*@available*/ 0
- : /*__builtin_available*/ 1);
+ << (SemaRef.getLangOpts().ObjC ? /*@available*/ 0
+ : /*__builtin_available*/ 1);
// Find the statement which should be enclosed in the if @available check.
if (StmtStack.empty())
@@ -7832,8 +7925,8 @@ void DiagnoseUnguardedAvailability::DiagnoseDeclAvailability(
const char *ExtraIndentation = " ";
std::string FixItString;
llvm::raw_string_ostream FixItOS(FixItString);
- FixItOS << "if (" << (SemaRef.getLangOpts().ObjC1 ? "@available"
- : "__builtin_available")
+ FixItOS << "if (" << (SemaRef.getLangOpts().ObjC ? "@available"
+ : "__builtin_available")
<< "("
<< AvailabilityAttr::getPlatformNameSourceSpelling(
SemaRef.getASTContext().getTargetInfo().getPlatformName())
diff --git a/lib/Sema/SemaDeclCXX.cpp b/lib/Sema/SemaDeclCXX.cpp
index 3261a7031d..4a7ab11c71 100644
--- a/lib/Sema/SemaDeclCXX.cpp
+++ b/lib/Sema/SemaDeclCXX.cpp
@@ -1803,7 +1803,7 @@ static void CheckConstexprCtorInitializer(Sema &SemaRef,
static bool
CheckConstexprFunctionStmt(Sema &SemaRef, const FunctionDecl *Dcl, Stmt *S,
SmallVectorImpl<SourceLocation> &ReturnStmts,
- SourceLocation &Cxx1yLoc) {
+ SourceLocation &Cxx1yLoc, SourceLocation &Cxx2aLoc) {
// - its function-body shall be [...] a compound-statement that contains only
switch (S->getStmtClass()) {
case Stmt::NullStmtClass:
@@ -1840,7 +1840,7 @@ CheckConstexprFunctionStmt(Sema &SemaRef, const FunctionDecl *Dcl, Stmt *S,
CompoundStmt *CompStmt = cast<CompoundStmt>(S);
for (auto *BodyIt : CompStmt->body()) {
if (!CheckConstexprFunctionStmt(SemaRef, Dcl, BodyIt, ReturnStmts,
- Cxx1yLoc))
+ Cxx1yLoc, Cxx2aLoc))
return false;
}
return true;
@@ -1858,11 +1858,11 @@ CheckConstexprFunctionStmt(Sema &SemaRef, const FunctionDecl *Dcl, Stmt *S,
IfStmt *If = cast<IfStmt>(S);
if (!CheckConstexprFunctionStmt(SemaRef, Dcl, If->getThen(), ReturnStmts,
- Cxx1yLoc))
+ Cxx1yLoc, Cxx2aLoc))
return false;
if (If->getElse() &&
!CheckConstexprFunctionStmt(SemaRef, Dcl, If->getElse(), ReturnStmts,
- Cxx1yLoc))
+ Cxx1yLoc, Cxx2aLoc))
return false;
return true;
}
@@ -1881,7 +1881,7 @@ CheckConstexprFunctionStmt(Sema &SemaRef, const FunctionDecl *Dcl, Stmt *S,
for (Stmt *SubStmt : S->children())
if (SubStmt &&
!CheckConstexprFunctionStmt(SemaRef, Dcl, SubStmt, ReturnStmts,
- Cxx1yLoc))
+ Cxx1yLoc, Cxx2aLoc))
return false;
return true;
@@ -1896,10 +1896,30 @@ CheckConstexprFunctionStmt(Sema &SemaRef, const FunctionDecl *Dcl, Stmt *S,
for (Stmt *SubStmt : S->children())
if (SubStmt &&
!CheckConstexprFunctionStmt(SemaRef, Dcl, SubStmt, ReturnStmts,
- Cxx1yLoc))
+ Cxx1yLoc, Cxx2aLoc))
return false;
return true;
+ case Stmt::CXXTryStmtClass:
+ if (Cxx2aLoc.isInvalid())
+ Cxx2aLoc = S->getBeginLoc();
+ for (Stmt *SubStmt : S->children()) {
+ if (SubStmt &&
+ !CheckConstexprFunctionStmt(SemaRef, Dcl, SubStmt, ReturnStmts,
+ Cxx1yLoc, Cxx2aLoc))
+ return false;
+ }
+ return true;
+
+ case Stmt::CXXCatchStmtClass:
+ // Do not bother checking the language mode (already covered by the
+ // try block check).
+ if (!CheckConstexprFunctionStmt(SemaRef, Dcl,
+ cast<CXXCatchStmt>(S)->getHandlerBlock(),
+ ReturnStmts, Cxx1yLoc, Cxx2aLoc))
+ return false;
+ return true;
+
default:
if (!isa<Expr>(S))
break;
@@ -1920,6 +1940,8 @@ CheckConstexprFunctionStmt(Sema &SemaRef, const FunctionDecl *Dcl, Stmt *S,
///
/// \return true if the body is OK, false if we have diagnosed a problem.
bool Sema::CheckConstexprFunctionBody(const FunctionDecl *Dcl, Stmt *Body) {
+ SmallVector<SourceLocation, 4> ReturnStmts;
+
if (isa<CXXTryStmt>(Body)) {
// C++11 [dcl.constexpr]p3:
// The definition of a constexpr function shall satisfy the following
@@ -1930,22 +1952,35 @@ bool Sema::CheckConstexprFunctionBody(const FunctionDecl *Dcl, Stmt *Body) {
// C++11 [dcl.constexpr]p4:
// In the definition of a constexpr constructor, [...]
// - its function-body shall not be a function-try-block;
- Diag(Body->getBeginLoc(), diag::err_constexpr_function_try_block)
+ //
+ // This restriction is lifted in C++2a, as long as inner statements also
+ // apply the general constexpr rules.
+ Diag(Body->getBeginLoc(),
+ !getLangOpts().CPlusPlus2a
+ ? diag::ext_constexpr_function_try_block_cxx2a
+ : diag::warn_cxx17_compat_constexpr_function_try_block)
<< isa<CXXConstructorDecl>(Dcl);
- return false;
}
- SmallVector<SourceLocation, 4> ReturnStmts;
-
// - its function-body shall be [...] a compound-statement that contains only
// [... list of cases ...]
- CompoundStmt *CompBody = cast<CompoundStmt>(Body);
- SourceLocation Cxx1yLoc;
- for (auto *BodyIt : CompBody->body()) {
- if (!CheckConstexprFunctionStmt(*this, Dcl, BodyIt, ReturnStmts, Cxx1yLoc))
+ //
+ // Note that walking the children here is enough to properly check for
+ // CompoundStmt and CXXTryStmt body.
+ SourceLocation Cxx1yLoc, Cxx2aLoc;
+ for (Stmt *SubStmt : Body->children()) {
+ if (SubStmt &&
+ !CheckConstexprFunctionStmt(*this, Dcl, SubStmt, ReturnStmts,
+ Cxx1yLoc, Cxx2aLoc))
return false;
}
+ if (Cxx2aLoc.isValid())
+ Diag(Cxx2aLoc,
+ getLangOpts().CPlusPlus2a
+ ? diag::warn_cxx17_compat_constexpr_body_invalid_stmt
+ : diag::ext_constexpr_body_invalid_stmt_cxx2a)
+ << isa<CXXConstructorDecl>(Dcl);
if (Cxx1yLoc.isValid())
Diag(Cxx1yLoc,
getLangOpts().CPlusPlus14
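Net effect of the hunks above: a constexpr function-try-block is no longer a hard error; it is an extension before C++2a and a compatibility warning in C++2a mode, and try/catch bodies are recursed into like any other statement. A small sketch of code this starts to accept:

    constexpr int parse(int v) try {
      return v + 1;
    } catch (...) {       // still may not actually throw during constant evaluation
      return -1;
    }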
@@ -2832,13 +2867,14 @@ static const ParsedAttr *getMSPropertyAttr(const ParsedAttributesView &list) {
return nullptr;
}
-// Check if there is a field shadowing.
-void Sema::CheckShadowInheritedFields(const SourceLocation &Loc,
- DeclarationName FieldName,
- const CXXRecordDecl *RD) {
- if (Diags.isIgnored(diag::warn_shadow_field, Loc))
- return;
-
+// Check if there is a field shadowing.
+void Sema::CheckShadowInheritedFields(const SourceLocation &Loc,
+ DeclarationName FieldName,
+ const CXXRecordDecl *RD,
+ bool DeclIsField) {
+ if (Diags.isIgnored(diag::warn_shadow_field, Loc))
+ return;
+
// To record a shadowed field in a base
std::map<CXXRecordDecl*, NamedDecl*> Bases;
auto FieldShadowed = [&](const CXXBaseSpecifier *Specifier,
@@ -2872,13 +2908,13 @@ void Sema::CheckShadowInheritedFields(const SourceLocation &Loc,
continue;
auto BaseField = It->second;
assert(BaseField->getAccess() != AS_private);
- if (AS_none !=
- CXXRecordDecl::MergeAccess(P.Access, BaseField->getAccess())) {
- Diag(Loc, diag::warn_shadow_field)
- << FieldName << RD << Base;
- Diag(BaseField->getLocation(), diag::note_shadow_field);
- Bases.erase(It);
- }
+ if (AS_none !=
+ CXXRecordDecl::MergeAccess(P.Access, BaseField->getAccess())) {
+ Diag(Loc, diag::warn_shadow_field)
+ << FieldName << RD << Base << DeclIsField;
+ Diag(BaseField->getLocation(), diag::note_shadow_field);
+ Bases.erase(It);
+ }
}
}
@@ -3232,7 +3268,7 @@ namespace {
ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParenImpCasts());
}
- // Binding a reference to an unintialized field is not an
+ // Binding a reference to an uninitialized field is not an
// uninitialized use.
if (CheckReferenceOnly && !ReferenceField)
return true;
@@ -3730,8 +3766,7 @@ Sema::ActOnMemInitializer(Decl *ConstructorD,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc) {
- Expr *List = new (Context) ParenListExpr(Context, LParenLoc,
- Args, RParenLoc);
+ Expr *List = ParenListExpr::Create(Context, LParenLoc, Args, RParenLoc);
return BuildMemInitializer(ConstructorD, S, SS, MemberOrBase, TemplateTypeTy,
DS, IdLoc, List, EllipsisLoc);
}
@@ -5706,8 +5741,28 @@ void Sema::checkClassLevelDLLAttribute(CXXRecordDecl *Class) {
continue;
if (!getDLLAttr(Member)) {
- auto *NewAttr =
- cast<InheritableAttr>(ClassAttr->clone(getASTContext()));
+ InheritableAttr *NewAttr = nullptr;
+
+ // Do not export/import inline function when -fno-dllexport-inlines is
+ // passed. But add attribute for later local static var check.
+ if (!getLangOpts().DllExportInlines && MD && MD->isInlined() &&
+ TSK != TSK_ExplicitInstantiationDeclaration &&
+ TSK != TSK_ExplicitInstantiationDefinition) {
+ if (ClassExported) {
+ NewAttr = ::new (getASTContext())
+ DLLExportStaticLocalAttr(ClassAttr->getRange(),
+ getASTContext(),
+ ClassAttr->getSpellingListIndex());
+ } else {
+ NewAttr = ::new (getASTContext())
+ DLLImportStaticLocalAttr(ClassAttr->getRange(),
+ getASTContext(),
+ ClassAttr->getSpellingListIndex());
+ }
+ } else {
+ NewAttr = cast<InheritableAttr>(ClassAttr->clone(getASTContext()));
+ }
+
NewAttr->setInherited(true);
Member->addAttr(NewAttr);
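A hedged sketch of the situation the new branch targets, using the -fno-dllexport-inlines behaviour described in the comment: the inline member itself is no longer dll-exported, but it still receives the StaticLocal marker attribute so its function-local static can be handled later.

    struct __declspec(dllexport) Counter {
      int next() {            // inline member: not exported when -fno-dllexport-inlines is in effect
        static int n = 0;     // the static local is why the marker attribute is still attached
        return ++n;
      }
    };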
@@ -7222,8 +7277,17 @@ bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
if (getLangOpts().CUDA) {
// We should delete the special member in CUDA mode if target inference
// failed.
- return inferCUDATargetForImplicitSpecialMember(RD, CSM, MD, SMI.ConstArg,
- Diagnose);
+ // For inherited constructors (non-null ICI), CSM may be passed so that MD
+ // is treated as certain special member, which may not reflect what special
+ // member MD really is. However inferCUDATargetForImplicitSpecialMember
+ // expects CSM to match MD, therefore recalculate CSM.
+ assert(ICI || CSM == getSpecialMember(MD));
+ auto RealCSM = CSM;
+ if (ICI)
+ RealCSM = getSpecialMember(MD);
+
+ return inferCUDATargetForImplicitSpecialMember(RD, RealCSM, MD,
+ SMI.ConstArg, Diagnose);
}
return false;
@@ -7654,7 +7718,7 @@ struct FindHiddenVirtualMethod {
SmallVector<CXXMethodDecl *, 8> OverloadedMethods;
private:
- /// Check whether any most overriden method from MD in Methods
+ /// Check whether any most overridden method from MD in Methods
static bool CheckMostOverridenMethods(
const CXXMethodDecl *MD,
const llvm::SmallPtrSetImpl<const CXXMethodDecl *> &Methods) {
@@ -7738,7 +7802,7 @@ void Sema::FindHiddenVirtualMethods(CXXMethodDecl *MD,
FHVM.Method = MD;
FHVM.S = this;
- // Keep the base methods that were overriden or introduced in the subclass
+ // Keep the base methods that were overridden or introduced in the subclass
// by 'using' in a set. A base method not in this set is hidden.
CXXRecordDecl *DC = MD->getParent();
DeclContext::lookup_result R = DC->lookup(MD->getDeclName());
@@ -8135,7 +8199,7 @@ QualType Sema::CheckConstructorDeclarator(Declarator &D, QualType R,
return R;
FunctionProtoType::ExtProtoInfo EPI = Proto->getExtProtoInfo();
- EPI.TypeQuals = 0;
+ EPI.TypeQuals = Qualifiers();
EPI.RefQualifier = RQ_None;
return Context.getFunctionType(Context.VoidTy, Proto->getParamTypes(), EPI);
@@ -8341,7 +8405,7 @@ QualType Sema::CheckDestructorDeclarator(Declarator &D, QualType R,
const FunctionProtoType *Proto = R->getAs<FunctionProtoType>();
FunctionProtoType::ExtProtoInfo EPI = Proto->getExtProtoInfo();
EPI.Variadic = false;
- EPI.TypeQuals = 0;
+ EPI.TypeQuals = Qualifiers();
EPI.RefQualifier = RQ_None;
return Context.getFunctionType(Context.VoidTy, None, EPI);
}
@@ -11927,7 +11991,7 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
// Dereference "this".
DerefBuilder DerefThis(This);
CastBuilder To(DerefThis,
- Context.getCVRQualifiedType(
+ Context.getQualifiedType(
BaseType, CopyAssignOperator->getTypeQualifiers()),
VK_LValue, BasePath);
@@ -12294,7 +12358,7 @@ void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
// Implicitly cast "this" to the appropriately-qualified base type.
CastBuilder To(DerefThis,
- Context.getCVRQualifiedType(
+ Context.getQualifiedType(
BaseType, MoveAssignOperator->getTypeQualifiers()),
VK_LValue, BasePath);
@@ -13679,7 +13743,7 @@ VarDecl *Sema::BuildExceptionDeclaration(Scope *S,
// Only the non-fragile NeXT runtime currently supports C++ catches
// of ObjC types, and no runtime supports catching ObjC types by value.
- if (!Invalid && getLangOpts().ObjC1) {
+ if (!Invalid && getLangOpts().ObjC) {
QualType T = ExDeclType;
if (const ReferenceType *RT = T->getAs<ReferenceType>())
T = RT->getPointeeType();
@@ -13831,6 +13895,8 @@ Decl *Sema::BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
ExprResult Converted = PerformContextuallyConvertToBool(AssertExpr);
if (Converted.isInvalid())
Failed = true;
+ else
+ Converted = ConstantExpr::Create(Context, Converted.get());
llvm::APSInt Cond;
if (!Failed && VerifyIntegerConstantExpression(Converted.get(), &Cond,
@@ -13847,9 +13913,9 @@ Decl *Sema::BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *InnerCond = nullptr;
std::string InnerCondDescription;
std::tie(InnerCond, InnerCondDescription) =
- findFailedBooleanCondition(Converted.get(),
- /*AllowTopLevelCond=*/false);
- if (InnerCond) {
+ findFailedBooleanCondition(Converted.get());
+ if (InnerCond && !isa<CXXBoolLiteralExpr>(InnerCond)
+ && !isa<IntegerLiteral>(InnerCond)) {
Diag(StaticAssertLoc, diag::err_static_assert_requirement_failed)
<< InnerCondDescription << !AssertMessage
<< Msg.str() << InnerCond->getSourceRange();
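With the literal check added above, the note naming the failed operand is suppressed when the condition boils down to a plain bool or integer literal, since repeating it adds nothing. Sketch:

    static_assert(sizeof(long) == 8 && sizeof(int) == 2, "unexpected ABI");
    // the diagnostic points at the operand that failed, here 'sizeof(int) == 2'
    static_assert(false, "unconditional");   // no extra note; the condition is just a literal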
@@ -14937,8 +15003,11 @@ void Sema::MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
// region.
if (LangOpts.OpenMP && LangOpts.OpenMPIsDevice &&
!isInOpenMPDeclareTargetContext() &&
- !isInOpenMPTargetExecutionDirective())
+ !isInOpenMPTargetExecutionDirective()) {
+ if (!DefinitionRequired)
+ MarkVirtualMembersReferenced(Loc, Class);
return;
+ }
// Try to insert this class into the map.
LoadExternalVTableUses();
diff --git a/lib/Sema/SemaDeclObjC.cpp b/lib/Sema/SemaDeclObjC.cpp
index ac1d8cf7a3..3746bdad03 100644
--- a/lib/Sema/SemaDeclObjC.cpp
+++ b/lib/Sema/SemaDeclObjC.cpp
@@ -363,6 +363,8 @@ void Sema::ActOnStartOfObjCMethodDef(Scope *FnBodyScope, Decl *D) {
assert((getCurMethodDecl() == nullptr) && "Methodparsing confused");
ObjCMethodDecl *MDecl = dyn_cast_or_null<ObjCMethodDecl>(D);
+ PushExpressionEvaluationContext(ExprEvalContexts.back().Context);
+
// If we don't have a valid method decl, simply return.
if (!MDecl)
return;
@@ -2882,7 +2884,7 @@ void Sema::MatchAllMethodDeclarations(const SelectorSet &InsMap,
IMPDecl, PI, IncompleteImpl, false,
WarnCategoryMethodImpl);
- // FIXME. For now, we are not checking for extact match of methods
+ // FIXME. For now, we are not checking for exact match of methods
// in category implementation and its primary class's super class.
if (!WarnCategoryMethodImpl && I->getSuperClass())
MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen,
diff --git a/lib/Sema/SemaExceptionSpec.cpp b/lib/Sema/SemaExceptionSpec.cpp
index 4de205c504..e0850feaff 100644
--- a/lib/Sema/SemaExceptionSpec.cpp
+++ b/lib/Sema/SemaExceptionSpec.cpp
@@ -1051,6 +1051,9 @@ CanThrowResult Sema::canThrow(const Expr *E) {
// [Can throw] if in a potentially-evaluated context the expression would
// contain:
switch (E->getStmtClass()) {
+ case Expr::ConstantExprClass:
+ return canThrow(cast<ConstantExpr>(E)->getSubExpr());
+
case Expr::CXXThrowExprClass:
// - a potentially evaluated throw-expression
return CT_Can;
diff --git a/lib/Sema/SemaExpr.cpp b/lib/Sema/SemaExpr.cpp
index 26fb107688..4ba0fb12e7 100644
--- a/lib/Sema/SemaExpr.cpp
+++ b/lib/Sema/SemaExpr.cpp
@@ -730,20 +730,33 @@ ExprResult Sema::DefaultArgumentPromotion(Expr *E) {
return ExprError();
E = Res.get();
+ QualType ScalarTy = Ty;
+ unsigned NumElts = 0;
+ if (const ExtVectorType *VecTy = Ty->getAs<ExtVectorType>()) {
+ NumElts = VecTy->getNumElements();
+ ScalarTy = VecTy->getElementType();
+ }
+
// If this is a 'float' or '__fp16' (CVR qualified or typedef)
// promote to double.
// Note that default argument promotion applies only to float (and
// half/fp16); it does not apply to _Float16.
- const BuiltinType *BTy = Ty->getAs<BuiltinType>();
+ const BuiltinType *BTy = ScalarTy->getAs<BuiltinType>();
if (BTy && (BTy->getKind() == BuiltinType::Half ||
BTy->getKind() == BuiltinType::Float)) {
if (getLangOpts().OpenCL &&
!getOpenCLOptions().isEnabled("cl_khr_fp64")) {
- if (BTy->getKind() == BuiltinType::Half) {
- E = ImpCastExprToType(E, Context.FloatTy, CK_FloatingCast).get();
- }
+ if (BTy->getKind() == BuiltinType::Half) {
+ QualType Ty = Context.FloatTy;
+ if (NumElts != 0)
+ Ty = Context.getExtVectorType(Ty, NumElts);
+ E = ImpCastExprToType(E, Ty, CK_FloatingCast).get();
+ }
} else {
- E = ImpCastExprToType(E, Context.DoubleTy, CK_FloatingCast).get();
+ QualType Ty = Context.DoubleTy;
+ if (NumElts != 0)
+ Ty = Context.getExtVectorType(Ty, NumElts);
+ E = ImpCastExprToType(E, Ty, CK_FloatingCast).get();
}
}
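The hunk above extends default argument promotion to ext_vector_type operands of half/float, promoting element-wise instead of dropping the vector shape. A hedged sketch ('trace' is a hypothetical variadic sink, not from the patch):

    typedef __attribute__((ext_vector_type(4))) float float4;
    void trace(int tag, ...);          // hypothetical
    void log4(float4 v) {
      trace(1, v);                     // v is promoted to a 4 x double vector before the call
    }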
@@ -1559,6 +1572,32 @@ Sema::ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope) {
CharTy = Context.UnsignedCharTy;
}
+ // Warn on initializing an array of char from a u8 string literal; this
+ // becomes ill-formed in C++2a.
+ if (getLangOpts().CPlusPlus && !getLangOpts().CPlusPlus2a &&
+ !getLangOpts().Char8 && Kind == StringLiteral::UTF8) {
+ Diag(StringTokLocs.front(), diag::warn_cxx2a_compat_utf8_string);
+
+ // Create removals for all 'u8' prefixes in the string literal(s). This
+ // ensures C++2a compatibility (but may change the program behavior when
+ // built by non-Clang compilers for which the execution character set is
+ // not always UTF-8).
+ auto RemovalDiag = PDiag(diag::note_cxx2a_compat_utf8_string_remove_u8);
+ SourceLocation RemovalDiagLoc;
+ for (const Token &Tok : StringToks) {
+ if (Tok.getKind() == tok::utf8_string_literal) {
+ if (RemovalDiagLoc.isInvalid())
+ RemovalDiagLoc = Tok.getLocation();
+ RemovalDiag << FixItHint::CreateRemoval(CharSourceRange::getCharRange(
+ Tok.getLocation(),
+ Lexer::AdvanceToTokenCharacter(Tok.getLocation(), 2,
+ getSourceManager(), getLangOpts())));
+ }
+ }
+ Diag(RemovalDiagLoc, RemovalDiag);
+ }
+
+
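User-visible effect of the block above, sketched: in pre-C++2a C++ without -fchar8_t, initializing a char array from a u8 literal now draws a C++2a-compatibility warning plus a fix-it that removes the u8 prefix.

    const char greeting[] = u8"hello";   // warns today; ill-formed in C++2a, where u8"" is char8_t-based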
QualType CharTyConst = CharTy;
// A C++ string literal has a const-qualified element type (C++ 2.13.4p1).
if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings)
@@ -3036,7 +3075,7 @@ static void ConvertUTF8ToWideString(unsigned CharByteWidth, StringRef Source,
}
ExprResult Sema::BuildPredefinedExpr(SourceLocation Loc,
- PredefinedExpr::IdentType IT) {
+ PredefinedExpr::IdentKind IK) {
// Pick the current block, lambda, captured statement or function.
Decl *currentDecl = nullptr;
if (const BlockScopeInfo *BSI = getCurBlock())
@@ -3060,11 +3099,11 @@ ExprResult Sema::BuildPredefinedExpr(SourceLocation Loc,
else {
// Pre-defined identifiers are of type char[x], where x is the length of
// the string.
- auto Str = PredefinedExpr::ComputeName(IT, currentDecl);
+ auto Str = PredefinedExpr::ComputeName(IK, currentDecl);
unsigned Length = Str.length();
llvm::APInt LengthI(32, Length + 1);
- if (IT == PredefinedExpr::LFunction || IT == PredefinedExpr::LFuncSig) {
+ if (IK == PredefinedExpr::LFunction || IK == PredefinedExpr::LFuncSig) {
ResTy =
Context.adjustStringLiteralBaseType(Context.WideCharTy.withConst());
SmallString<32> RawChars;
@@ -3083,24 +3122,24 @@ ExprResult Sema::BuildPredefinedExpr(SourceLocation Loc,
}
}
- return new (Context) PredefinedExpr(Loc, ResTy, IT, SL);
+ return PredefinedExpr::Create(Context, Loc, ResTy, IK, SL);
}
ExprResult Sema::ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind) {
- PredefinedExpr::IdentType IT;
+ PredefinedExpr::IdentKind IK;
switch (Kind) {
default: llvm_unreachable("Unknown simple primary expr!");
- case tok::kw___func__: IT = PredefinedExpr::Func; break; // [C99 6.4.2.2]
- case tok::kw___FUNCTION__: IT = PredefinedExpr::Function; break;
- case tok::kw___FUNCDNAME__: IT = PredefinedExpr::FuncDName; break; // [MS]
- case tok::kw___FUNCSIG__: IT = PredefinedExpr::FuncSig; break; // [MS]
- case tok::kw_L__FUNCTION__: IT = PredefinedExpr::LFunction; break; // [MS]
- case tok::kw_L__FUNCSIG__: IT = PredefinedExpr::LFuncSig; break; // [MS]
- case tok::kw___PRETTY_FUNCTION__: IT = PredefinedExpr::PrettyFunction; break;
+ case tok::kw___func__: IK = PredefinedExpr::Func; break; // [C99 6.4.2.2]
+ case tok::kw___FUNCTION__: IK = PredefinedExpr::Function; break;
+ case tok::kw___FUNCDNAME__: IK = PredefinedExpr::FuncDName; break; // [MS]
+ case tok::kw___FUNCSIG__: IK = PredefinedExpr::FuncSig; break; // [MS]
+ case tok::kw_L__FUNCTION__: IK = PredefinedExpr::LFunction; break; // [MS]
+ case tok::kw_L__FUNCSIG__: IK = PredefinedExpr::LFuncSig; break; // [MS]
+ case tok::kw___PRETTY_FUNCTION__: IK = PredefinedExpr::PrettyFunction; break;
}
- return BuildPredefinedExpr(Loc, IT);
+ return BuildPredefinedExpr(Loc, IK);
}
ExprResult Sema::ActOnCharacterConstant(const Token &Tok, Scope *UDLScope) {
@@ -3596,7 +3635,8 @@ static bool CheckExtensionTraitOperandType(Sema &S, QualType T,
// C99 6.5.3.4p1:
if (T->isFunctionType() &&
- (TraitKind == UETT_SizeOf || TraitKind == UETT_AlignOf)) {
+ (TraitKind == UETT_SizeOf || TraitKind == UETT_AlignOf ||
+ TraitKind == UETT_PreferredAlignOf)) {
// sizeof(function)/alignof(function) is allowed as an extension.
S.Diag(Loc, diag::ext_sizeof_alignof_function_type)
<< TraitKind << ArgRange;
@@ -3674,7 +3714,7 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(Expr *E,
// the expression to be complete. 'sizeof' requires the expression's type to
// be complete (and will attempt to complete it if it's an array of unknown
// bound).
- if (ExprKind == UETT_AlignOf) {
+ if (ExprKind == UETT_AlignOf || ExprKind == UETT_PreferredAlignOf) {
if (RequireCompleteType(E->getExprLoc(),
Context.getBaseElementType(E->getType()),
diag::err_sizeof_alignof_incomplete_type, ExprKind,
@@ -3698,7 +3738,8 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(Expr *E,
// The operand for sizeof and alignof is in an unevaluated expression context,
// so side effects could result in unintended consequences.
- if ((ExprKind == UETT_SizeOf || ExprKind == UETT_AlignOf) &&
+ if ((ExprKind == UETT_SizeOf || ExprKind == UETT_AlignOf ||
+ ExprKind == UETT_PreferredAlignOf) &&
!inTemplateInstantiation() && E->HasSideEffects(Context, false))
Diag(E->getExprLoc(), diag::warn_side_effects_unevaluated_context);
@@ -3767,7 +3808,8 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(QualType ExprType,
// C11 6.5.3.4/3, C++11 [expr.alignof]p3:
// When alignof or _Alignof is applied to an array type, the result
// is the alignment of the element type.
- if (ExprKind == UETT_AlignOf || ExprKind == UETT_OpenMPRequiredSimdAlign)
+ if (ExprKind == UETT_AlignOf || ExprKind == UETT_PreferredAlignOf ||
+ ExprKind == UETT_OpenMPRequiredSimdAlign)
ExprType = Context.getBaseElementType(ExprType);
if (ExprKind == UETT_VecStep)
@@ -3796,7 +3838,7 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(QualType ExprType,
return false;
}
-static bool CheckAlignOfExpr(Sema &S, Expr *E) {
+static bool CheckAlignOfExpr(Sema &S, Expr *E, UnaryExprOrTypeTrait ExprKind) {
E = E->IgnoreParens();
// Cannot know anything else if the expression is dependent.
@@ -3850,7 +3892,7 @@ static bool CheckAlignOfExpr(Sema &S, Expr *E) {
return false;
}
- return S.CheckUnaryExprOrTypeTraitOperand(E, UETT_AlignOf);
+ return S.CheckUnaryExprOrTypeTraitOperand(E, ExprKind);
}
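CheckAlignOfExpr now takes the trait kind so that UETT_AlignOf (alignof/_Alignof) and UETT_PreferredAlignOf (GNU __alignof) share the same operand checks. Roughly, at the user level:

    struct S { char c; long double ld; };
    auto a = alignof(S);        // ABI-required alignment
    auto b = __alignof__(S);    // preferred alignment; may be larger on some targets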
bool Sema::CheckVecStepExpr(Expr *E) {
@@ -4046,8 +4088,8 @@ Sema::CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
bool isInvalid = false;
if (E->isTypeDependent()) {
// Delay type-checking for type-dependent expressions.
- } else if (ExprKind == UETT_AlignOf) {
- isInvalid = CheckAlignOfExpr(*this, E);
+ } else if (ExprKind == UETT_AlignOf || ExprKind == UETT_PreferredAlignOf) {
+ isInvalid = CheckAlignOfExpr(*this, E, ExprKind);
} else if (ExprKind == UETT_VecStep) {
isInvalid = CheckVecStepExpr(E);
} else if (ExprKind == UETT_OpenMPRequiredSimdAlign) {
@@ -4246,7 +4288,57 @@ Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base, SourceLocation lbLoc,
return CreateOverloadedArraySubscriptExpr(lbLoc, rbLoc, base, idx);
}
- return CreateBuiltinArraySubscriptExpr(base, lbLoc, idx, rbLoc);
+ ExprResult Res = CreateBuiltinArraySubscriptExpr(base, lbLoc, idx, rbLoc);
+
+ if (!Res.isInvalid() && isa<ArraySubscriptExpr>(Res.get()))
+ CheckSubscriptAccessOfNoDeref(cast<ArraySubscriptExpr>(Res.get()));
+
+ return Res;
+}
+
+void Sema::CheckAddressOfNoDeref(const Expr *E) {
+ ExpressionEvaluationContextRecord &LastRecord = ExprEvalContexts.back();
+ const Expr *StrippedExpr = E->IgnoreParenImpCasts();
+
+ // For expressions like `&(*s).b`, the base is recorded and what should be
+ // checked.
+ const MemberExpr *Member = nullptr;
+ while ((Member = dyn_cast<MemberExpr>(StrippedExpr)) && !Member->isArrow())
+ StrippedExpr = Member->getBase()->IgnoreParenImpCasts();
+
+ LastRecord.PossibleDerefs.erase(StrippedExpr);
+}
+
+void Sema::CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E) {
+ QualType ResultTy = E->getType();
+ ExpressionEvaluationContextRecord &LastRecord = ExprEvalContexts.back();
+
+ // Bail if the element is an array since it is not memory access.
+ if (isa<ArrayType>(ResultTy))
+ return;
+
+ if (ResultTy->hasAttr(attr::NoDeref)) {
+ LastRecord.PossibleDerefs.insert(E);
+ return;
+ }
+
+ // Check if the base type is a pointer to a member access of a struct
+ // marked with noderef.
+ const Expr *Base = E->getBase();
+ QualType BaseTy = Base->getType();
+ if (!(isa<ArrayType>(BaseTy) || isa<PointerType>(BaseTy)))
+ // Not a pointer access
+ return;
+
+ const MemberExpr *Member = nullptr;
+ while ((Member = dyn_cast<MemberExpr>(Base->IgnoreParenCasts())) &&
+ Member->isArrow())
+ Base = Member->getBase();
+
+ if (const auto *Ptr = dyn_cast<PointerType>(Base->getType())) {
+ if (Ptr->getPointeeType()->hasAttr(attr::NoDeref))
+ LastRecord.PossibleDerefs.insert(E);
+ }
}
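A hedged sketch of the accesses these two helpers track for the noderef type attribute: a subscript on noderef data is recorded as a possible dereference, while taking its address removes it from the pending set.

    #define NODEREF __attribute__((noderef))
    void f(int NODEREF *p) {
      int x = p[3];             // recorded (and later diagnosed) as a dereference of noderef data
      int NODEREF *q = &p[3];   // address-of: the pending entry is erased, no warning
    }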
ExprResult Sema::ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
@@ -4347,10 +4439,11 @@ ExprResult Sema::ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
return ExprError();
if (LowerBound && !OriginalTy->isAnyPointerType()) {
- llvm::APSInt LowerBoundValue;
- if (LowerBound->EvaluateAsInt(LowerBoundValue, Context)) {
+ Expr::EvalResult Result;
+ if (LowerBound->EvaluateAsInt(Result, Context)) {
// OpenMP 4.5, [2.4 Array Sections]
// The array section must be a subset of the original array.
+ llvm::APSInt LowerBoundValue = Result.Val.getInt();
if (LowerBoundValue.isNegative()) {
Diag(LowerBound->getExprLoc(), diag::err_omp_section_not_subset_of_array)
<< LowerBound->getSourceRange();
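This hunk, and the many similar ones below, migrate callers from the old EvaluateAsInt(llvm::APSInt &, ...) overload to the Expr::EvalResult form; the integer now has to be pulled out of the APValue. The new pattern, condensed:

    Expr::EvalResult Result;
    if (E->EvaluateAsInt(Result, S.Context)) {
      llvm::APSInt Value = Result.Val.getInt();   // integer payload lives inside the APValue
      // ... use Value ...
    }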
@@ -4360,10 +4453,11 @@ ExprResult Sema::ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
}
if (Length) {
- llvm::APSInt LengthValue;
- if (Length->EvaluateAsInt(LengthValue, Context)) {
+ Expr::EvalResult Result;
+ if (Length->EvaluateAsInt(Result, Context)) {
// OpenMP 4.5, [2.4 Array Sections]
// The length must evaluate to non-negative integers.
+ llvm::APSInt LengthValue = Result.Val.getInt();
if (LengthValue.isNegative()) {
Diag(Length->getExprLoc(), diag::err_omp_section_length_negative)
<< LengthValue.toString(/*Radix=*/10, /*Signed=*/true)
@@ -4828,7 +4922,10 @@ Sema::ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
return true;
}
- Call->setNumArgs(Context, NumParams);
+ // We reserve space for the default arguments when we create
+ // the call expression, before calling ConvertArgumentsForCall.
+ assert((Call->getNumArgs() == NumParams) &&
+ "We should have reserved space for the default arguments before!");
}
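The assert above relies on the CallExpr now being created with max(Args.size(), NumParams) argument slots (see BuildResolvedCallExpr below); trailing slots start out null and are filled with default arguments here instead of growing the call. Sketch:

    void f(int a, int b = 1);
    void g() { f(0); }   // the CallExpr gets two slots up front; the slot for 'b' is
                         // initially null and is populated by ConvertArgumentsForCall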
// If too many are passed and not variadic, error on the extras and drop
@@ -4869,7 +4966,7 @@ Sema::ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
Diag(FDecl->getBeginLoc(), diag::note_callee_decl) << FDecl;
// This deletes the extra arguments.
- Call->setNumArgs(Context, NumParams);
+ Call->shrinkNumArgs(NumParams);
return true;
}
}
@@ -5063,6 +5160,9 @@ static bool isPlaceholderToRemoveAsArg(QualType type) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id:
+#include "clang/Basic/OpenCLExtensionTypes.def"
#define PLACEHOLDER_TYPE(ID, SINGLETON_ID)
#define BUILTIN_TYPE(ID, SINGLETON_ID) case BuiltinType::ID:
#include "clang/AST/BuiltinTypes.def"
@@ -5485,12 +5585,11 @@ ExprResult Sema::ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
/// block-pointer type.
///
/// \param NDecl the declaration being called, if available
-ExprResult
-Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
- SourceLocation LParenLoc,
- ArrayRef<Expr *> Args,
- SourceLocation RParenLoc,
- Expr *Config, bool IsExecConfig) {
+ExprResult Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
+ SourceLocation LParenLoc,
+ ArrayRef<Expr *> Args,
+ SourceLocation RParenLoc, Expr *Config,
+ bool IsExecConfig, ADLCallKind UsesADL) {
FunctionDecl *FDecl = dyn_cast_or_null<FunctionDecl>(NDecl);
unsigned BuiltinID = (FDecl ? FDecl->getBuiltinID() : 0);
@@ -5515,28 +5614,71 @@ Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
// We special-case function promotion here because we only allow promoting
// builtin functions to function pointers in the callee of a call.
ExprResult Result;
+ QualType ResultTy;
if (BuiltinID &&
Fn->getType()->isSpecificBuiltinType(BuiltinType::BuiltinFn)) {
- Result = ImpCastExprToType(Fn, Context.getPointerType(FDecl->getType()),
- CK_BuiltinFnToFnPtr).get();
+ // Extract the return type from the (builtin) function pointer type.
+ // FIXME Several builtins still have setType in
+ // Sema::CheckBuiltinFunctionCall. One should review their definitions in
+ // Builtins.def to ensure they are correct before removing setType calls.
+ QualType FnPtrTy = Context.getPointerType(FDecl->getType());
+ Result = ImpCastExprToType(Fn, FnPtrTy, CK_BuiltinFnToFnPtr).get();
+ ResultTy = FDecl->getCallResultType();
} else {
Result = CallExprUnaryConversions(Fn);
+ ResultTy = Context.BoolTy;
}
if (Result.isInvalid())
return ExprError();
Fn = Result.get();
- // Make the call expr early, before semantic checks. This guarantees cleanup
- // of arguments and function on error.
+ // Check for a valid function type, but only if it is not a builtin which
+ // requires custom type checking. These will be handled by
+ // CheckBuiltinFunctionCall below just after creation of the call expression.
+ const FunctionType *FuncT = nullptr;
+ if (!BuiltinID || !Context.BuiltinInfo.hasCustomTypechecking(BuiltinID)) {
+ retry:
+ if (const PointerType *PT = Fn->getType()->getAs<PointerType>()) {
+ // C99 6.5.2.2p1 - "The expression that denotes the called function shall
+ // have type pointer to function".
+ FuncT = PT->getPointeeType()->getAs<FunctionType>();
+ if (!FuncT)
+ return ExprError(Diag(LParenLoc, diag::err_typecheck_call_not_function)
+ << Fn->getType() << Fn->getSourceRange());
+ } else if (const BlockPointerType *BPT =
+ Fn->getType()->getAs<BlockPointerType>()) {
+ FuncT = BPT->getPointeeType()->castAs<FunctionType>();
+ } else {
+ // Handle calls to expressions of unknown-any type.
+ if (Fn->getType() == Context.UnknownAnyTy) {
+ ExprResult rewrite = rebuildUnknownAnyFunction(*this, Fn);
+ if (rewrite.isInvalid()) return ExprError();
+ Fn = rewrite.get();
+ goto retry;
+ }
+
+ return ExprError(Diag(LParenLoc, diag::err_typecheck_call_not_function)
+ << Fn->getType() << Fn->getSourceRange());
+ }
+ }
+
+ // Get the number of parameters in the function prototype, if any.
+ // We will allocate space for max(Args.size(), NumParams) arguments
+ // in the call expression.
+ const auto *Proto = dyn_cast_or_null<FunctionProtoType>(FuncT);
+ unsigned NumParams = Proto ? Proto->getNumParams() : 0;
+
CallExpr *TheCall;
- if (Config)
- TheCall = new (Context) CUDAKernelCallExpr(Context, Fn,
- cast<CallExpr>(Config), Args,
- Context.BoolTy, VK_RValue,
- RParenLoc);
- else
- TheCall = new (Context) CallExpr(Context, Fn, Args, Context.BoolTy,
- VK_RValue, RParenLoc);
+ if (Config) {
+ assert(UsesADL == ADLCallKind::NotADL &&
+ "CUDAKernelCallExpr should not use ADL");
+ TheCall = new (Context)
+ CUDAKernelCallExpr(Context, Fn, cast<CallExpr>(Config), Args, ResultTy,
+ VK_RValue, RParenLoc, NumParams);
+ } else {
+ TheCall = new (Context) CallExpr(Context, Fn, Args, ResultTy, VK_RValue,
+ RParenLoc, NumParams, UsesADL);
+ }
if (!getLangOpts().CPlusPlus) {
// C cannot always handle TypoExpr nodes in builtin calls and direct
@@ -5547,39 +5689,16 @@ Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
if (!Result.isUsable()) return ExprError();
TheCall = dyn_cast<CallExpr>(Result.get());
if (!TheCall) return Result;
- Args = llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs());
+ // TheCall at this point has max(Args.size(), NumParams) arguments,
+ // with extra arguments nulled. We don't want to introduce nulled
+ // arguments in Args and so we only take the first Args.size() arguments.
+ Args = llvm::makeArrayRef(TheCall->getArgs(), Args.size());
}
- // Bail out early if calling a builtin with custom typechecking.
+ // Bail out early if calling a builtin with custom type checking.
if (BuiltinID && Context.BuiltinInfo.hasCustomTypechecking(BuiltinID))
return CheckBuiltinFunctionCall(FDecl, BuiltinID, TheCall);
- retry:
- const FunctionType *FuncT;
- if (const PointerType *PT = Fn->getType()->getAs<PointerType>()) {
- // C99 6.5.2.2p1 - "The expression that denotes the called function shall
- // have type pointer to function".
- FuncT = PT->getPointeeType()->getAs<FunctionType>();
- if (!FuncT)
- return ExprError(Diag(LParenLoc, diag::err_typecheck_call_not_function)
- << Fn->getType() << Fn->getSourceRange());
- } else if (const BlockPointerType *BPT =
- Fn->getType()->getAs<BlockPointerType>()) {
- FuncT = BPT->getPointeeType()->castAs<FunctionType>();
- } else {
- // Handle calls to expressions of unknown-any type.
- if (Fn->getType() == Context.UnknownAnyTy) {
- ExprResult rewrite = rebuildUnknownAnyFunction(*this, Fn);
- if (rewrite.isInvalid()) return ExprError();
- Fn = rewrite.get();
- TheCall->setCallee(Fn);
- goto retry;
- }
-
- return ExprError(Diag(LParenLoc, diag::err_typecheck_call_not_function)
- << Fn->getType() << Fn->getSourceRange());
- }
-
if (getLangOpts().CUDA) {
if (Config) {
// CUDA: Kernel calls must be to global functions
@@ -5608,7 +5727,6 @@ Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
TheCall->setType(FuncT->getCallResultType(Context));
TheCall->setValueKind(Expr::getValueKindForType(FuncT->getReturnType()));
- const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FuncT);
if (Proto) {
if (ConvertArgumentsForCall(TheCall, Fn, FDecl, Proto, Args, RParenLoc,
IsExecConfig))
@@ -5739,21 +5857,6 @@ Sema::BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo,
LiteralExpr = Result.get();
bool isFileScope = !CurContext->isFunctionOrMethod();
- if (isFileScope) {
- if (!LiteralExpr->isTypeDependent() &&
- !LiteralExpr->isValueDependent() &&
- !literalType->isDependentType()) // C99 6.5.2.5p3
- if (CheckForConstantInitializer(LiteralExpr, literalType))
- return ExprError();
- } else if (literalType.getAddressSpace() != LangAS::opencl_private &&
- literalType.getAddressSpace() != LangAS::Default) {
- // Embedded-C extensions to C99 6.5.2.5:
- // "If the compound literal occurs inside the body of a function, the
- // type name shall not be qualified by an address-space qualifier."
- Diag(LParenLoc, diag::err_compound_literal_with_address_space)
- << SourceRange(LParenLoc, LiteralExpr->getSourceRange().getEnd());
- return ExprError();
- }
// In C, compound literals are l-values for some reason.
// For GCC compatibility, in C++, file-scope array compound literals with
@@ -5778,9 +5881,32 @@ Sema::BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo,
? VK_RValue
: VK_LValue;
- return MaybeBindToTemporary(
- new (Context) CompoundLiteralExpr(LParenLoc, TInfo, literalType,
- VK, LiteralExpr, isFileScope));
+ if (isFileScope)
+ if (auto ILE = dyn_cast<InitListExpr>(LiteralExpr))
+ for (unsigned i = 0, j = ILE->getNumInits(); i != j; i++) {
+ Expr *Init = ILE->getInit(i);
+ ILE->setInit(i, ConstantExpr::Create(Context, Init));
+ }
+
+ Expr *E = new (Context) CompoundLiteralExpr(LParenLoc, TInfo, literalType,
+ VK, LiteralExpr, isFileScope);
+ if (isFileScope) {
+ if (!LiteralExpr->isTypeDependent() &&
+ !LiteralExpr->isValueDependent() &&
+ !literalType->isDependentType()) // C99 6.5.2.5p3
+ if (CheckForConstantInitializer(LiteralExpr, literalType))
+ return ExprError();
+ } else if (literalType.getAddressSpace() != LangAS::opencl_private &&
+ literalType.getAddressSpace() != LangAS::Default) {
+ // Embedded-C extensions to C99 6.5.2.5:
+ // "If the compound literal occurs inside the body of a function, the
+ // type name shall not be qualified by an address-space qualifier."
+ Diag(LParenLoc, diag::err_compound_literal_with_address_space)
+ << SourceRange(LParenLoc, LiteralExpr->getSourceRange().getEnd());
+ return ExprError();
+ }
+
+ return MaybeBindToTemporary(E);
}
ExprResult
@@ -5862,6 +5988,8 @@ CastKind Sema::PrepareScalarCast(ExprResult &Src, QualType DestTy) {
LangAS DestAS = DestTy->getPointeeType().getAddressSpace();
if (SrcAS != DestAS)
return CK_AddressSpaceConversion;
+ if (Context.hasCvrSimilarType(SrcTy, DestTy))
+ return CK_NoOp;
return CK_BitCast;
}
case Type::STK_BlockPointer:
@@ -5882,10 +6010,33 @@ CastKind Sema::PrepareScalarCast(ExprResult &Src, QualType DestTy) {
case Type::STK_FloatingComplex:
case Type::STK_IntegralComplex:
case Type::STK_MemberPointer:
+ case Type::STK_FixedPoint:
llvm_unreachable("illegal cast from pointer");
}
llvm_unreachable("Should have returned before this");
+ case Type::STK_FixedPoint:
+ switch (DestTy->getScalarTypeKind()) {
+ case Type::STK_FixedPoint:
+ return CK_FixedPointCast;
+ case Type::STK_Bool:
+ return CK_FixedPointToBoolean;
+ case Type::STK_Integral:
+ case Type::STK_Floating:
+ case Type::STK_IntegralComplex:
+ case Type::STK_FloatingComplex:
+ Diag(Src.get()->getExprLoc(),
+ diag::err_unimplemented_conversion_with_fixed_point_type)
+ << DestTy;
+ return CK_IntegralCast;
+ case Type::STK_CPointer:
+ case Type::STK_ObjCObjectPointer:
+ case Type::STK_BlockPointer:
+ case Type::STK_MemberPointer:
+ llvm_unreachable("illegal cast to pointer type");
+ }
+ llvm_unreachable("Should have returned before this");
+
case Type::STK_Bool: // casting from bool is like casting from an integer
case Type::STK_Integral:
switch (DestTy->getScalarTypeKind()) {
@@ -5914,6 +6065,11 @@ CastKind Sema::PrepareScalarCast(ExprResult &Src, QualType DestTy) {
return CK_FloatingRealToComplex;
case Type::STK_MemberPointer:
llvm_unreachable("member pointer type in C");
+ case Type::STK_FixedPoint:
+ Diag(Src.get()->getExprLoc(),
+ diag::err_unimplemented_conversion_with_fixed_point_type)
+ << SrcTy;
+ return CK_IntegralCast;
}
llvm_unreachable("Should have returned before this");
@@ -5941,6 +6097,11 @@ CastKind Sema::PrepareScalarCast(ExprResult &Src, QualType DestTy) {
llvm_unreachable("valid float->pointer cast?");
case Type::STK_MemberPointer:
llvm_unreachable("member pointer type in C");
+ case Type::STK_FixedPoint:
+ Diag(Src.get()->getExprLoc(),
+ diag::err_unimplemented_conversion_with_fixed_point_type)
+ << SrcTy;
+ return CK_IntegralCast;
}
llvm_unreachable("Should have returned before this");
@@ -5970,6 +6131,11 @@ CastKind Sema::PrepareScalarCast(ExprResult &Src, QualType DestTy) {
llvm_unreachable("valid complex float->pointer cast?");
case Type::STK_MemberPointer:
llvm_unreachable("member pointer type in C");
+ case Type::STK_FixedPoint:
+ Diag(Src.get()->getExprLoc(),
+ diag::err_unimplemented_conversion_with_fixed_point_type)
+ << SrcTy;
+ return CK_IntegralCast;
}
llvm_unreachable("Should have returned before this");
@@ -5999,6 +6165,11 @@ CastKind Sema::PrepareScalarCast(ExprResult &Src, QualType DestTy) {
llvm_unreachable("valid complex int->pointer cast?");
case Type::STK_MemberPointer:
llvm_unreachable("member pointer type in C");
+ case Type::STK_FixedPoint:
+ Diag(Src.get()->getExprLoc(),
+ diag::err_unimplemented_conversion_with_fixed_point_type)
+ << SrcTy;
+ return CK_IntegralCast;
}
llvm_unreachable("Should have returned before this");
}
@@ -6331,8 +6502,7 @@ Sema::MaybeConvertParenListExprToParenExpr(Scope *S, Expr *OrigExpr) {
ExprResult Sema::ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val) {
- Expr *expr = new (Context) ParenListExpr(Context, L, Val, R);
- return expr;
+ return ParenListExpr::Create(Context, L, Val, R);
}
/// Emit a specialized diagnostic when one expression is a null pointer
@@ -7762,7 +7932,12 @@ Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
if (isa<PointerType>(RHSType)) {
LangAS AddrSpaceL = LHSPointer->getPointeeType().getAddressSpace();
LangAS AddrSpaceR = RHSType->getPointeeType().getAddressSpace();
- Kind = AddrSpaceL != AddrSpaceR ? CK_AddressSpaceConversion : CK_BitCast;
+ if (AddrSpaceL != AddrSpaceR)
+ Kind = CK_AddressSpaceConversion;
+ else if (Context.hasCvrSimilarType(RHSType, LHSType))
+ Kind = CK_NoOp;
+ else
+ Kind = CK_BitCast;
return checkPointerTypesForAssignment(*this, LHSType, RHSType);
}
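With hasCvrSimilarType consulted first, a C assignment between pointers that differ only in cv-qualification of the pointee is now modeled as CK_NoOp rather than CK_BitCast. Illustrative:

    void g(int *p) {
      const int *cp = p;   // qualification-only conversion: CK_NoOp under this change
    }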
@@ -7830,7 +8005,7 @@ Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
}
// id -> T^
- if (getLangOpts().ObjC1 && RHSType->isObjCIdType()) {
+ if (getLangOpts().ObjC && RHSType->isObjCIdType()) {
Kind = CK_AnyPointerToBlockPointerCast;
return Compatible;
}
@@ -8034,6 +8209,17 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &CallerRHS,
ExprResult LocalRHS = CallerRHS;
ExprResult &RHS = ConvertRHS ? CallerRHS : LocalRHS;
+ if (const auto *LHSPtrType = LHSType->getAs<PointerType>()) {
+ if (const auto *RHSPtrType = RHS.get()->getType()->getAs<PointerType>()) {
+ if (RHSPtrType->getPointeeType()->hasAttr(attr::NoDeref) &&
+ !LHSPtrType->getPointeeType()->hasAttr(attr::NoDeref)) {
+ Diag(RHS.get()->getExprLoc(),
+ diag::warn_noderef_to_dereferenceable_pointer)
+ << RHS.get()->getSourceRange();
+ }
+ }
+ }
+
if (getLangOpts().CPlusPlus) {
if (!LHSType->isRecordType() && !LHSType->isAtomicType()) {
// C++ 5.17p3: If the left operand is not of class type, the
@@ -8139,7 +8325,7 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &CallerRHS,
if (!Diagnose)
return Incompatible;
}
- if (getLangOpts().ObjC1 &&
+ if (getLangOpts().ObjC &&
(CheckObjCBridgeRelatedConversions(E->getBeginLoc(), LHSType,
E->getType(), E, Diagnose) ||
ConversionToObjCStringLiteralCheck(LHSType, E, Diagnose))) {
@@ -8154,6 +8340,7 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &CallerRHS,
if (ConvertRHS)
RHS = ImpCastExprToType(E, Ty, Kind);
}
+
return result;
}
@@ -8316,8 +8503,8 @@ static bool canConvertIntToOtherIntTy(Sema &S, ExprResult *Int,
// Reject cases where the value of the Int is unknown as that would
// possibly cause truncation, but accept cases where the scalar can be
// demoted without loss of precision.
- llvm::APSInt Result;
- bool CstInt = Int->get()->EvaluateAsInt(Result, S.Context);
+ Expr::EvalResult EVResult;
+ bool CstInt = Int->get()->EvaluateAsInt(EVResult, S.Context);
int Order = S.Context.getIntegerTypeOrder(OtherIntTy, IntTy);
bool IntSigned = IntTy->hasSignedIntegerRepresentation();
bool OtherIntSigned = OtherIntTy->hasSignedIntegerRepresentation();
@@ -8325,6 +8512,7 @@ static bool canConvertIntToOtherIntTy(Sema &S, ExprResult *Int,
if (CstInt) {
// If the scalar is constant and is of a higher order and has more active
// bits that the vector element type, reject it.
+ llvm::APSInt Result = EVResult.Val.getInt();
unsigned NumBits = IntSigned
? (Result.isNegative() ? Result.getMinSignedBits()
: Result.getActiveBits())
@@ -8352,8 +8540,9 @@ static bool canConvertIntTyToFloatTy(Sema &S, ExprResult *Int,
// Determine if the integer constant can be expressed as a floating point
// number of the appropriate type.
- llvm::APSInt Result;
- bool CstInt = Int->get()->EvaluateAsInt(Result, S.Context);
+ Expr::EvalResult EVResult;
+ bool CstInt = Int->get()->EvaluateAsInt(EVResult, S.Context);
+
uint64_t Bits = 0;
if (CstInt) {
// Reject constants that would be truncated if they were converted to
@@ -8361,6 +8550,7 @@ static bool canConvertIntTyToFloatTy(Sema &S, ExprResult *Int,
// FIXME: Ideally the conversion to an APFloat and from an APFloat
// could be avoided if there was a convertFromAPInt method
// which could signal back if implicit truncation occurred.
+ llvm::APSInt Result = EVResult.Val.getInt();
llvm::APFloat Float(S.Context.getFloatTypeSemantics(FloatTy));
Float.convertFromAPInt(Result, IntTy->hasSignedIntegerRepresentation(),
llvm::APFloat::rmTowardZero);
@@ -8670,13 +8860,40 @@ static void checkArithmeticNull(Sema &S, ExprResult &LHS, ExprResult &RHS,
<< LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
}
+static void DiagnoseDivisionSizeofPointer(Sema &S, Expr *LHS, Expr *RHS,
+ SourceLocation Loc) {
+ const auto *LUE = dyn_cast<UnaryExprOrTypeTraitExpr>(LHS);
+ const auto *RUE = dyn_cast<UnaryExprOrTypeTraitExpr>(RHS);
+ if (!LUE || !RUE)
+ return;
+ if (LUE->getKind() != UETT_SizeOf || LUE->isArgumentType() ||
+ RUE->getKind() != UETT_SizeOf)
+ return;
+
+ QualType LHSTy = LUE->getArgumentExpr()->IgnoreParens()->getType();
+ QualType RHSTy;
+
+ if (RUE->isArgumentType())
+ RHSTy = RUE->getArgumentType();
+ else
+ RHSTy = RUE->getArgumentExpr()->IgnoreParens()->getType();
+
+ if (!LHSTy->isPointerType() || RHSTy->isPointerType())
+ return;
+ if (LHSTy->getPointeeType() != RHSTy)
+ return;
+
+ S.Diag(Loc, diag::warn_division_sizeof_ptr) << LHS << LHS->getSourceRange();
+}
+
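What the new check warns on, sketched: dividing sizeof of a pointer-typed expression by sizeof of its pointee, the array-length idiom mistakenly applied to a pointer.

    int *buf = nullptr;
    int n = sizeof(buf) / sizeof(buf[0]);   // warns: this is sizeof(int *) / sizeof(int),
                                            // not the number of elements behind 'buf'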
static void DiagnoseBadDivideOrRemainderValues(Sema& S, ExprResult &LHS,
ExprResult &RHS,
SourceLocation Loc, bool IsDiv) {
// Check for division/remainder by zero.
- llvm::APSInt RHSValue;
+ Expr::EvalResult RHSValue;
if (!RHS.get()->isValueDependent() &&
- RHS.get()->EvaluateAsInt(RHSValue, S.Context) && RHSValue == 0)
+ RHS.get()->EvaluateAsInt(RHSValue, S.Context) &&
+ RHSValue.Val.getInt() == 0)
S.DiagRuntimeBehavior(Loc, RHS.get(),
S.PDiag(diag::warn_remainder_division_by_zero)
<< IsDiv << RHS.get()->getSourceRange());
@@ -8700,8 +8917,10 @@ QualType Sema::CheckMultiplyDivideOperands(ExprResult &LHS, ExprResult &RHS,
if (compType.isNull() || !compType->isArithmeticType())
return InvalidOperands(Loc, LHS, RHS);
- if (IsDiv)
+ if (IsDiv) {
DiagnoseBadDivideOrRemainderValues(*this, LHS, RHS, Loc, IsDiv);
+ DiagnoseDivisionSizeofPointer(*this, LHS.get(), RHS.get(), Loc);
+ }
return compType;
}
@@ -8916,8 +9135,9 @@ static void diagnoseStringPlusInt(Sema &Self, SourceLocation OpLoc,
if (!IsStringPlusInt || IndexExpr->isValueDependent())
return;
- llvm::APSInt index;
- if (IndexExpr->EvaluateAsInt(index, Self.getASTContext())) {
+ Expr::EvalResult Result;
+ if (IndexExpr->EvaluateAsInt(Result, Self.getASTContext())) {
+ llvm::APSInt index = Result.Val.getInt();
unsigned StrLenWithNull = StrExpr->getLength() + 1;
if (index.isNonNegative() &&
index <= llvm::APSInt(llvm::APInt(index.getBitWidth(), StrLenWithNull),
@@ -9061,10 +9281,11 @@ QualType Sema::CheckAdditionOperands(ExprResult &LHS, ExprResult &RHS,
if (PExp->IgnoreParenCasts()->isNullPointerConstant(
Context, Expr::NPC_ValueDependentIsNotNull)) {
// In C++ adding zero to a null pointer is defined.
- llvm::APSInt KnownVal;
+ Expr::EvalResult KnownVal;
if (!getLangOpts().CPlusPlus ||
(!IExp->isValueDependent() &&
- (!IExp->EvaluateAsInt(KnownVal, Context) || KnownVal != 0))) {
+ (!IExp->EvaluateAsInt(KnownVal, Context) ||
+ KnownVal.Val.getInt() != 0))) {
// Check the conditions to see if this is the 'p = nullptr + n' idiom.
bool IsGNUIdiom = BinaryOperator::isNullPointerArithmeticExtension(
Context, BO_Add, PExp, IExp);
@@ -9139,10 +9360,11 @@ QualType Sema::CheckSubtractionOperands(ExprResult &LHS, ExprResult &RHS,
if (LHS.get()->IgnoreParenCasts()->isNullPointerConstant(Context,
Expr::NPC_ValueDependentIsNotNull)) {
// In C++ adding zero to a null pointer is defined.
- llvm::APSInt KnownVal;
+ Expr::EvalResult KnownVal;
if (!getLangOpts().CPlusPlus ||
(!RHS.get()->isValueDependent() &&
- (!RHS.get()->EvaluateAsInt(KnownVal, Context) || KnownVal != 0))) {
+ (!RHS.get()->EvaluateAsInt(KnownVal, Context) ||
+ KnownVal.Val.getInt() != 0))) {
diagnoseArithmeticOnNullPointer(*this, Loc, LHS.get(), false);
}
}
@@ -9218,11 +9440,12 @@ static void DiagnoseBadShiftValues(Sema& S, ExprResult &LHS, ExprResult &RHS,
if (S.getLangOpts().OpenCL)
return;
- llvm::APSInt Right;
// Check right/shifter operand
+ Expr::EvalResult RHSResult;
if (RHS.get()->isValueDependent() ||
- !RHS.get()->EvaluateAsInt(Right, S.Context))
+ !RHS.get()->EvaluateAsInt(RHSResult, S.Context))
return;
+ llvm::APSInt Right = RHSResult.Val.getInt();
if (Right.isNegative()) {
S.DiagRuntimeBehavior(Loc, RHS.get(),
@@ -9245,11 +9468,12 @@ static void DiagnoseBadShiftValues(Sema& S, ExprResult &LHS, ExprResult &RHS,
// according to C++ has undefined behavior ([expr.shift] 5.8/2). Unsigned
// integers have defined behavior modulo one more than the maximum value
// representable in the result type, so never warn for those.
- llvm::APSInt Left;
+ Expr::EvalResult LHSResult;
if (LHS.get()->isValueDependent() ||
LHSType->hasUnsignedIntegerRepresentation() ||
- !LHS.get()->EvaluateAsInt(Left, S.Context))
+ !LHS.get()->EvaluateAsInt(LHSResult, S.Context))
return;
+ llvm::APSInt Left = LHSResult.Val.getInt();
// If LHS does not have a signed type and non-negative value
// then, the behavior is undefined. Warn about it.
@@ -10441,6 +10665,10 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
}
if (getLangOpts().OpenCLVersion >= 200) {
+ if (LHSType->isClkEventT() && RHSType->isClkEventT()) {
+ return computeResultTy();
+ }
+
if (LHSType->isQueueT() && RHSType->isQueueT()) {
return computeResultTy();
}
@@ -10615,8 +10843,9 @@ inline QualType Sema::CheckLogicalOperands(ExprResult &LHS, ExprResult &RHS,
// that isn't 0 or 1 (which indicate a potential logical operation that
// happened to fold to true/false) then warn.
// Parens on the RHS are ignored.
- llvm::APSInt Result;
- if (RHS.get()->EvaluateAsInt(Result, Context))
+ Expr::EvalResult EVResult;
+ if (RHS.get()->EvaluateAsInt(EVResult, Context)) {
+ llvm::APSInt Result = EVResult.Val.getInt();
if ((getLangOpts().Bool && !RHS.get()->getType()->isBooleanType() &&
!RHS.get()->getExprLoc().isMacroID()) ||
(Result != 0 && Result != 1)) {
@@ -10636,6 +10865,7 @@ inline QualType Sema::CheckLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceRange(getLocForEndOfToken(LHS.get()->getEndLoc()),
RHS.get()->getEndLoc()));
}
+ }
}
if (!Context.getLangOpts().CPlusPlus) {
@@ -10886,30 +11116,38 @@ static void DiagnoseRecursiveConstFields(Sema &S, const ValueDecl *VD,
const RecordType *Ty,
SourceLocation Loc, SourceRange Range,
OriginalExprKind OEK,
- bool &DiagnosticEmitted,
- bool IsNested = false) {
+ bool &DiagnosticEmitted) {
+ std::vector<const RecordType *> RecordTypeList;
+ RecordTypeList.push_back(Ty);
+ unsigned NextToCheckIndex = 0;
// We walk the record hierarchy breadth-first to ensure that we print
// diagnostics in field nesting order.
- // First, check every field for constness.
- for (const FieldDecl *Field : Ty->getDecl()->fields()) {
- if (Field->getType().isConstQualified()) {
- if (!DiagnosticEmitted) {
- S.Diag(Loc, diag::err_typecheck_assign_const)
- << Range << NestedConstMember << OEK << VD
- << IsNested << Field;
- DiagnosticEmitted = true;
+ while (RecordTypeList.size() > NextToCheckIndex) {
+ bool IsNested = NextToCheckIndex > 0;
+ for (const FieldDecl *Field :
+ RecordTypeList[NextToCheckIndex]->getDecl()->fields()) {
+ // First, check every field for constness.
+ QualType FieldTy = Field->getType();
+ if (FieldTy.isConstQualified()) {
+ if (!DiagnosticEmitted) {
+ S.Diag(Loc, diag::err_typecheck_assign_const)
+ << Range << NestedConstMember << OEK << VD
+ << IsNested << Field;
+ DiagnosticEmitted = true;
+ }
+ S.Diag(Field->getLocation(), diag::note_typecheck_assign_const)
+ << NestedConstMember << IsNested << Field
+ << FieldTy << Field->getSourceRange();
+ }
+
+ // Then we append it to the list to check next in order.
+ FieldTy = FieldTy.getCanonicalType();
+ if (const auto *FieldRecTy = FieldTy->getAs<RecordType>()) {
+ if (llvm::find(RecordTypeList, FieldRecTy) == RecordTypeList.end())
+ RecordTypeList.push_back(FieldRecTy);
}
- S.Diag(Field->getLocation(), diag::note_typecheck_assign_const)
- << NestedConstMember << IsNested << Field
- << Field->getType() << Field->getSourceRange();
}
- }
- // Then, recurse.
- for (const FieldDecl *Field : Ty->getDecl()->fields()) {
- QualType FTy = Field->getType();
- if (const RecordType *FieldRecTy = FTy->getAs<RecordType>())
- DiagnoseRecursiveConstFields(S, VD, FieldRecTy, Loc, Range,
- OEK, DiagnosticEmitted, true);
+ ++NextToCheckIndex;
}
}
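The rewritten walk above replaces recursion with an explicit work list so that the notes come out in field-nesting order. As a rough illustration (example code is mine, not from the patch, and the exact diagnostic wording is assumed), assigning to an object whose type transitively contains a const member is rejected, with one note per const field, outer members before nested ones:

    // Illustrative only; e.g. in C mode.
    struct Inner { const int c; };              // nested const member
    struct Outer { struct Inner in; int n; };
    void assign(struct Outer *a, struct Outer *b) {
      *a = *b;  // rejected: the object has a (nested) const member;
                // each const field gets its own note, breadth-first
    }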
@@ -11230,6 +11468,12 @@ static bool IgnoreCommaOperand(const Expr *E) {
if (CE->getCastKind() == CK_ToVoid) {
return true;
}
+
+ // static_cast<void> on a dependent type will not show up as CK_ToVoid.
+ if (CE->getCastKind() == CK_Dependent && E->getType()->isVoidType() &&
+ CE->getSubExpr()->getType()->isDependentType()) {
+ return true;
+ }
}
return false;
@@ -11253,8 +11497,11 @@ void Sema::DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc) {
// The whitelisted locations are the initialization and increment portions
// of a for loop. The additional checks are on the condition of
// if statements, do/while loops, and for loops.
+  // Differences in scope flags for C89 mode require the extra logic.
const unsigned ForIncrementFlags =
- Scope::ControlScope | Scope::ContinueScope | Scope::BreakScope;
+ getLangOpts().C99 || getLangOpts().CPlusPlus
+ ? Scope::ControlScope | Scope::ContinueScope | Scope::BreakScope
+ : Scope::ContinueScope | Scope::BreakScope;
const unsigned ForInitFlags = Scope::ControlScope | Scope::DeclScope;
const unsigned ScopeFlags = getCurScope()->getFlags();
if ((ScopeFlags & ForIncrementFlags) == ForIncrementFlags ||
@@ -11882,7 +12129,7 @@ static void DiagnoseSelfAssignment(Sema &S, Expr *LHSExpr, Expr *RHSExpr,
/// is usually indicative of introspection within the Objective-C pointer.
static void checkObjCPointerIntrospection(Sema &S, ExprResult &L, ExprResult &R,
SourceLocation OpLoc) {
- if (!S.getLangOpts().ObjC1)
+ if (!S.getLangOpts().ObjC)
return;
const Expr *ObjCPointerExpr = nullptr, *OtherExpr = nullptr;
@@ -12650,6 +12897,7 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
break;
case UO_AddrOf:
resultType = CheckAddressOfOperand(Input, OpLoc);
+ CheckAddressOfNoDeref(InputExpr);
RecordModifiableNonNullParam(*this, InputExpr);
break;
case UO_Deref: {
@@ -12814,6 +13062,11 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
auto *UO = new (Context)
UnaryOperator(Input.get(), Opc, resultType, VK, OK, OpLoc, CanOverflow);
+
+ if (Opc == UO_Deref && UO->getType()->hasAttr(attr::NoDeref) &&
+ !isa<ArrayType>(UO->getType().getDesugaredType(Context)))
+ ExprEvalContexts.back().PossibleDerefs.insert(UO);
+
// Convert the result back to a half vector.
if (ConvertHalfVec)
return convertVector(UO, Context.HalfTy, *this);
@@ -13301,7 +13554,7 @@ void Sema::ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
// Drop the parameters.
FunctionProtoType::ExtProtoInfo EPI;
EPI.HasTrailingReturn = false;
- EPI.TypeQuals |= DeclSpec::TQ_const;
+ EPI.TypeQuals.addConst();
T = Context.getFunctionType(Context.DependentTy, None, EPI);
Sig = Context.getTrivialTypeSourceInfo(T);
}
@@ -13428,6 +13681,7 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
PopExpressionEvaluationContext();
BlockScopeInfo *BSI = cast<BlockScopeInfo>(FunctionScopes.back());
+ BlockDecl *BD = BSI->TheDecl;
if (BSI->HasImplicitReturnType)
deduceClosureReturnType(*BSI);
@@ -13438,7 +13692,7 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
if (!BSI->ReturnType.isNull())
RetTy = BSI->ReturnType;
- bool NoReturn = BSI->TheDecl->hasAttr<NoReturnAttr>();
+ bool NoReturn = BD->hasAttr<NoReturnAttr>();
QualType BlockTy;
// Set the captured variables on the block.
@@ -13451,7 +13705,7 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
Cap.isNested(), Cap.getInitExpr());
Captures.push_back(NewCap);
}
- BSI->TheDecl->setCaptures(Context, Captures, BSI->CXXThisCaptureIndex != 0);
+ BD->setCaptures(Context, Captures, BSI->CXXThisCaptureIndex != 0);
// If the user wrote a function type in some form, try to use that.
if (!BSI->FunctionType.isNull()) {
@@ -13476,7 +13730,7 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
} else {
const FunctionProtoType *FPT = cast<FunctionProtoType>(FTy);
FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
- EPI.TypeQuals = 0; // FIXME: silently?
+ EPI.TypeQuals = Qualifiers();
EPI.ExtInfo = Ext;
BlockTy = Context.getFunctionType(RetTy, FPT->getParamTypes(), EPI);
}
@@ -13488,7 +13742,7 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
BlockTy = Context.getFunctionType(RetTy, None, EPI);
}
- DiagnoseUnusedParameters(BSI->TheDecl->parameters());
+ DiagnoseUnusedParameters(BD->parameters());
BlockTy = Context.getBlockPointerType(BlockTy);
// If needed, diagnose invalid gotos and switches in the block.
@@ -13496,19 +13750,19 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
!PP.isCodeCompletionEnabled())
DiagnoseInvalidJumps(cast<CompoundStmt>(Body));
- BSI->TheDecl->setBody(cast<CompoundStmt>(Body));
+ BD->setBody(cast<CompoundStmt>(Body));
if (Body && getCurFunction()->HasPotentialAvailabilityViolations)
- DiagnoseUnguardedAvailabilityViolations(BSI->TheDecl);
+ DiagnoseUnguardedAvailabilityViolations(BD);
// Try to apply the named return value optimization. We have to check again
// if we can do this, though, because blocks keep return statements around
// to deduce an implicit return type.
if (getLangOpts().CPlusPlus && RetTy->isRecordType() &&
- !BSI->TheDecl->isDependentContext())
+ !BD->isDependentContext())
computeNRVO(Body, BSI);
- BlockExpr *Result = new (Context) BlockExpr(BSI->TheDecl, BlockTy);
+ BlockExpr *Result = new (Context) BlockExpr(BD, BlockTy);
AnalysisBasedWarnings::Policy WP = AnalysisWarnings.getDefaultPolicy();
PopFunctionScopeInfo(&WP, Result->getBlockDecl(), Result);
@@ -13530,6 +13784,9 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
}
}
+ if (getCurFunction())
+ getCurFunction()->addBlock(BD);
+
return Result;
}
@@ -13669,7 +13926,7 @@ ExprResult Sema::ActOnGNUNullExpr(SourceLocation TokenLoc) {
bool Sema::ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&Exp,
bool Diagnose) {
- if (!getLangOpts().ObjC1)
+ if (!getLangOpts().ObjC)
return false;
const ObjCObjectPointerType *PT = DstType->getAs<ObjCObjectPointerType>();
@@ -14038,11 +14295,14 @@ Sema::VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
return ExprError();
}
+ if (!isa<ConstantExpr>(E))
+ E = ConstantExpr::Create(Context, E);
+
// Circumvent ICE checking in C++11 to avoid evaluating the expression twice
// in the non-ICE case.
if (!getLangOpts().CPlusPlus11 && E->isIntegerConstantExpr(Context)) {
if (Result)
- *Result = E->EvaluateKnownConstInt(Context);
+ *Result = E->EvaluateKnownConstIntCheckOverflow(Context);
return E;
}
@@ -14173,6 +14433,51 @@ Sema::PushExpressionEvaluationContext(
PushExpressionEvaluationContext(NewContext, ClosureContextDecl, ExprContext);
}
+namespace {
+
+const DeclRefExpr *CheckPossibleDeref(Sema &S, const Expr *PossibleDeref) {
+ PossibleDeref = PossibleDeref->IgnoreParenImpCasts();
+ if (const auto *E = dyn_cast<UnaryOperator>(PossibleDeref)) {
+ if (E->getOpcode() == UO_Deref)
+ return CheckPossibleDeref(S, E->getSubExpr());
+ } else if (const auto *E = dyn_cast<ArraySubscriptExpr>(PossibleDeref)) {
+ return CheckPossibleDeref(S, E->getBase());
+ } else if (const auto *E = dyn_cast<MemberExpr>(PossibleDeref)) {
+ return CheckPossibleDeref(S, E->getBase());
+ } else if (const auto E = dyn_cast<DeclRefExpr>(PossibleDeref)) {
+ QualType Inner;
+ QualType Ty = E->getType();
+ if (const auto *Ptr = Ty->getAs<PointerType>())
+ Inner = Ptr->getPointeeType();
+ else if (const auto *Arr = S.Context.getAsArrayType(Ty))
+ Inner = Arr->getElementType();
+ else
+ return nullptr;
+
+ if (Inner->hasAttr(attr::NoDeref))
+ return E;
+ }
+ return nullptr;
+}
+
+} // namespace
+
+void Sema::WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec) {
+ for (const Expr *E : Rec.PossibleDerefs) {
+ const DeclRefExpr *DeclRef = CheckPossibleDeref(*this, E);
+ if (DeclRef) {
+ const ValueDecl *Decl = DeclRef->getDecl();
+ Diag(E->getExprLoc(), diag::warn_dereference_of_noderef_type)
+ << Decl->getName() << E->getSourceRange();
+ Diag(Decl->getLocation(), diag::note_previous_decl) << Decl->getName();
+ } else {
+ Diag(E->getExprLoc(), diag::warn_dereference_of_noderef_type_no_decl)
+ << E->getSourceRange();
+ }
+ }
+ Rec.PossibleDerefs.clear();
+}
+
void Sema::PopExpressionEvaluationContext() {
ExpressionEvaluationContextRecord& Rec = ExprEvalContexts.back();
unsigned NumTypos = Rec.NumTypos;
@@ -14212,6 +14517,8 @@ void Sema::PopExpressionEvaluationContext() {
}
}
+ WarnOnPendingNoDerefs(Rec);
+
  // When we are coming out of an unevaluated context, clear out any
// temporaries that we may have created as part of the evaluation of
// the expression in that context: they aren't relevant because they
@@ -14232,11 +14539,8 @@ void Sema::PopExpressionEvaluationContext() {
// Pop the current expression evaluation context off the stack.
ExprEvalContexts.pop_back();
- if (!ExprEvalContexts.empty())
- ExprEvalContexts.back().NumTypos += NumTypos;
- else
- assert(NumTypos == 0 && "There are outstanding typos after popping the "
- "last ExpressionEvaluationContextRecord");
+ // The global expression evaluation context record is never popped.
+ ExprEvalContexts.back().NumTypos += NumTypos;
}
void Sema::DiscardCleanupsInEvaluationContext() {
@@ -14838,6 +15142,21 @@ static void addAsFieldToClosureType(Sema &S, LambdaScopeInfo *LSI,
= FieldDecl::Create(S.Context, Lambda, Loc, Loc, nullptr, FieldType,
S.Context.getTrivialTypeSourceInfo(FieldType, Loc),
nullptr, false, ICIS_NoInit);
+ // If the variable being captured has an invalid type, mark the lambda class
+ // as invalid as well.
+ if (!FieldType->isDependentType()) {
+ if (S.RequireCompleteType(Loc, FieldType, diag::err_field_incomplete)) {
+ Lambda->setInvalidDecl();
+ Field->setInvalidDecl();
+ } else {
+ NamedDecl *Def;
+ FieldType->isIncompleteType(&Def);
+ if (Def && Def->isInvalidDecl()) {
+ Lambda->setInvalidDecl();
+ Field->setInvalidDecl();
+ }
+ }
+ }
Field->setImplicit(true);
Field->setAccess(AS_private);
Lambda->addDecl(Field);
@@ -16468,6 +16787,9 @@ ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id:
+#include "clang/Basic/OpenCLExtensionTypes.def"
#define BUILTIN_TYPE(Id, SingletonId) case BuiltinType::Id:
#define PLACEHOLDER_TYPE(Id, SingletonId)
#include "clang/AST/BuiltinTypes.def"
diff --git a/lib/Sema/SemaExprCXX.cpp b/lib/Sema/SemaExprCXX.cpp
index dd5dfaacf2..2b054c4b0f 100644
--- a/lib/Sema/SemaExprCXX.cpp
+++ b/lib/Sema/SemaExprCXX.cpp
@@ -1094,7 +1094,7 @@ QualType Sema::getCurrentThisType() {
Sema::CXXThisScopeRAII::CXXThisScopeRAII(Sema &S,
Decl *ContextDecl,
- unsigned CXXThisTypeQuals,
+ Qualifiers CXXThisTypeQuals,
bool Enabled)
: S(S), OldCXXThisTypeOverride(S.CXXThisTypeOverride), Enabled(false)
{
@@ -1107,11 +1107,10 @@ Sema::CXXThisScopeRAII::CXXThisScopeRAII(Sema &S,
else
Record = cast<CXXRecordDecl>(ContextDecl);
- // We care only for CVR qualifiers here, so cut everything else.
- CXXThisTypeQuals &= Qualifiers::FastMask;
- S.CXXThisTypeOverride
- = S.Context.getPointerType(
- S.Context.getRecordType(Record).withCVRQualifiers(CXXThisTypeQuals));
+ QualType T = S.Context.getRecordType(Record);
+ T = S.getASTContext().getQualifiedType(T, CXXThisTypeQuals);
+
+ S.CXXThisTypeOverride = S.Context.getPointerType(T);
this->Enabled = true;
}
@@ -2816,9 +2815,10 @@ void Sema::DeclareGlobalAllocationFunction(DeclarationName Name,
// Global allocation functions should always be visible.
Alloc->setVisibleDespiteOwningModule();
- // Implicit sized deallocation functions always have default visibility.
- Alloc->addAttr(
- VisibilityAttr::CreateImplicit(Context, VisibilityAttr::Default));
+ Alloc->addAttr(VisibilityAttr::CreateImplicit(
+ Context, LangOpts.GlobalAllocationFunctionVisibilityHidden
+ ? VisibilityAttr::Hidden
+ : VisibilityAttr::Default));
llvm::SmallVector<ParmVarDecl *, 3> ParamDecls;
for (QualType T : Params) {
@@ -4236,14 +4236,9 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
}
case ICK_Zero_Event_Conversion:
- From = ImpCastExprToType(From, ToType,
- CK_ZeroToOCLEvent,
- From->getValueKind()).get();
- break;
-
case ICK_Zero_Queue_Conversion:
From = ImpCastExprToType(From, ToType,
- CK_ZeroToOCLQueue,
+ CK_ZeroToOCLOpaqueType,
From->getValueKind()).get();
break;
@@ -4276,10 +4271,24 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
case ICK_Qualification: {
// The qualification keeps the category of the inner expression, unless the
// target type isn't a reference.
- ExprValueKind VK = ToType->isReferenceType() ?
- From->getValueKind() : VK_RValue;
- From = ImpCastExprToType(From, ToType.getNonLValueExprType(Context),
- CK_NoOp, VK, /*BasePath=*/nullptr, CCK).get();
+ ExprValueKind VK =
+ ToType->isReferenceType() ? From->getValueKind() : VK_RValue;
+
+ CastKind CK = CK_NoOp;
+
+ if (ToType->isReferenceType() &&
+ ToType->getPointeeType().getAddressSpace() !=
+ From->getType().getAddressSpace())
+ CK = CK_AddressSpaceConversion;
+
+ if (ToType->isPointerType() &&
+ ToType->getPointeeType().getAddressSpace() !=
+ From->getType()->getPointeeType().getAddressSpace())
+ CK = CK_AddressSpaceConversion;
+
+ From = ImpCastExprToType(From, ToType.getNonLValueExprType(Context), CK, VK,
+ /*BasePath=*/nullptr, CCK)
+ .get();
if (SCS.DeprecatedStringLiteralToCharPtr &&
!getLangOpts().WritableStrings) {
@@ -7764,41 +7773,24 @@ Sema::CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl,
ExprResult Sema::ActOnFinishFullExpr(Expr *FE, SourceLocation CC,
bool DiscardedValue,
- bool IsConstexpr,
- bool IsLambdaInitCaptureInitializer) {
+ bool IsConstexpr) {
ExprResult FullExpr = FE;
if (!FullExpr.get())
return ExprError();
- // If we are an init-expression in a lambdas init-capture, we should not
- // diagnose an unexpanded pack now (will be diagnosed once lambda-expr
- // containing full-expression is done).
- // template<class ... Ts> void test(Ts ... t) {
- // test([&a(t)]() { <-- (t) is an init-expr that shouldn't be diagnosed now.
- // return a;
- // }() ...);
- // }
- // FIXME: This is a hack. It would be better if we pushed the lambda scope
- // when we parse the lambda introducer, and teach capturing (but not
- // unexpanded pack detection) to walk over LambdaScopeInfos which don't have a
- // corresponding class yet (that is, have LambdaScopeInfo either represent a
- // lambda where we've entered the introducer but not the body, or represent a
- // lambda where we've entered the body, depending on where the
- // parser/instantiation has got to).
- if (!IsLambdaInitCaptureInitializer &&
- DiagnoseUnexpandedParameterPack(FullExpr.get()))
+ if (DiagnoseUnexpandedParameterPack(FullExpr.get()))
return ExprError();
- // Top-level expressions default to 'id' when we're in a debugger.
- if (DiscardedValue && getLangOpts().DebuggerCastResultToId &&
- FullExpr.get()->getType() == Context.UnknownAnyTy) {
- FullExpr = forceUnknownAnyToType(FullExpr.get(), Context.getObjCIdType());
- if (FullExpr.isInvalid())
- return ExprError();
- }
-
if (DiscardedValue) {
+ // Top-level expressions default to 'id' when we're in a debugger.
+ if (getLangOpts().DebuggerCastResultToId &&
+ FullExpr.get()->getType() == Context.UnknownAnyTy) {
+ FullExpr = forceUnknownAnyToType(FullExpr.get(), Context.getObjCIdType());
+ if (FullExpr.isInvalid())
+ return ExprError();
+ }
+
FullExpr = CheckPlaceholderExpr(FullExpr.get());
if (FullExpr.isInvalid())
return ExprError();
diff --git a/lib/Sema/SemaExprMember.cpp b/lib/Sema/SemaExprMember.cpp
index e6d2b5068f..b2b21ba9ee 100644
--- a/lib/Sema/SemaExprMember.cpp
+++ b/lib/Sema/SemaExprMember.cpp
@@ -496,7 +496,7 @@ Sema::ActOnDependentMemberExpr(Expr *BaseExpr, QualType BaseType,
// allows this, while still reporting an error if T is a struct pointer.
if (!IsArrow) {
const PointerType *PT = BaseType->getAs<PointerType>();
- if (PT && (!getLangOpts().ObjC1 ||
+ if (PT && (!getLangOpts().ObjC ||
PT->getPointeeType()->isRecordType())) {
assert(BaseExpr && "cannot happen with implicit member accesses");
Diag(OpLoc, diag::err_typecheck_member_reference_struct_union)
@@ -1708,9 +1708,31 @@ ExprResult Sema::ActOnMemberAccessExpr(Scope *S, Expr *Base,
}
ActOnMemberAccessExtraArgs ExtraArgs = {S, Id, ObjCImpDecl};
- return BuildMemberReferenceExpr(Base, Base->getType(), OpLoc, IsArrow, SS,
- TemplateKWLoc, FirstQualifierInScope,
- NameInfo, TemplateArgs, S, &ExtraArgs);
+ ExprResult Res = BuildMemberReferenceExpr(
+ Base, Base->getType(), OpLoc, IsArrow, SS, TemplateKWLoc,
+ FirstQualifierInScope, NameInfo, TemplateArgs, S, &ExtraArgs);
+
+ if (!Res.isInvalid() && isa<MemberExpr>(Res.get()))
+ CheckMemberAccessOfNoDeref(cast<MemberExpr>(Res.get()));
+
+ return Res;
+}
+
+void Sema::CheckMemberAccessOfNoDeref(const MemberExpr *E) {
+ QualType ResultTy = E->getType();
+
+ // Do not warn on member accesses to arrays since this returns an array
+ // lvalue and does not actually dereference memory.
+ if (isa<ArrayType>(ResultTy))
+ return;
+
+ if (E->isArrow()) {
+ if (const auto *Ptr = dyn_cast<PointerType>(
+ E->getBase()->getType().getDesugaredType(Context))) {
+ if (Ptr->getPointeeType()->hasAttr(attr::NoDeref))
+ ExprEvalContexts.back().PossibleDerefs.insert(E);
+ }
+ }
}
ExprResult
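CheckMemberAccessOfNoDeref above, together with the UO_Deref and DeclRefExpr handling added to SemaExpr.cpp, feeds the new noderef diagnostics. A minimal sketch of user code that the collected PossibleDerefs would flag (illustrative only; the spelling below assumes Clang's 'noderef' type attribute):

    // Illustrative only; names are made up.
    struct S { int x; };
    int __attribute__((noderef)) *ip;            // pointee marked noderef
    struct S __attribute__((noderef)) *sp;

    int use(void) {
      int v = *ip;     // dereference of a noderef pointer: reported when the
                       // enclosing expression evaluation context is popped
      return sp->x;    // arrow member access through a noderef pointer also warns
    }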
diff --git a/lib/Sema/SemaExprObjC.cpp b/lib/Sema/SemaExprObjC.cpp
index bee75e4ae6..9eaf747ae7 100644
--- a/lib/Sema/SemaExprObjC.cpp
+++ b/lib/Sema/SemaExprObjC.cpp
@@ -3892,7 +3892,7 @@ static bool CheckObjCBridgeCFCast(Sema &S, QualType castType, Expr *castExpr,
}
void Sema::CheckTollFreeBridgeCast(QualType castType, Expr *castExpr) {
- if (!getLangOpts().ObjC1)
+ if (!getLangOpts().ObjC)
return;
// warn in presence of __bridge casting to or from a toll free bridge cast.
ARCConversionTypeClass exprACTC = classifyTypeForARCConversion(castExpr->getType());
@@ -3964,7 +3964,7 @@ void Sema::CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr) {
bool Sema::CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind) {
- if (!getLangOpts().ObjC1)
+ if (!getLangOpts().ObjC)
return false;
ARCConversionTypeClass exprACTC =
classifyTypeForARCConversion(castExpr->getType());
diff --git a/lib/Sema/SemaInit.cpp b/lib/Sema/SemaInit.cpp
index 958dd66612..57277f6e82 100644
--- a/lib/Sema/SemaInit.cpp
+++ b/lib/Sema/SemaInit.cpp
@@ -1192,6 +1192,10 @@ void InitListChecker::CheckListElementTypes(const InitializedEntity &Entity,
if (!VerifyOnly)
SemaRef.Diag(IList->getBeginLoc(), diag::err_init_objc_class) << DeclType;
hadError = true;
+ } else if (DeclType->isOCLIntelSubgroupAVCType()) {
+ // Checks for scalar type are sufficient for these types too.
+ CheckScalarType(Entity, IList, DeclType, Index, StructuredList,
+ StructuredIndex);
} else {
if (!VerifyOnly)
SemaRef.Diag(IList->getBeginLoc(), diag::err_illegal_initializer_type)
@@ -3261,8 +3265,7 @@ void InitializationSequence::Step::Destroy() {
case SK_StdInitializerList:
case SK_StdInitializerListConstructorCall:
case SK_OCLSamplerInit:
- case SK_OCLZeroEvent:
- case SK_OCLZeroQueue:
+ case SK_OCLZeroOpaqueType:
break;
case SK_ConversionSequence:
@@ -3548,16 +3551,9 @@ void InitializationSequence::AddOCLSamplerInitStep(QualType T) {
Steps.push_back(S);
}
-void InitializationSequence::AddOCLZeroEventStep(QualType T) {
- Step S;
- S.Kind = SK_OCLZeroEvent;
- S.Type = T;
- Steps.push_back(S);
-}
-
-void InitializationSequence::AddOCLZeroQueueStep(QualType T) {
+void InitializationSequence::AddOCLZeroOpaqueTypeStep(QualType T) {
Step S;
- S.Kind = SK_OCLZeroQueue;
+ S.Kind = SK_OCLZeroOpaqueType;
S.Type = T;
Steps.push_back(S);
}
@@ -4885,6 +4881,13 @@ static void TryDefaultInitialization(Sema &S,
return;
}
+ // As an extension, and to fix Core issue 1013, zero initialize nullptr_t.
+ // Since there is only 1 valid value of nullptr_t, we can just use that.
+ if (DestType->isNullPtrType()) {
+ Sequence.AddZeroInitializationStep(Entity.getType());
+ return;
+ }
+
// - otherwise, no initialization is performed.
// If a program calls for the default initialization of an object of
@@ -5260,39 +5263,51 @@ static bool TryOCLSamplerInitialization(Sema &S,
return true;
}
-//
-// OpenCL 1.2 spec, s6.12.10
-//
-// The event argument can also be used to associate the
-// async_work_group_copy with a previous async copy allowing
-// an event to be shared by multiple async copies; otherwise
-// event should be zero.
-//
-static bool TryOCLZeroEventInitialization(Sema &S,
- InitializationSequence &Sequence,
- QualType DestType,
- Expr *Initializer) {
- if (!S.getLangOpts().OpenCL || !DestType->isEventT() ||
- !Initializer->isIntegerConstantExpr(S.getASTContext()) ||
- (Initializer->EvaluateKnownConstInt(S.getASTContext()) != 0))
- return false;
-
- Sequence.AddOCLZeroEventStep(DestType);
- return true;
+static bool IsZeroInitializer(Expr *Initializer, Sema &S) {
+ return Initializer->isIntegerConstantExpr(S.getASTContext()) &&
+ (Initializer->EvaluateKnownConstInt(S.getASTContext()) == 0);
}
-static bool TryOCLZeroQueueInitialization(Sema &S,
- InitializationSequence &Sequence,
- QualType DestType,
- Expr *Initializer) {
- if (!S.getLangOpts().OpenCL || S.getLangOpts().OpenCLVersion < 200 ||
- !DestType->isQueueT() ||
- !Initializer->isIntegerConstantExpr(S.getASTContext()) ||
- (Initializer->EvaluateKnownConstInt(S.getASTContext()) != 0))
+static bool TryOCLZeroOpaqueTypeInitialization(Sema &S,
+ InitializationSequence &Sequence,
+ QualType DestType,
+ Expr *Initializer) {
+ if (!S.getLangOpts().OpenCL)
return false;
- Sequence.AddOCLZeroQueueStep(DestType);
- return true;
+ //
+ // OpenCL 1.2 spec, s6.12.10
+ //
+ // The event argument can also be used to associate the
+ // async_work_group_copy with a previous async copy allowing
+ // an event to be shared by multiple async copies; otherwise
+ // event should be zero.
+ //
+ if (DestType->isEventT() || DestType->isQueueT()) {
+ if (!IsZeroInitializer(Initializer, S))
+ return false;
+
+ Sequence.AddOCLZeroOpaqueTypeStep(DestType);
+ return true;
+ }
+
+ // We should allow zero initialization for all types defined in the
+ // cl_intel_device_side_avc_motion_estimation extension, except
+ // intel_sub_group_avc_mce_payload_t and intel_sub_group_avc_mce_result_t.
+ if (S.getOpenCLOptions().isEnabled(
+ "cl_intel_device_side_avc_motion_estimation") &&
+ DestType->isOCLIntelSubgroupAVCType()) {
+ if (DestType->isOCLIntelSubgroupAVCMcePayloadType() ||
+ DestType->isOCLIntelSubgroupAVCMceResultType())
+ return false;
+ if (!IsZeroInitializer(Initializer, S))
+ return false;
+
+ Sequence.AddOCLZeroOpaqueTypeStep(DestType);
+ return true;
+ }
+
+ return false;
}
InitializationSequence::InitializationSequence(Sema &S,
@@ -5397,7 +5412,7 @@ void InitializationSequence::InitializeFrom(Sema &S,
Expr *Initializer = nullptr;
if (Args.size() == 1) {
Initializer = Args[0];
- if (S.getLangOpts().ObjC1) {
+ if (S.getLangOpts().ObjC) {
if (S.CheckObjCBridgeRelatedConversions(Initializer->getBeginLoc(),
DestType, Initializer->getType(),
Initializer) ||
@@ -5519,7 +5534,8 @@ void InitializationSequence::InitializeFrom(Sema &S,
// array from a compound literal that creates an array of the same
// type, so long as the initializer has no side effects.
if (!S.getLangOpts().CPlusPlus && Initializer &&
- isa<CompoundLiteralExpr>(Initializer->IgnoreParens()) &&
+ (isa<ConstantExpr>(Initializer->IgnoreParens()) ||
+ isa<CompoundLiteralExpr>(Initializer->IgnoreParens())) &&
Initializer->getType()->isArrayType()) {
const ArrayType *SourceAT
= Context.getAsArrayType(Initializer->getType());
@@ -5566,12 +5582,9 @@ void InitializationSequence::InitializeFrom(Sema &S,
if (TryOCLSamplerInitialization(S, *this, DestType, Initializer))
return;
- if (TryOCLZeroEventInitialization(S, *this, DestType, Initializer))
+ if (TryOCLZeroOpaqueTypeInitialization(S, *this, DestType, Initializer))
return;
- if (TryOCLZeroQueueInitialization(S, *this, DestType, Initializer))
- return;
-
// Handle initialization in C
AddCAssignmentStep(DestType);
MaybeProduceObjCObject(S, *this, Entity);
@@ -6180,7 +6193,10 @@ PerformConstructorInitialization(Sema &S,
TypeSourceInfo *TSInfo = Entity.getTypeSourceInfo();
if (!TSInfo)
TSInfo = S.Context.getTrivialTypeSourceInfo(Entity.getType(), Loc);
- SourceRange ParenOrBraceRange = Kind.getParenOrBraceRange();
+ SourceRange ParenOrBraceRange =
+ (Kind.getKind() == InitializationKind::IK_DirectList)
+ ? SourceRange(LBraceLoc, RBraceLoc)
+ : Kind.getParenOrBraceRange();
if (auto *Shadow = dyn_cast<ConstructorUsingShadowDecl>(
Step.Function.FoundDecl.getDecl())) {
@@ -6441,7 +6457,7 @@ static bool isVarOnPath(IndirectLocalPath &Path, VarDecl *VD) {
}
static bool pathContainsInit(IndirectLocalPath &Path) {
- return std::any_of(Path.begin(), Path.end(), [=](IndirectLocalPathEntry E) {
+ return llvm::any_of(Path, [=](IndirectLocalPathEntry E) {
return E.Kind == IndirectLocalPathEntry::DefaultInit ||
E.Kind == IndirectLocalPathEntry::VarInit;
});
@@ -6529,8 +6545,8 @@ static void visitLocalsRetainedByReferenceBinding(IndirectLocalPath &Path,
do {
Old = Init;
- if (auto *EWC = dyn_cast<ExprWithCleanups>(Init))
- Init = EWC->getSubExpr();
+ if (auto *FE = dyn_cast<FullExpr>(Init))
+ Init = FE->getSubExpr();
if (InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
// If this is just redundant braces around an initializer, step over it.
@@ -6653,8 +6669,8 @@ static void visitLocalsRetainedByInitializer(IndirectLocalPath &Path,
Init = DIE->getExpr();
}
- if (auto *EWC = dyn_cast<ExprWithCleanups>(Init))
- Init = EWC->getSubExpr();
+ if (auto *FE = dyn_cast<FullExpr>(Init))
+ Init = FE->getSubExpr();
// Dig out the expression which constructs the extended temporary.
Init = const_cast<Expr *>(Init->skipRValueSubobjectAdjustments());
@@ -6786,6 +6802,20 @@ static void visitLocalsRetainedByInitializer(IndirectLocalPath &Path,
return;
}
+ // The lifetime of an init-capture is that of the closure object constructed
+ // by a lambda-expression.
+ if (auto *LE = dyn_cast<LambdaExpr>(Init)) {
+ for (Expr *E : LE->capture_inits()) {
+ if (!E)
+ continue;
+ if (E->isGLValue())
+ visitLocalsRetainedByReferenceBinding(Path, E, RK_ReferenceBinding,
+ Visit);
+ else
+ visitLocalsRetainedByInitializer(Path, E, Visit, true);
+ }
+ }
+
if (isa<CallExpr>(Init) || isa<CXXConstructExpr>(Init))
return visitLifetimeBoundArguments(Path, Init, Visit);
@@ -7238,12 +7268,20 @@ ExprResult Sema::TemporaryMaterializationConversion(Expr *E) {
return CreateMaterializeTemporaryExpr(E->getType(), E, false);
}
-ExprResult
-InitializationSequence::Perform(Sema &S,
- const InitializedEntity &Entity,
- const InitializationKind &Kind,
- MultiExprArg Args,
- QualType *ResultType) {
+ExprResult Sema::PerformQualificationConversion(Expr *E, QualType Ty,
+ ExprValueKind VK,
+ CheckedConversionKind CCK) {
+ CastKind CK = (Ty.getAddressSpace() != E->getType().getAddressSpace())
+ ? CK_AddressSpaceConversion
+ : CK_NoOp;
+ return ImpCastExprToType(E, Ty, CK, VK, /*BasePath=*/nullptr, CCK);
+}
+
+ExprResult InitializationSequence::Perform(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ MultiExprArg Args,
+ QualType *ResultType) {
if (Failed()) {
Diagnose(S, Entity, Kind, Args);
return ExprError();
@@ -7393,8 +7431,7 @@ InitializationSequence::Perform(Sema &S,
case SK_ProduceObjCObject:
case SK_StdInitializerList:
case SK_OCLSamplerInit:
- case SK_OCLZeroEvent:
- case SK_OCLZeroQueue: {
+ case SK_OCLZeroOpaqueType: {
assert(Args.size() == 1);
CurInit = Args[0];
if (!CurInit.get()) return ExprError();
@@ -7632,12 +7669,11 @@ InitializationSequence::Perform(Sema &S,
case SK_QualificationConversionRValue: {
// Perform a qualification conversion; these can never go wrong.
ExprValueKind VK =
- Step->Kind == SK_QualificationConversionLValue ?
- VK_LValue :
- (Step->Kind == SK_QualificationConversionXValue ?
- VK_XValue :
- VK_RValue);
- CurInit = S.ImpCastExprToType(CurInit.get(), Step->Type, CK_NoOp, VK);
+ Step->Kind == SK_QualificationConversionLValue
+ ? VK_LValue
+ : (Step->Kind == SK_QualificationConversionXValue ? VK_XValue
+ : VK_RValue);
+ CurInit = S.PerformQualificationConversion(CurInit.get(), Step->Type, VK);
break;
}
@@ -7658,6 +7694,18 @@ InitializationSequence::Perform(Sema &S,
case SK_ConversionSequence:
case SK_ConversionSequenceNoNarrowing: {
+ if (const auto *FromPtrType =
+ CurInit.get()->getType()->getAs<PointerType>()) {
+ if (const auto *ToPtrType = Step->Type->getAs<PointerType>()) {
+ if (FromPtrType->getPointeeType()->hasAttr(attr::NoDeref) &&
+ !ToPtrType->getPointeeType()->hasAttr(attr::NoDeref)) {
+ S.Diag(CurInit.get()->getExprLoc(),
+ diag::warn_noderef_to_dereferenceable_pointer)
+ << CurInit.get()->getSourceRange();
+ }
+ }
+ }
+
Sema::CheckedConversionKind CCK
= Kind.isCStyleCast()? Sema::CCK_CStyleCast
: Kind.isFunctionalCast()? Sema::CCK_FunctionalCast
@@ -7824,6 +7872,7 @@ InitializationSequence::Perform(Sema &S,
case SK_CAssignment: {
QualType SourceType = CurInit.get()->getType();
+
// Save off the initial CurInit in case we need to emit a diagnostic
ExprResult InitialCurInit = CurInit;
ExprResult Result = CurInit;
@@ -7959,7 +8008,7 @@ InitializationSequence::Perform(Sema &S,
}
case SK_OCLSamplerInit: {
- // Sampler initialzation have 5 cases:
+    // Sampler initialization has 5 cases:
// 1. function argument passing
// 1a. argument is a file-scope variable
// 1b. argument is a function-scope variable
@@ -8021,8 +8070,9 @@ InitializationSequence::Perform(Sema &S,
break;
}
- llvm::APSInt Result;
- Init->EvaluateAsInt(Result, S.Context);
+ Expr::EvalResult EVResult;
+ Init->EvaluateAsInt(EVResult, S.Context);
+ llvm::APSInt Result = EVResult.Val.getInt();
const uint64_t SamplerValue = Result.getLimitedValue();
// 32-bit value of sampler's initializer is interpreted as
// bit-field with the following structure:
@@ -8032,7 +8082,9 @@ InitializationSequence::Perform(Sema &S,
// defined in SPIR spec v1.2 and also opencl-c.h
unsigned AddressingMode = (0x0E & SamplerValue) >> 1;
unsigned FilterMode = (0x30 & SamplerValue) >> 4;
- if (FilterMode != 1 && FilterMode != 2)
+ if (FilterMode != 1 && FilterMode != 2 &&
+ !S.getOpenCLOptions().isEnabled(
+ "cl_intel_device_side_avc_motion_estimation"))
S.Diag(Kind.getLocation(),
diag::warn_sampler_initializer_invalid_bits)
<< "Filter Mode";
@@ -8048,21 +8100,13 @@ InitializationSequence::Perform(Sema &S,
CK_IntToOCLSampler);
break;
}
- case SK_OCLZeroEvent: {
- assert(Step->Type->isEventT() &&
- "Event initialization on non-event type.");
+ case SK_OCLZeroOpaqueType: {
+ assert((Step->Type->isEventT() || Step->Type->isQueueT() ||
+ Step->Type->isOCLIntelSubgroupAVCType()) &&
+ "Wrong type for initialization of OpenCL opaque type.");
CurInit = S.ImpCastExprToType(CurInit.get(), Step->Type,
- CK_ZeroToOCLEvent,
- CurInit.get()->getValueKind());
- break;
- }
- case SK_OCLZeroQueue: {
- assert(Step->Type->isQueueT() &&
- "Event initialization on non queue type.");
-
- CurInit = S.ImpCastExprToType(CurInit.get(), Step->Type,
- CK_ZeroToOCLQueue,
+ CK_ZeroToOCLOpaqueType,
CurInit.get()->getValueKind());
break;
}
@@ -8255,7 +8299,8 @@ bool InitializationSequence::Diagnose(Sema &S,
break;
case FK_UTF8StringIntoPlainChar:
S.Diag(Kind.getLocation(),
- diag::err_array_init_utf8_string_into_char);
+ diag::err_array_init_utf8_string_into_char)
+ << S.getLangOpts().CPlusPlus2a;
break;
case FK_ArrayTypeMismatch:
case FK_NonConstantArrayInit:
@@ -8945,12 +8990,8 @@ void InitializationSequence::dump(raw_ostream &OS) const {
OS << "OpenCL sampler_t from integer constant";
break;
- case SK_OCLZeroEvent:
- OS << "OpenCL event_t from zero";
- break;
-
- case SK_OCLZeroQueue:
- OS << "OpenCL queue_t from zero";
+ case SK_OCLZeroOpaqueType:
+ OS << "OpenCL opaque type from zero";
break;
}
diff --git a/lib/Sema/SemaLambda.cpp b/lib/Sema/SemaLambda.cpp
index 8000cb4fbf..6dc93d0761 100644
--- a/lib/Sema/SemaLambda.cpp
+++ b/lib/Sema/SemaLambda.cpp
@@ -493,7 +493,9 @@ void Sema::finishLambdaExplicitCaptures(LambdaScopeInfo *LSI) {
LSI->finishedExplicitCaptures();
}
-void Sema::addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope) {
+void Sema::addLambdaParameters(
+ ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
+ CXXMethodDecl *CallOperator, Scope *CurScope) {
// Introduce our parameters into the function scope
for (unsigned p = 0, NumParams = CallOperator->getNumParams();
p < NumParams; ++p) {
@@ -501,7 +503,19 @@ void Sema::addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope) {
// If this has an identifier, add it to the scope stack.
if (CurScope && Param->getIdentifier()) {
- CheckShadow(CurScope, Param);
+ bool Error = false;
+ // Resolution of CWG 2211 in C++17 renders shadowing ill-formed, but we
+ // retroactively apply it.
+ for (const auto &Capture : Captures) {
+ if (Capture.Id == Param->getIdentifier()) {
+ Error = true;
+ Diag(Param->getLocation(), diag::err_parameter_shadow_capture);
+ Diag(Capture.Loc, diag::note_var_explicitly_captured_here)
+ << Capture.Id << true;
+ }
+ }
+ if (!Error)
+ CheckShadow(CurScope, Param);
PushOnScopeChains(Param, CurScope);
}
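A short illustration of the case the new loop rejects (example code is mine, not from the patch): a lambda parameter that reuses the name of an explicit capture now gets err_parameter_shadow_capture, with a note pointing at the capture.

    void demo() {
      int x = 0;
      auto bad = [x](int x) { return x; };      // error: parameter shadows the capture of 'x'
      auto ok  = [x](int y) { return x + y; };  // distinct names are fine
    }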
@@ -775,16 +789,6 @@ QualType Sema::buildLambdaInitCaptureInitialization(SourceLocation Loc,
if (Result.isInvalid())
return QualType();
- Init = Result.getAs<Expr>();
-
- // The init-capture initialization is a full-expression that must be
- // processed as one before we enter the declcontext of the lambda's
- // call-operator.
- Result = ActOnFinishFullExpr(Init, Loc, /*DiscardedValue*/ false,
- /*IsConstexpr*/ false,
- /*IsLambdaInitCaptureInitializer*/ true);
- if (Result.isInvalid())
- return QualType();
Init = Result.getAs<Expr>();
return DeducedType;
@@ -855,7 +859,7 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
FunctionProtoType::ExtProtoInfo EPI(Context.getDefaultCallingConvention(
/*IsVariadic=*/false, /*IsCXXMethod=*/true));
EPI.HasTrailingReturn = true;
- EPI.TypeQuals |= DeclSpec::TQ_const;
+ EPI.TypeQuals.addConst();
// C++1y [expr.prim.lambda]:
// The lambda return type is 'auto', which is replaced by the
// trailing-return type if provided and/or deduced from 'return'
@@ -1152,7 +1156,7 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
LSI->ContainsUnexpandedParameterPack = ContainsUnexpandedParameterPack;
// Add lambda parameters into scope.
- addLambdaParameters(Method, CurScope);
+ addLambdaParameters(Intro.Captures, Method, CurScope);
// Enter a new evaluation context to insulate the lambda from any
// cleanups from the enclosing full-expression.
@@ -1194,7 +1198,7 @@ QualType Sema::getLambdaConversionFunctionResultType(
CallingConv CC = Context.getDefaultCallingConvention(
CallOpProto->isVariadic(), /*IsCXXMethod=*/false);
InvokerExtInfo.ExtInfo = InvokerExtInfo.ExtInfo.withCallingConv(CC);
- InvokerExtInfo.TypeQuals = 0;
+ InvokerExtInfo.TypeQuals = Qualifiers();
assert(InvokerExtInfo.RefQualifier == RQ_None &&
"Lambda's call operator should not have a reference qualifier");
return Context.getFunctionType(CallOpProto->getReturnType(),
@@ -1225,7 +1229,8 @@ static void addFunctionPointerConversion(Sema &S,
S.Context.getDefaultCallingConvention(
/*IsVariadic=*/false, /*IsCXXMethod=*/true));
// The conversion function is always const.
- ConvExtInfo.TypeQuals = Qualifiers::Const;
+ ConvExtInfo.TypeQuals = Qualifiers();
+ ConvExtInfo.TypeQuals.addConst();
QualType ConvTy =
S.Context.getFunctionType(PtrToFunctionTy, None, ConvExtInfo);
@@ -1373,7 +1378,8 @@ static void addBlockPointerConversion(Sema &S,
FunctionProtoType::ExtProtoInfo ConversionEPI(
S.Context.getDefaultCallingConvention(
/*IsVariadic=*/false, /*IsCXXMethod=*/true));
- ConversionEPI.TypeQuals = Qualifiers::Const;
+ ConversionEPI.TypeQuals = Qualifiers();
+ ConversionEPI.TypeQuals.addConst();
QualType ConvTy = S.Context.getFunctionType(BlockPtrTy, None, ConversionEPI);
SourceLocation Loc = IntroducerRange.getBegin();
@@ -1632,7 +1638,7 @@ ExprResult Sema::BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
// same parameter and return types as the closure type's function call
// operator.
// FIXME: Fix generic lambda to block conversions.
- if (getLangOpts().Blocks && getLangOpts().ObjC1 && !IsGenericLambda)
+ if (getLangOpts().Blocks && getLangOpts().ObjC && !IsGenericLambda)
addBlockPointerConversion(*this, IntroducerRange, Class, CallOperator);
// Finalize the lambda class.
diff --git a/lib/Sema/SemaLookup.cpp b/lib/Sema/SemaLookup.cpp
index 6a1aae6241..a8a3651c5d 100644
--- a/lib/Sema/SemaLookup.cpp
+++ b/lib/Sema/SemaLookup.cpp
@@ -1600,9 +1600,9 @@ bool Sema::isModuleVisible(const Module *M, bool ModulePrivate) {
return false;
// Check whether M is transitively exported to an import of the lookup set.
- return std::any_of(LookupModules.begin(), LookupModules.end(),
- [&](const Module *LookupM) {
- return LookupM->isModuleVisible(M); });
+ return llvm::any_of(LookupModules, [&](const Module *LookupM) {
+ return LookupM->isModuleVisible(M);
+ });
}
bool Sema::isVisibleSlow(const NamedDecl *D) {
@@ -3619,8 +3619,9 @@ static void LookupVisibleDecls(DeclContext *Ctx, LookupResult &Result,
// Find results in this base class (and its bases).
ShadowContextRAII Shadow(Visited);
- LookupVisibleDecls(RD, Result, QualifiedNameLookup, true, Consumer,
- Visited, IncludeDependentBases, LoadExternal);
+ LookupVisibleDecls(RD, Result, QualifiedNameLookup, /*InBaseClass=*/true,
+ Consumer, Visited, IncludeDependentBases,
+ LoadExternal);
}
}
@@ -4061,7 +4062,7 @@ void TypoCorrectionConsumer::addNamespaces(
}
// Do not transform this into an iterator-based loop. The loop body can
// trigger the creation of further types (through lazy deserialization) and
- // invalide iterators into this list.
+  // invalidate iterators into this list.
auto &Types = SemaRef.getASTContext().getTypes();
for (unsigned I = 0; I != Types.size(); ++I) {
const auto *TI = Types[I];
@@ -4202,7 +4203,7 @@ void TypoCorrectionConsumer::performQualifiedLookups() {
SS->getScopeRep()->print(OldOStream, SemaRef.getPrintingPolicy());
OldOStream << Typo->getName();
// If correction candidate would be an identical written qualified
- // identifer, then the existing CXXScopeSpec probably included a
+ // identifier, then the existing CXXScopeSpec probably included a
// typedef that didn't get accounted for properly.
if (OldOStream.str() == NewQualified)
break;
diff --git a/lib/Sema/SemaOpenMP.cpp b/lib/Sema/SemaOpenMP.cpp
index 75351ebabd..2daf45411e 100644
--- a/lib/Sema/SemaOpenMP.cpp
+++ b/lib/Sema/SemaOpenMP.cpp
@@ -138,9 +138,11 @@ private:
  /// 'ordered' clause, the second one is true if the region has 'ordered'
/// clause, false otherwise.
llvm::Optional<std::pair<const Expr *, OMPOrderedClause *>> OrderedRegion;
+ unsigned AssociatedLoops = 1;
+ const Decl *PossiblyLoopCounter = nullptr;
bool NowaitRegion = false;
bool CancelRegion = false;
- unsigned AssociatedLoops = 1;
+ bool LoopStart = false;
SourceLocation InnerTeamsRegionLoc;
/// Reference to the taskgroup task_reduction reference expression.
Expr *TaskgroupReductionRef = nullptr;
@@ -162,6 +164,9 @@ private:
OpenMPClauseKind ClauseKindMode = OMPC_unknown;
Sema &SemaRef;
bool ForceCapturing = false;
+  /// true if all the variables in the target executable directives must be
+ /// captured by reference.
+ bool ForceCaptureByReferenceInTargetExecutable = false;
CriticalsWithHintsTy Criticals;
using iterator = StackTy::const_reverse_iterator;
@@ -193,6 +198,13 @@ public:
bool isForceVarCapturing() const { return ForceCapturing; }
void setForceVarCapturing(bool V) { ForceCapturing = V; }
+ void setForceCaptureByReferenceInTargetExecutable(bool V) {
+ ForceCaptureByReferenceInTargetExecutable = V;
+ }
+ bool isForceCaptureByReferenceInTargetExecutable() const {
+ return ForceCaptureByReferenceInTargetExecutable;
+ }
+
void push(OpenMPDirectiveKind DKind, const DeclarationNameInfo &DirName,
Scope *CurScope, SourceLocation Loc) {
if (Stack.empty() ||
@@ -208,6 +220,33 @@ public:
Stack.back().first.pop_back();
}
+  /// Marks that we've started loop parsing.
+ void loopInit() {
+ assert(isOpenMPLoopDirective(getCurrentDirective()) &&
+ "Expected loop-based directive.");
+ Stack.back().first.back().LoopStart = true;
+ }
+ /// Start capturing of the variables in the loop context.
+ void loopStart() {
+ assert(isOpenMPLoopDirective(getCurrentDirective()) &&
+ "Expected loop-based directive.");
+ Stack.back().first.back().LoopStart = false;
+ }
+ /// true, if variables are captured, false otherwise.
+ bool isLoopStarted() const {
+ assert(isOpenMPLoopDirective(getCurrentDirective()) &&
+ "Expected loop-based directive.");
+ return !Stack.back().first.back().LoopStart;
+ }
+ /// Marks (or clears) declaration as possibly loop counter.
+ void resetPossibleLoopCounter(const Decl *D = nullptr) {
+ Stack.back().first.back().PossiblyLoopCounter =
+ D ? D->getCanonicalDecl() : D;
+ }
+ /// Gets the possible loop counter decl.
+ const Decl *getPossiblyLoopCunter() const {
+ return Stack.back().first.back().PossiblyLoopCounter;
+ }
/// Start new OpenMP region stack in new non-capturing function.
void pushFunction() {
const FunctionScopeInfo *CurFnScope = SemaRef.getCurFunction();
@@ -354,7 +393,7 @@ public:
return OMPD_unknown;
return std::next(Stack.back().first.rbegin())->Directive;
}
-
+
/// Add requires decl to internal vector
void addRequiresDecl(OMPRequiresDecl *RD) {
RequiresDecls.push_back(RD);
@@ -381,7 +420,7 @@ public:
}
return IsDuplicate;
}
-
+
/// Set default data sharing attribute to none.
void setDefaultDSANone(SourceLocation Loc) {
assert(!isStackEmpty());
@@ -630,8 +669,8 @@ bool isParallelOrTaskRegion(OpenMPDirectiveKind DKind) {
} // namespace
static const Expr *getExprAsWritten(const Expr *E) {
- if (const auto *ExprTemp = dyn_cast<ExprWithCleanups>(E))
- E = ExprTemp->getSubExpr();
+ if (const auto *FE = dyn_cast<FullExpr>(E))
+ E = FE->getSubExpr();
if (const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
E = MTE->GetTemporaryExpr();
@@ -1236,10 +1275,16 @@ bool DSAStackTy::hasExplicitDSA(
return false;
std::advance(StartI, Level);
auto I = StartI->SharingMap.find(D);
- return (I != StartI->SharingMap.end()) &&
+ if ((I != StartI->SharingMap.end()) &&
I->getSecond().RefExpr.getPointer() &&
CPred(I->getSecond().Attributes) &&
- (!NotLastprivate || !I->getSecond().RefExpr.getInt());
+ (!NotLastprivate || !I->getSecond().RefExpr.getInt()))
+ return true;
+ // Check predetermined rules for the loop control variables.
+ auto LI = StartI->LCVMap.find(D);
+ if (LI != StartI->LCVMap.end())
+ return CPred(OMPC_private);
+ return false;
}
bool DSAStackTy::hasExplicitDirective(
@@ -1406,6 +1451,8 @@ bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const {
// By default, all the data that has a scalar type is mapped by copy
// (except for reduction variables).
IsByRef =
+ (DSAStack->isForceCaptureByReferenceInTargetExecutable() &&
+ !Ty->isAnyPointerType()) ||
!Ty->isScalarType() ||
DSAStack->getDefaultDMAAtLevel(Level) == DMA_tofrom_scalar ||
DSAStack->hasExplicitDSA(
@@ -1415,10 +1462,12 @@ bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const {
if (IsByRef && Ty.getNonReferenceType()->isScalarType()) {
IsByRef =
- !DSAStack->hasExplicitDSA(
- D,
- [](OpenMPClauseKind K) -> bool { return K == OMPC_firstprivate; },
- Level, /*NotLastprivate=*/true) &&
+ ((DSAStack->isForceCaptureByReferenceInTargetExecutable() &&
+ !Ty->isAnyPointerType()) ||
+ !DSAStack->hasExplicitDSA(
+ D,
+ [](OpenMPClauseKind K) -> bool { return K == OMPC_firstprivate; },
+ Level, /*NotLastprivate=*/true)) &&
// If the variable is artificial and must be captured by value - try to
// capture by value.
!(isa<OMPCapturedExprDecl>(D) && !D->hasAttr<OMPCaptureNoInitAttr>() &&
@@ -1480,6 +1529,49 @@ VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D) {
return VD;
}
}
+ // Capture variables captured by reference in lambdas for target-based
+ // directives.
+ if (VD && !DSAStack->isClauseParsingMode()) {
+ if (const auto *RD = VD->getType()
+ .getCanonicalType()
+ .getNonReferenceType()
+ ->getAsCXXRecordDecl()) {
+ bool SavedForceCaptureByReferenceInTargetExecutable =
+ DSAStack->isForceCaptureByReferenceInTargetExecutable();
+ DSAStack->setForceCaptureByReferenceInTargetExecutable(/*V=*/true);
+ if (RD->isLambda()) {
+ llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
+ FieldDecl *ThisCapture;
+ RD->getCaptureFields(Captures, ThisCapture);
+ for (const LambdaCapture &LC : RD->captures()) {
+ if (LC.getCaptureKind() == LCK_ByRef) {
+ VarDecl *VD = LC.getCapturedVar();
+ DeclContext *VDC = VD->getDeclContext();
+ if (!VDC->Encloses(CurContext))
+ continue;
+ DSAStackTy::DSAVarData DVarPrivate =
+ DSAStack->getTopDSA(VD, /*FromParent=*/false);
+ // Do not capture already captured variables.
+ if (!OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD) &&
+ DVarPrivate.CKind == OMPC_unknown &&
+ !DSAStack->checkMappableExprComponentListsForDecl(
+ D, /*CurrentRegionOnly=*/true,
+ [](OMPClauseMappableExprCommon::
+ MappableExprComponentListRef,
+ OpenMPClauseKind) { return true; }))
+ MarkVariableReferenced(LC.getLocation(), LC.getCapturedVar());
+ } else if (LC.getCaptureKind() == LCK_This) {
+ QualType ThisTy = getCurrentThisType();
+ if (!ThisTy.isNull() &&
+ Context.typesAreCompatible(ThisTy, ThisCapture->getType()))
+ CheckCXXThisCapture(LC.getLocation());
+ }
+ }
+ }
+ DSAStack->setForceCaptureByReferenceInTargetExecutable(
+ SavedForceCaptureByReferenceInTargetExecutable);
+ }
+ }
if (DSAStack->getCurrentDirective() != OMPD_unknown &&
(!DSAStack->isClauseParsingMode() ||
@@ -1510,8 +1602,28 @@ void Sema::adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
FunctionScopesIndex -= Regions.size();
}
+void Sema::startOpenMPLoop() {
+ assert(LangOpts.OpenMP && "OpenMP must be enabled.");
+ if (isOpenMPLoopDirective(DSAStack->getCurrentDirective()))
+ DSAStack->loopInit();
+}
+
bool Sema::isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
+ if (isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
+ if (DSAStack->getAssociatedLoops() > 0 &&
+ !DSAStack->isLoopStarted()) {
+ DSAStack->resetPossibleLoopCounter(D);
+ DSAStack->loopStart();
+ return true;
+ }
+ if ((DSAStack->getPossiblyLoopCunter() == D->getCanonicalDecl() ||
+ DSAStack->isLoopControlVariable(D).first) &&
+ !DSAStack->hasExplicitDSA(
+ D, [](OpenMPClauseKind K) { return K != OMPC_private; }, Level) &&
+ !isOpenMPSimdDirective(DSAStack->getCurrentDirective()))
+ return true;
+ }
return DSAStack->hasExplicitDSA(
D, [](OpenMPClauseKind K) { return K == OMPC_private; }, Level) ||
(DSAStack->isClauseParsingMode() &&
@@ -2021,6 +2133,30 @@ class DSAAttrChecker final : public StmtVisitor<DSAAttrChecker, void> {
Sema::VarsWithInheritedDSAType VarsWithInheritedDSA;
llvm::SmallDenseSet<const ValueDecl *, 4> ImplicitDeclarations;
+ void VisitSubCaptures(OMPExecutableDirective *S) {
+ // Check implicitly captured variables.
+ if (!S->hasAssociatedStmt() || !S->getAssociatedStmt())
+ return;
+ for (const CapturedStmt::Capture &Cap :
+ S->getInnermostCapturedStmt()->captures()) {
+ if (!Cap.capturesVariable())
+ continue;
+ VarDecl *VD = Cap.getCapturedVar();
+ // Do not try to map the variable if it or its sub-component was mapped
+ // already.
+ if (isOpenMPTargetExecutionDirective(Stack->getCurrentDirective()) &&
+ Stack->checkMappableExprComponentListsForDecl(
+ VD, /*CurrentRegionOnly=*/true,
+ [](OMPClauseMappableExprCommon::MappableExprComponentListRef,
+ OpenMPClauseKind) { return true; }))
+ continue;
+ DeclRefExpr *DRE = buildDeclRefExpr(
+ SemaRef, VD, VD->getType().getNonLValueExprType(SemaRef.Context),
+ Cap.getLocation(), /*RefersToCapture=*/true);
+ Visit(DRE);
+ }
+ }
+
public:
void VisitDeclRefExpr(DeclRefExpr *E) {
if (E->isTypeDependent() || E->isValueDependent() ||
@@ -2181,8 +2317,14 @@ public:
// Define implicit data-sharing attributes for task.
DVar = Stack->getImplicitDSA(FD, /*FromParent=*/false);
if (isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared &&
- !Stack->isLoopControlVariable(FD).first)
- ImplicitFirstprivate.push_back(E);
+ !Stack->isLoopControlVariable(FD).first) {
+ // Check if there is a captured expression for the current field in the
+ // region. Do not mark it as firstprivate unless there is no captured
+ // expression.
+ // TODO: try to make it firstprivate.
+ if (DVar.CKind != OMPC_unknown)
+ ImplicitFirstprivate.push_back(E);
+ }
return;
}
if (isOpenMPTargetExecutionDirective(DKind)) {
@@ -2242,11 +2384,20 @@ public:
}
}
}
+ // Check implicitly captured variables.
+ VisitSubCaptures(S);
}
void VisitStmt(Stmt *S) {
for (Stmt *C : S->children()) {
- if (C && !isa<OMPExecutableDirective>(C))
- Visit(C);
+ if (C) {
+ if (auto *OED = dyn_cast<OMPExecutableDirective>(C)) {
+ // Check implicitly captured variables in the task-based directives to
+ // check if they must be firstprivatized.
+ VisitSubCaptures(OED);
+ } else {
+ Visit(C);
+ }
+ }
}
}
@@ -3760,7 +3911,8 @@ class OpenMPIterationSpaceChecker {
/// Var <= UB
/// UB > Var
/// UB >= Var
- bool TestIsLessOp = false;
+ /// This will have no value when the condition is !=
+ llvm::Optional<bool> TestIsLessOp;
/// This flag is true when condition is strict ( < or > ).
bool TestIsStrictOp = false;
/// This flag is true when step is subtracted on each iteration.
@@ -3826,8 +3978,8 @@ private:
/// Helper to set loop counter variable and its initializer.
bool setLCDeclAndLB(ValueDecl *NewLCDecl, Expr *NewDeclRefExpr, Expr *NewLB);
/// Helper to set upper bound.
- bool setUB(Expr *NewUB, bool LessOp, bool StrictOp, SourceRange SR,
- SourceLocation SL);
+ bool setUB(Expr *NewUB, llvm::Optional<bool> LessOp, bool StrictOp,
+ SourceRange SR, SourceLocation SL);
/// Helper to set loop increment.
bool setStep(Expr *NewStep, bool Subtract);
};
@@ -3862,15 +4014,17 @@ bool OpenMPIterationSpaceChecker::setLCDeclAndLB(ValueDecl *NewLCDecl,
return false;
}
-bool OpenMPIterationSpaceChecker::setUB(Expr *NewUB, bool LessOp, bool StrictOp,
- SourceRange SR, SourceLocation SL) {
+bool OpenMPIterationSpaceChecker::setUB(Expr *NewUB, llvm::Optional<bool> LessOp,
+ bool StrictOp, SourceRange SR,
+ SourceLocation SL) {
// State consistency checking to ensure correct usage.
assert(LCDecl != nullptr && LB != nullptr && UB == nullptr &&
Step == nullptr && !TestIsLessOp && !TestIsStrictOp);
if (!NewUB)
return true;
UB = NewUB;
- TestIsLessOp = LessOp;
+ if (LessOp)
+ TestIsLessOp = LessOp;
TestIsStrictOp = StrictOp;
ConditionSrcRange = SR;
ConditionLoc = SL;
@@ -3910,18 +4064,23 @@ bool OpenMPIterationSpaceChecker::setStep(Expr *NewStep, bool Subtract) {
bool IsConstPos =
IsConstant && Result.isSigned() && (Subtract == Result.isNegative());
bool IsConstZero = IsConstant && !Result.getBoolValue();
+
+ // != with increment is treated as <; != with decrement is treated as >
+ if (!TestIsLessOp.hasValue())
+ TestIsLessOp = IsConstPos || (IsUnsigned && !Subtract);
if (UB && (IsConstZero ||
- (TestIsLessOp ? (IsConstNeg || (IsUnsigned && Subtract))
- : (IsConstPos || (IsUnsigned && !Subtract))))) {
+ (TestIsLessOp.getValue() ?
+ (IsConstNeg || (IsUnsigned && Subtract)) :
+ (IsConstPos || (IsUnsigned && !Subtract))))) {
SemaRef.Diag(NewStep->getExprLoc(),
diag::err_omp_loop_incr_not_compatible)
- << LCDecl << TestIsLessOp << NewStep->getSourceRange();
+ << LCDecl << TestIsLessOp.getValue() << NewStep->getSourceRange();
SemaRef.Diag(ConditionLoc,
diag::note_omp_loop_cond_requres_compatible_incr)
- << TestIsLessOp << ConditionSrcRange;
+ << TestIsLessOp.getValue() << ConditionSrcRange;
return true;
}
- if (TestIsLessOp == Subtract) {
+ if (TestIsLessOp.getValue() == Subtract) {
NewStep =
SemaRef.CreateBuiltinUnaryOp(NewStep->getExprLoc(), UO_Minus, NewStep)
.get();
@@ -4062,7 +4221,12 @@ bool OpenMPIterationSpaceChecker::checkAndSetCond(Expr *S) {
(BO->getOpcode() == BO_GT || BO->getOpcode() == BO_GE),
(BO->getOpcode() == BO_LT || BO->getOpcode() == BO_GT),
BO->getSourceRange(), BO->getOperatorLoc());
- }
+ } else if (BO->getOpcode() == BO_NE)
+ return setUB(getInitLCDecl(BO->getLHS()) == LCDecl ?
+ BO->getRHS() : BO->getLHS(),
+ /*LessOp=*/llvm::None,
+ /*StrictOp=*/true,
+ BO->getSourceRange(), BO->getOperatorLoc());
} else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(S)) {
if (CE->getNumArgs() == 2) {
auto Op = CE->getOperator();
@@ -4080,6 +4244,14 @@ bool OpenMPIterationSpaceChecker::checkAndSetCond(Expr *S) {
Op == OO_Less || Op == OO_Greater, CE->getSourceRange(),
CE->getOperatorLoc());
break;
+ case OO_ExclaimEqual:
+ return setUB(getInitLCDecl(CE->getArg(0)) == LCDecl ?
+ CE->getArg(1) : CE->getArg(0),
+ /*LessOp=*/llvm::None,
+ /*StrictOp=*/true,
+ CE->getSourceRange(),
+ CE->getOperatorLoc());
+ break;
default:
break;
}
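The two new cases above (BO_NE and OO_ExclaimEqual) let the canonical-loop checker accept '!=' conditions; per the comment in setStep, '!=' with a positive increment is treated like '<' and with a negative increment like '>'. A minimal sketch of a loop that now passes the check (illustrative):

    void work(int n) {
    #pragma omp parallel for
      for (int i = 0; i != n; ++i) {
        // loop body; the '!=' test is now accepted as a canonical loop condition
      }
    }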
@@ -4228,8 +4400,8 @@ Expr *OpenMPIterationSpaceChecker::buildNumIterations(
if (VarType->isIntegerType() || VarType->isPointerType() ||
SemaRef.getLangOpts().CPlusPlus) {
// Upper - Lower
- Expr *UBExpr = TestIsLessOp ? UB : LB;
- Expr *LBExpr = TestIsLessOp ? LB : UB;
+ Expr *UBExpr = TestIsLessOp.getValue() ? UB : LB;
+ Expr *LBExpr = TestIsLessOp.getValue() ? LB : UB;
Expr *Upper = tryBuildCapture(SemaRef, UBExpr, Captures).get();
Expr *Lower = tryBuildCapture(SemaRef, LBExpr, Captures).get();
if (!Upper || !Lower)
@@ -4330,8 +4502,9 @@ Expr *OpenMPIterationSpaceChecker::buildPreCond(
ExprResult CondExpr =
SemaRef.BuildBinOp(S, DefaultLoc,
- TestIsLessOp ? (TestIsStrictOp ? BO_LT : BO_LE)
- : (TestIsStrictOp ? BO_GT : BO_GE),
+ TestIsLessOp.getValue() ?
+ (TestIsStrictOp ? BO_LT : BO_LE) :
+ (TestIsStrictOp ? BO_GT : BO_GE),
NewLB.get(), NewUB.get());
if (CondExpr.isUsable()) {
if (!SemaRef.Context.hasSameUnqualifiedType(CondExpr.get()->getType(),
@@ -4409,9 +4582,9 @@ Expr *OpenMPIterationSpaceChecker::buildOrderedLoopData(
SemaRef.getLangOpts().CPlusPlus) {
// Upper - Lower
Expr *Upper =
- TestIsLessOp ? Cnt : tryBuildCapture(SemaRef, UB, Captures).get();
+ TestIsLessOp.getValue() ? Cnt : tryBuildCapture(SemaRef, UB, Captures).get();
Expr *Lower =
- TestIsLessOp ? tryBuildCapture(SemaRef, LB, Captures).get() : Cnt;
+ TestIsLessOp.getValue() ? tryBuildCapture(SemaRef, LB, Captures).get() : Cnt;
if (!Upper || !Lower)
return nullptr;
@@ -4493,6 +4666,15 @@ void Sema::ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init) {
}
}
DSAStack->addLoopControlVariable(D, VD);
+ const Decl *LD = DSAStack->getPossiblyLoopCunter();
+ if (LD != D->getCanonicalDecl()) {
+ DSAStack->resetPossibleLoopCounter();
+ if (auto *Var = dyn_cast_or_null<VarDecl>(LD))
+ MarkDeclarationsReferencedInExpr(
+ buildDeclRefExpr(*this, const_cast<VarDecl *>(Var),
+ Var->getType().getNonLValueExprType(Context),
+ ForLoc, /*RefersToCapture=*/true));
+ }
}
}
DSAStack->setAssociatedLoops(AssociatedLoops - 1);
@@ -4866,15 +5048,16 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
unsigned NestedLoopCount = 1;
if (CollapseLoopCountExpr) {
// Found 'collapse' clause - calculate collapse number.
- llvm::APSInt Result;
+ Expr::EvalResult Result;
if (CollapseLoopCountExpr->EvaluateAsInt(Result, SemaRef.getASTContext()))
- NestedLoopCount = Result.getLimitedValue();
+ NestedLoopCount = Result.Val.getInt().getLimitedValue();
}
unsigned OrderedLoopCount = 1;
if (OrderedLoopCountExpr) {
    // Found 'ordered' clause - calculate the number of ordered loops.
- llvm::APSInt Result;
- if (OrderedLoopCountExpr->EvaluateAsInt(Result, SemaRef.getASTContext())) {
+ Expr::EvalResult EVResult;
+ if (OrderedLoopCountExpr->EvaluateAsInt(EVResult, SemaRef.getASTContext())) {
+ llvm::APSInt Result = EVResult.Val.getInt();
if (Result.getLimitedValue() < NestedLoopCount) {
SemaRef.Diag(OrderedLoopCountExpr->getExprLoc(),
diag::err_omp_wrong_ordered_loop_count)
@@ -5201,6 +5384,13 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
? SemaRef.BuildBinOp(CurScope, CondLoc, BO_LE, IV.get(), UB.get())
: SemaRef.BuildBinOp(CurScope, CondLoc, BO_LT, IV.get(),
NumIterations.get());
+ ExprResult CombDistCond;
+ if (isOpenMPLoopBoundSharingDirective(DKind)) {
+ CombDistCond =
+ SemaRef.BuildBinOp(
+ CurScope, CondLoc, BO_LT, IV.get(), NumIterations.get());
+ }
+
ExprResult CombCond;
if (isOpenMPLoopBoundSharingDirective(DKind)) {
CombCond =
@@ -5275,7 +5465,7 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
// on PrevUB instead of NumIterations - used to implement 'for' when found
// in combination with 'distribute', like in 'distribute parallel for'
SourceLocation DistIncLoc = AStmt->getBeginLoc();
- ExprResult DistCond, DistInc, PrevEUB;
+ ExprResult DistCond, DistInc, PrevEUB, ParForInDistCond;
if (isOpenMPLoopBoundSharingDirective(DKind)) {
DistCond = SemaRef.BuildBinOp(CurScope, CondLoc, BO_LE, IV.get(), UB.get());
assert(DistCond.isUsable() && "distribute cond expr was not built");
@@ -5298,6 +5488,11 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
PrevEUB = SemaRef.BuildBinOp(CurScope, DistIncLoc, BO_Assign, UB.get(),
CondOp.get());
PrevEUB = SemaRef.ActOnFinishFullExpr(PrevEUB.get());
+
+  // Build IV <= PrevUB, to be used by a 'parallel for' when it appears in
+  // combination with a 'distribute' directive with schedule(static, 1).
+ ParForInDistCond =
+ SemaRef.BuildBinOp(CurScope, CondLoc, BO_LE, IV.get(), PrevUB.get());
}
// Build updates and final values of the loop counters.
@@ -5421,6 +5616,8 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
Built.DistCombinedFields.Cond = CombCond.get();
Built.DistCombinedFields.NLB = CombNextLB.get();
Built.DistCombinedFields.NUB = CombNextUB.get();
+ Built.DistCombinedFields.DistCond = CombDistCond.get();
+ Built.DistCombinedFields.ParForInDistCond = ParForInDistCond.get();
return NestedLoopCount;
}
@@ -5456,7 +5653,6 @@ static bool checkSimdlenSafelenSpecified(Sema &S,
}
if (Simdlen && Safelen) {
- llvm::APSInt SimdlenRes, SafelenRes;
const Expr *SimdlenLength = Simdlen->getSimdlen();
const Expr *SafelenLength = Safelen->getSafelen();
if (SimdlenLength->isValueDependent() || SimdlenLength->isTypeDependent() ||
@@ -5467,8 +5663,11 @@ static bool checkSimdlenSafelenSpecified(Sema &S,
SafelenLength->isInstantiationDependent() ||
SafelenLength->containsUnexpandedParameterPack())
return false;
- SimdlenLength->EvaluateAsInt(SimdlenRes, S.Context);
- SafelenLength->EvaluateAsInt(SafelenRes, S.Context);
+ Expr::EvalResult SimdlenResult, SafelenResult;
+ SimdlenLength->EvaluateAsInt(SimdlenResult, S.Context);
+ SafelenLength->EvaluateAsInt(SafelenResult, S.Context);
+ llvm::APSInt SimdlenRes = SimdlenResult.Val.getInt();
+ llvm::APSInt SafelenRes = SafelenResult.Val.getInt();
// OpenMP 4.5 [2.8.1, simd Construct, Restrictions]
// If both simdlen and safelen clauses are specified, the value of the
// simdlen parameter must be less than or equal to the value of the safelen
@@ -8011,6 +8210,10 @@ OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
case OMPC_use_device_ptr:
case OMPC_is_device_ptr:
case OMPC_unified_address:
+ case OMPC_unified_shared_memory:
+ case OMPC_reverse_offload:
+ case OMPC_dynamic_allocators:
+ case OMPC_atomic_default_mem_order:
llvm_unreachable("Clause is not allowed.");
}
return Res;
@@ -8533,6 +8736,10 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPC_use_device_ptr:
case OMPC_is_device_ptr:
case OMPC_unified_address:
+ case OMPC_unified_shared_memory:
+ case OMPC_reverse_offload:
+ case OMPC_dynamic_allocators:
+ case OMPC_atomic_default_mem_order:
llvm_unreachable("Unexpected OpenMP clause.");
}
return CaptureRegion;
@@ -8801,6 +9008,11 @@ OMPClause *Sema::ActOnOpenMPSimpleClause(
static_cast<OpenMPProcBindClauseKind>(Argument), ArgumentLoc, StartLoc,
LParenLoc, EndLoc);
break;
+ case OMPC_atomic_default_mem_order:
+ Res = ActOnOpenMPAtomicDefaultMemOrderClause(
+ static_cast<OpenMPAtomicDefaultMemOrderClauseKind>(Argument),
+ ArgumentLoc, StartLoc, LParenLoc, EndLoc);
+ break;
case OMPC_if:
case OMPC_final:
case OMPC_num_threads:
@@ -8851,6 +9063,9 @@ OMPClause *Sema::ActOnOpenMPSimpleClause(
case OMPC_use_device_ptr:
case OMPC_is_device_ptr:
case OMPC_unified_address:
+ case OMPC_unified_shared_memory:
+ case OMPC_reverse_offload:
+ case OMPC_dynamic_allocators:
llvm_unreachable("Clause is not allowed.");
}
return Res;
@@ -8923,6 +9138,21 @@ OMPClause *Sema::ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
OMPProcBindClause(Kind, KindKwLoc, StartLoc, LParenLoc, EndLoc);
}
+OMPClause *Sema::ActOnOpenMPAtomicDefaultMemOrderClause(
+ OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindKwLoc,
+ SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) {
+ if (Kind == OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown) {
+ Diag(KindKwLoc, diag::err_omp_unexpected_clause_value)
+ << getListOfPossibleValues(
+ OMPC_atomic_default_mem_order, /*First=*/0,
+ /*Last=*/OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown)
+ << getOpenMPClauseName(OMPC_atomic_default_mem_order);
+ return nullptr;
+ }
+ return new (Context) OMPAtomicDefaultMemOrderClause(Kind, KindKwLoc, StartLoc,
+ LParenLoc, EndLoc);
+}
+
OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Argument, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
@@ -9008,6 +9238,10 @@ OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause(
case OMPC_use_device_ptr:
case OMPC_is_device_ptr:
case OMPC_unified_address:
+ case OMPC_unified_shared_memory:
+ case OMPC_reverse_offload:
+ case OMPC_dynamic_allocators:
+ case OMPC_atomic_default_mem_order:
llvm_unreachable("Clause is not allowed.");
}
return Res;
@@ -9166,6 +9400,15 @@ OMPClause *Sema::ActOnOpenMPClause(OpenMPClauseKind Kind,
case OMPC_unified_address:
Res = ActOnOpenMPUnifiedAddressClause(StartLoc, EndLoc);
break;
+ case OMPC_unified_shared_memory:
+ Res = ActOnOpenMPUnifiedSharedMemoryClause(StartLoc, EndLoc);
+ break;
+ case OMPC_reverse_offload:
+ Res = ActOnOpenMPReverseOffloadClause(StartLoc, EndLoc);
+ break;
+ case OMPC_dynamic_allocators:
+ Res = ActOnOpenMPDynamicAllocatorsClause(StartLoc, EndLoc);
+ break;
case OMPC_if:
case OMPC_final:
case OMPC_num_threads:
@@ -9205,6 +9448,7 @@ OMPClause *Sema::ActOnOpenMPClause(OpenMPClauseKind Kind,
case OMPC_from:
case OMPC_use_device_ptr:
case OMPC_is_device_ptr:
+ case OMPC_atomic_default_mem_order:
llvm_unreachable("Clause is not allowed.");
}
return Res;
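
The new cases route the OpenMP 5.0 'requires' clauses introduced in this patch (unified_shared_memory, reverse_offload, dynamic_allocators, atomic_default_mem_order) to their dedicated Sema entry points below. A hedged usage sketch of the directive these clauses belong to — not taken from the patch's tests, and assuming an OpenMP 5.0 invocation such as -fopenmp -fopenmp-version=50:

    /* Declares memory-model requirements for the whole compilation unit. */
    #pragma omp requires unified_shared_memory atomic_default_mem_order(seq_cst)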
@@ -9271,6 +9515,21 @@ OMPClause *Sema::ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
return new (Context) OMPUnifiedAddressClause(StartLoc, EndLoc);
}
+OMPClause *Sema::ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPUnifiedSharedMemoryClause(StartLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPReverseOffloadClause(StartLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPDynamicAllocatorsClause(StartLoc, EndLoc);
+}
+
OMPClause *Sema::ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> VarList, Expr *TailExpr,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc,
@@ -9379,6 +9638,10 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
case OMPC_unknown:
case OMPC_uniform:
case OMPC_unified_address:
+ case OMPC_unified_shared_memory:
+ case OMPC_reverse_offload:
+ case OMPC_dynamic_allocators:
+ case OMPC_atomic_default_mem_order:
llvm_unreachable("Clause is not allowed.");
}
return Res;
@@ -10409,10 +10672,11 @@ static bool checkOMPArraySectionConstantForReduction(
SingleElement = true;
ArraySizes.push_back(llvm::APSInt::get(1));
} else {
- llvm::APSInt ConstantLengthValue;
- if (!Length->EvaluateAsInt(ConstantLengthValue, Context))
+ Expr::EvalResult Result;
+ if (!Length->EvaluateAsInt(Result, Context))
return false;
+ llvm::APSInt ConstantLengthValue = Result.Val.getInt();
SingleElement = (ConstantLengthValue.getSExtValue() == 1);
ArraySizes.push_back(ConstantLengthValue);
}
@@ -10433,9 +10697,12 @@ static bool checkOMPArraySectionConstantForReduction(
// This is an array subscript which has implicit length 1!
ArraySizes.push_back(llvm::APSInt::get(1));
} else {
- llvm::APSInt ConstantLengthValue;
- if (!Length->EvaluateAsInt(ConstantLengthValue, Context) ||
- ConstantLengthValue.getSExtValue() != 1)
+ Expr::EvalResult Result;
+ if (!Length->EvaluateAsInt(Result, Context))
+ return false;
+
+ llvm::APSInt ConstantLengthValue = Result.Val.getInt();
+ if (ConstantLengthValue.getSExtValue() != 1)
return false;
ArraySizes.push_back(ConstantLengthValue);
@@ -11958,9 +12225,11 @@ static bool checkArrayExpressionDoesNotReferToWholeSize(Sema &SemaRef,
  // If there is a lower bound that does not evaluate to zero, we are not
// covering the whole dimension.
if (LowerBound) {
- llvm::APSInt ConstLowerBound;
- if (!LowerBound->EvaluateAsInt(ConstLowerBound, SemaRef.getASTContext()))
+ Expr::EvalResult Result;
+ if (!LowerBound->EvaluateAsInt(Result, SemaRef.getASTContext()))
return false; // Can't get the integer value as a constant.
+
+ llvm::APSInt ConstLowerBound = Result.Val.getInt();
if (ConstLowerBound.getSExtValue())
return true;
}
@@ -11980,10 +12249,11 @@ static bool checkArrayExpressionDoesNotReferToWholeSize(Sema &SemaRef,
if (!CATy)
return false;
- llvm::APSInt ConstLength;
- if (!Length->EvaluateAsInt(ConstLength, SemaRef.getASTContext()))
+ Expr::EvalResult Result;
+ if (!Length->EvaluateAsInt(Result, SemaRef.getASTContext()))
return false; // Can't get the integer value as a constant.
+ llvm::APSInt ConstLength = Result.Val.getInt();
return CATy->getSize().getSExtValue() != ConstLength.getSExtValue();
}
@@ -12014,10 +12284,11 @@ static bool checkArrayExpressionDoesNotReferToUnitySize(Sema &SemaRef,
}
// Check if the length evaluates to 1.
- llvm::APSInt ConstLength;
- if (!Length->EvaluateAsInt(ConstLength, SemaRef.getASTContext()))
+ Expr::EvalResult Result;
+ if (!Length->EvaluateAsInt(Result, SemaRef.getASTContext()))
return false; // Can't get the integer value as a constant.
+ llvm::APSInt ConstLength = Result.Val.getInt();
return ConstLength.getSExtValue() != 1;
}
diff --git a/lib/Sema/SemaOverload.cpp b/lib/Sema/SemaOverload.cpp
index f4ec04a1b2..4c7d61d79e 100644
--- a/lib/Sema/SemaOverload.cpp
+++ b/lib/Sema/SemaOverload.cpp
@@ -1142,8 +1142,9 @@ bool Sema::IsOverload(FunctionDecl *New, FunctionDecl *Old,
// function yet (because we haven't yet resolved whether this is a static
// or non-static member function). Add it now, on the assumption that this
// is a redeclaration of OldMethod.
- unsigned OldQuals = OldMethod->getTypeQualifiers();
- unsigned NewQuals = NewMethod->getTypeQualifiers();
+ // FIXME: OpenCL: Need to consider address spaces
+ unsigned OldQuals = OldMethod->getTypeQualifiers().getCVRUQualifiers();
+ unsigned NewQuals = NewMethod->getTypeQualifiers().getCVRUQualifiers();
if (!getLangOpts().CPlusPlus14 && NewMethod->isConstexpr() &&
!isa<CXXConstructorDecl>(NewMethod))
NewQuals |= Qualifiers::Const;
@@ -1418,7 +1419,7 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
bool AllowObjCWritebackConversion
= getLangOpts().ObjCAutoRefCount &&
(Action == AA_Passing || Action == AA_Sending);
- if (getLangOpts().ObjC1)
+ if (getLangOpts().ObjC)
CheckObjCBridgeRelatedConversions(From->getBeginLoc(), ToType,
From->getType(), From);
ICS = ::TryImplicitConversion(*this, From, ToType,
@@ -2395,7 +2396,7 @@ static QualType AdoptQualifiers(ASTContext &Context, QualType T, Qualifiers Qs){
bool Sema::isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType,
bool &IncompatibleObjC) {
- if (!getLangOpts().ObjC1)
+ if (!getLangOpts().ObjC)
return false;
// The set of qualifiers on the type we're converting from.
@@ -2823,8 +2824,9 @@ void Sema::HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
return;
}
- unsigned FromQuals = FromFunction->getTypeQuals(),
- ToQuals = ToFunction->getTypeQuals();
+ // FIXME: OpenCL: Need to consider address spaces
+ unsigned FromQuals = FromFunction->getTypeQuals().getCVRUQualifiers();
+ unsigned ToQuals = ToFunction->getTypeQuals().getCVRUQualifiers();
if (FromQuals != ToQuals) {
PDiag << ft_qualifer_mismatch << ToQuals << FromQuals;
return;
@@ -3516,7 +3518,7 @@ Sema::DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType) {
static ImplicitConversionSequence::CompareKind
compareConversionFunctions(Sema &S, FunctionDecl *Function1,
FunctionDecl *Function2) {
- if (!S.getLangOpts().ObjC1 || !S.getLangOpts().CPlusPlus11)
+ if (!S.getLangOpts().ObjC || !S.getLangOpts().CPlusPlus11)
return ImplicitConversionSequence::Indistinguishable;
// Objective-C++:
@@ -3900,6 +3902,31 @@ CompareStandardConversionSequences(Sema &S, SourceLocation Loc,
S.Context.getTypeSize(SCS1.getToType(2)))
return ImplicitConversionSequence::Better;
+ // Prefer a compatible vector conversion over a lax vector conversion
+ // For example:
+ //
+ // typedef float __v4sf __attribute__((__vector_size__(16)));
+ // void f(vector float);
+ // void f(vector signed int);
+ // int main() {
+ // __v4sf a;
+ // f(a);
+ // }
+ // Here, we'd like to choose f(vector float) and not
+ // report an ambiguous call error
+ if (SCS1.Second == ICK_Vector_Conversion &&
+ SCS2.Second == ICK_Vector_Conversion) {
+ bool SCS1IsCompatibleVectorConversion = S.Context.areCompatibleVectorTypes(
+ SCS1.getFromType(), SCS1.getToType(2));
+ bool SCS2IsCompatibleVectorConversion = S.Context.areCompatibleVectorTypes(
+ SCS2.getFromType(), SCS2.getToType(2));
+
+ if (SCS1IsCompatibleVectorConversion != SCS2IsCompatibleVectorConversion)
+ return SCS1IsCompatibleVectorConversion
+ ? ImplicitConversionSequence::Better
+ : ImplicitConversionSequence::Worse;
+ }
+
return ImplicitConversionSequence::Indistinguishable;
}
@@ -5040,9 +5067,15 @@ TryObjectArgumentInitialization(Sema &S, SourceLocation Loc, QualType FromType,
QualType ClassType = S.Context.getTypeDeclType(ActingContext);
// [class.dtor]p2: A destructor can be invoked for a const, volatile or
// const volatile object.
- unsigned Quals = isa<CXXDestructorDecl>(Method) ?
- Qualifiers::Const | Qualifiers::Volatile : Method->getTypeQualifiers();
- QualType ImplicitParamType = S.Context.getCVRQualifiedType(ClassType, Quals);
+ Qualifiers Quals;
+ if (isa<CXXDestructorDecl>(Method)) {
+ Quals.addConst();
+ Quals.addVolatile();
+ } else {
+ Quals = Method->getTypeQualifiers();
+ }
+
+ QualType ImplicitParamType = S.Context.getQualifiedType(ClassType, Quals);
// Set up the conversion sequence as a "bad" conversion, to allow us
// to exit early.
@@ -5108,7 +5141,7 @@ TryObjectArgumentInitialization(Sema &S, SourceLocation Loc, QualType FromType,
break;
case RQ_LValue:
- if (!FromClassification.isLValue() && Quals != Qualifiers::Const) {
+ if (!FromClassification.isLValue() && !Quals.hasOnlyConst()) {
// non-const lvalue reference cannot bind to an rvalue
ICS.setBad(BadConversionSequence::lvalue_ref_to_rvalue, FromType,
ImplicitParamType);
@@ -5224,9 +5257,14 @@ Sema::PerformObjectArgumentInitialization(Expr *From,
From = FromRes.get();
}
- if (!Context.hasSameType(From->getType(), DestType))
- From = ImpCastExprToType(From, DestType, CK_NoOp,
+ if (!Context.hasSameType(From->getType(), DestType)) {
+ if (From->getType().getAddressSpace() != DestType.getAddressSpace())
+ From = ImpCastExprToType(From, DestType, CK_AddressSpaceConversion,
+ From->getValueKind()).get();
+ else
+ From = ImpCastExprToType(From, DestType, CK_NoOp,
From->getValueKind()).get();
+ }
return From;
}
@@ -5444,7 +5482,7 @@ static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
if (Notes.empty()) {
// It's a constant expression.
- return Result;
+ return ConstantExpr::Create(S.Context, Result.get());
}
}
@@ -5921,15 +5959,13 @@ static bool IsAcceptableNonMemberOperatorCandidate(ASTContext &Context,
/// \param PartialOverloading true if we are performing "partial" overloading
/// based on an incomplete set of function arguments. This feature is used by
/// code completion.
-void
-Sema::AddOverloadCandidate(FunctionDecl *Function,
- DeclAccessPair FoundDecl,
- ArrayRef<Expr *> Args,
- OverloadCandidateSet &CandidateSet,
- bool SuppressUserConversions,
- bool PartialOverloading,
- bool AllowExplicit,
- ConversionSequenceList EarlyConversions) {
+void Sema::AddOverloadCandidate(FunctionDecl *Function,
+ DeclAccessPair FoundDecl, ArrayRef<Expr *> Args,
+ OverloadCandidateSet &CandidateSet,
+ bool SuppressUserConversions,
+ bool PartialOverloading, bool AllowExplicit,
+ ADLCallKind IsADLCandidate,
+ ConversionSequenceList EarlyConversions) {
const FunctionProtoType *Proto
= dyn_cast<FunctionProtoType>(Function->getType()->getAs<FunctionType>());
assert(Proto && "Functions without a prototype cannot be overloaded");
@@ -5988,6 +6024,7 @@ Sema::AddOverloadCandidate(FunctionDecl *Function,
Candidate.Function = Function;
Candidate.Viable = true;
Candidate.IsSurrogate = false;
+ Candidate.IsADLCandidate = IsADLCandidate;
Candidate.IgnoreObjectArgument = false;
Candidate.ExplicitCallArguments = Args.size();
@@ -6404,7 +6441,12 @@ void Sema::AddFunctionCandidates(const UnresolvedSetImpl &Fns,
if (Expr *E = Args[0]) {
// Use the explicit base to restrict the lookup:
ObjectType = E->getType();
- ObjectClassification = E->Classify(Context);
+ // Pointers in the object arguments are implicitly dereferenced, so we
+ // always classify them as l-values.
+ if (!ObjectType.isNull() && ObjectType->isPointerType())
+ ObjectClassification = Expr::Classification::makeSimpleLValue();
+ else
+ ObjectClassification = E->Classify(Context);
} // .. else there is an implicit base.
FunctionArgs = Args.slice(1);
}
@@ -6685,14 +6727,11 @@ Sema::AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
/// Add a C++ function template specialization as a candidate
/// in the candidate set, using template argument deduction to produce
/// an appropriate function template specialization.
-void
-Sema::AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
- DeclAccessPair FoundDecl,
- TemplateArgumentListInfo *ExplicitTemplateArgs,
- ArrayRef<Expr *> Args,
- OverloadCandidateSet& CandidateSet,
- bool SuppressUserConversions,
- bool PartialOverloading) {
+void Sema::AddTemplateOverloadCandidate(
+ FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
+ TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
+ OverloadCandidateSet &CandidateSet, bool SuppressUserConversions,
+ bool PartialOverloading, ADLCallKind IsADLCandidate) {
if (!CandidateSet.isNewCandidate(FunctionTemplate))
return;
@@ -6721,6 +6760,7 @@ Sema::AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
Candidate.Function = FunctionTemplate->getTemplatedDecl();
Candidate.Viable = false;
Candidate.IsSurrogate = false;
+ Candidate.IsADLCandidate = IsADLCandidate;
// Ignore the object argument if there is one, since we don't have an object
// type.
Candidate.IgnoreObjectArgument =
@@ -6742,7 +6782,7 @@ Sema::AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
assert(Specialization && "Missing function template specialization?");
AddOverloadCandidate(Specialization, FoundDecl, Args, CandidateSet,
SuppressUserConversions, PartialOverloading,
- /*AllowExplicit*/false, Conversions);
+ /*AllowExplicit*/ false, IsADLCandidate, Conversions);
}
/// Check that implicit conversion sequences can be formed for each argument
@@ -8905,16 +8945,20 @@ Sema::AddArgumentDependentLookupCandidates(DeclarationName Name,
// set.
for (ADLResult::iterator I = Fns.begin(), E = Fns.end(); I != E; ++I) {
DeclAccessPair FoundDecl = DeclAccessPair::make(*I, AS_none);
+
if (FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) {
if (ExplicitTemplateArgs)
continue;
- AddOverloadCandidate(FD, FoundDecl, Args, CandidateSet, false,
- PartialOverloading);
- } else
- AddTemplateOverloadCandidate(cast<FunctionTemplateDecl>(*I),
- FoundDecl, ExplicitTemplateArgs,
- Args, CandidateSet, PartialOverloading);
+ AddOverloadCandidate(FD, FoundDecl, Args, CandidateSet,
+ /*SupressUserConversions=*/false, PartialOverloading,
+ /*AllowExplicit=*/false, ADLCallKind::UsesADL);
+ } else {
+ AddTemplateOverloadCandidate(cast<FunctionTemplateDecl>(*I), FoundDecl,
+ ExplicitTemplateArgs, Args, CandidateSet,
+ /*SupressUserConversions=*/false,
+ PartialOverloading, ADLCallKind::UsesADL);
+ }
}
}
@@ -8947,25 +8991,28 @@ static Comparison compareEnableIfAttrs(const Sema &S, const FunctionDecl *Cand1,
auto Cand1Attrs = Cand1->specific_attrs<EnableIfAttr>();
auto Cand2Attrs = Cand2->specific_attrs<EnableIfAttr>();
- auto Cand1I = Cand1Attrs.begin();
llvm::FoldingSetNodeID Cand1ID, Cand2ID;
- for (EnableIfAttr *Cand2A : Cand2Attrs) {
- Cand1ID.clear();
- Cand2ID.clear();
+ for (auto Pair : zip_longest(Cand1Attrs, Cand2Attrs)) {
+ Optional<EnableIfAttr *> Cand1A = std::get<0>(Pair);
+ Optional<EnableIfAttr *> Cand2A = std::get<1>(Pair);
// It's impossible for Cand1 to be better than (or equal to) Cand2 if Cand1
- // has fewer enable_if attributes than Cand2.
- auto Cand1A = Cand1I++;
- if (Cand1A == Cand1Attrs.end())
+ // has fewer enable_if attributes than Cand2, and vice versa.
+ if (!Cand1A)
return Comparison::Worse;
+ if (!Cand2A)
+ return Comparison::Better;
- Cand1A->getCond()->Profile(Cand1ID, S.getASTContext(), true);
- Cand2A->getCond()->Profile(Cand2ID, S.getASTContext(), true);
+ Cand1ID.clear();
+ Cand2ID.clear();
+
+ (*Cand1A)->getCond()->Profile(Cand1ID, S.getASTContext(), true);
+ (*Cand2A)->getCond()->Profile(Cand2ID, S.getASTContext(), true);
if (Cand1ID != Cand2ID)
return Comparison::Worse;
}
- return Cand1I == Cand1Attrs.end() ? Comparison::Equal : Comparison::Better;
+ return Comparison::Equal;
}
static bool isBetterMultiversionCandidate(const OverloadCandidate &Cand1,
@@ -9984,7 +10031,7 @@ static void DiagnoseBadDeduction(Sema &S, NamedDecl *Found, Decl *Templated,
DeductionFailure.getFirstArg()->getNonTypeTemplateArgumentType();
QualType T2 =
DeductionFailure.getSecondArg()->getNonTypeTemplateArgumentType();
- if (!S.Context.hasSameType(T1, T2)) {
+ if (!T1.isNull() && !T2.isNull() && !S.Context.hasSameType(T1, T2)) {
S.Diag(Templated->getLocation(),
diag::note_ovl_candidate_inconsistent_deduction_types)
<< ParamD->getDeclName() << *DeductionFailure.getFirstArg() << T1
@@ -10242,7 +10289,8 @@ static void DiagnoseOpenCLExtensionDisabled(Sema &S, OverloadCandidate *Cand) {
FunctionDecl *Callee = Cand->Function;
S.Diag(Callee->getLocation(),
- diag::note_ovl_candidate_disabled_by_extension);
+ diag::note_ovl_candidate_disabled_by_extension)
+ << S.getOpenCLExtensionsFromDeclExtMap(Callee);
}
/// Generates a 'note' diagnostic for an overload candidate. We've
@@ -10996,7 +11044,7 @@ private:
// Note: We explicitly leave Matches unmodified if there isn't a clear best
// option, so we can potentially give the user a better error
- if (!std::all_of(Matches.begin(), Matches.end(), IsBestOrInferiorToBest))
+ if (!llvm::all_of(Matches, IsBestOrInferiorToBest))
return false;
Matches[0] = *Best;
Matches.resize(1);
@@ -11987,7 +12035,8 @@ static ExprResult FinishOverloadedCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
return ExprError();
Fn = SemaRef.FixOverloadedFunctionReference(Fn, (*Best)->FoundDecl, FDecl);
return SemaRef.BuildResolvedCallExpr(Fn, FDecl, LParenLoc, Args, RParenLoc,
- ExecConfig);
+ ExecConfig, /*IsExecConfig=*/false,
+ (*Best)->IsADLCandidate);
}
case OR_No_Viable_Function: {
@@ -12039,7 +12088,8 @@ static ExprResult FinishOverloadedCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
FunctionDecl *FDecl = (*Best)->Function;
Fn = SemaRef.FixOverloadedFunctionReference(Fn, (*Best)->FoundDecl, FDecl);
return SemaRef.BuildResolvedCallExpr(Fn, FDecl, LParenLoc, Args, RParenLoc,
- ExecConfig);
+ ExecConfig, /*IsExecConfig=*/false,
+ (*Best)->IsADLCandidate);
}
}
@@ -12228,9 +12278,9 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
ResultTy = ResultTy.getNonLValueExprType(Context);
Args[0] = Input;
- CallExpr *TheCall =
- new (Context) CXXOperatorCallExpr(Context, Op, FnExpr.get(), ArgsArray,
- ResultTy, VK, OpLoc, FPOptions());
+ CallExpr *TheCall = new (Context)
+ CXXOperatorCallExpr(Context, Op, FnExpr.get(), ArgsArray, ResultTy,
+ VK, OpLoc, FPOptions(), Best->IsADLCandidate);
if (CheckCallReturnType(FnDecl->getReturnType(), OpLoc, TheCall, FnDecl))
return ExprError();
@@ -12460,10 +12510,9 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
ExprValueKind VK = Expr::getValueKindForType(ResultTy);
ResultTy = ResultTy.getNonLValueExprType(Context);
- CXXOperatorCallExpr *TheCall =
- new (Context) CXXOperatorCallExpr(Context, Op, FnExpr.get(),
- Args, ResultTy, VK, OpLoc,
- FPFeatures);
+ CXXOperatorCallExpr *TheCall = new (Context)
+ CXXOperatorCallExpr(Context, Op, FnExpr.get(), Args, ResultTy, VK,
+ OpLoc, FPFeatures, Best->IsADLCandidate);
if (CheckCallReturnType(FnDecl->getReturnType(), OpLoc, TheCall,
FnDecl))
@@ -12790,7 +12839,7 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
// Check that the object type isn't more qualified than the
// member function we're calling.
- Qualifiers funcQuals = Qualifiers::fromCVRMask(proto->getTypeQuals());
+ Qualifiers funcQuals = proto->getTypeQuals();
QualType objectType = op->getLHS()->getType();
if (op->getOpcode() == BO_PtrMemI)
@@ -12810,7 +12859,8 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
CXXMemberCallExpr *call
= new (Context) CXXMemberCallExpr(Context, MemExprE, Args,
- resultType, valueKind, RParenLoc);
+ resultType, valueKind, RParenLoc,
+ proto->getNumParams());
if (CheckCallReturnType(proto->getReturnType(), op->getRHS()->getBeginLoc(),
call, nullptr))
@@ -12960,9 +13010,11 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
ResultType = ResultType.getNonLValueExprType(Context);
assert(Method && "Member call to something that isn't a method?");
+ const auto *Proto = Method->getType()->getAs<FunctionProtoType>();
CXXMemberCallExpr *TheCall =
new (Context) CXXMemberCallExpr(Context, MemExprE, Args,
- ResultType, VK, RParenLoc);
+ ResultType, VK, RParenLoc,
+ Proto->getNumParams());
// Check for a valid return type.
if (CheckCallReturnType(Method->getReturnType(), MemExpr->getMemberLoc(),
@@ -12982,8 +13034,6 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
}
// Convert the rest of the arguments
- const FunctionProtoType *Proto =
- Method->getType()->getAs<FunctionProtoType>();
if (ConvertArgumentsForCall(TheCall, MemExpr, Method, Proto, Args,
RParenLoc))
return ExprError();
@@ -13231,29 +13281,14 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
if (NewFn.isInvalid())
return true;
+ // The number of argument slots to allocate in the call. If we have default
+ // arguments we need to allocate space for them as well. We additionally
+ // need one more slot for the object parameter.
+ unsigned NumArgsSlots = 1 + std::max<unsigned>(Args.size(), NumParams);
+
// Build the full argument list for the method call (the implicit object
// parameter is placed at the beginning of the list).
- SmallVector<Expr *, 8> MethodArgs(Args.size() + 1);
- MethodArgs[0] = Object.get();
- std::copy(Args.begin(), Args.end(), MethodArgs.begin() + 1);
-
- // Once we've built TheCall, all of the expressions are properly
- // owned.
- QualType ResultTy = Method->getReturnType();
- ExprValueKind VK = Expr::getValueKindForType(ResultTy);
- ResultTy = ResultTy.getNonLValueExprType(Context);
-
- CXXOperatorCallExpr *TheCall = new (Context)
- CXXOperatorCallExpr(Context, OO_Call, NewFn.get(), MethodArgs, ResultTy,
- VK, RParenLoc, FPOptions());
-
- if (CheckCallReturnType(Method->getReturnType(), LParenLoc, TheCall, Method))
- return true;
-
- // We may have default arguments. If so, we need to allocate more
- // slots in the call for them.
- if (Args.size() < NumParams)
- TheCall->setNumArgs(Context, NumParams + 1);
+ SmallVector<Expr *, 8> MethodArgs(NumArgsSlots);
bool IsError = false;
@@ -13265,7 +13300,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
IsError = true;
else
Object = ObjRes;
- TheCall->setArg(0, Object.get());
+ MethodArgs[0] = Object.get();
// Check the argument types.
for (unsigned i = 0; i != NumParams; i++) {
@@ -13294,7 +13329,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
Arg = DefArg.getAs<Expr>();
}
- TheCall->setArg(i + 1, Arg);
+ MethodArgs[i + 1] = Arg;
}
// If this is a variadic call, handle args passed through "...".
@@ -13304,14 +13339,27 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
ExprResult Arg = DefaultVariadicArgumentPromotion(Args[i], VariadicMethod,
nullptr);
IsError |= Arg.isInvalid();
- TheCall->setArg(i + 1, Arg.get());
+ MethodArgs[i + 1] = Arg.get();
}
}
- if (IsError) return true;
+ if (IsError)
+ return true;
DiagnoseSentinelCalls(Method, LParenLoc, Args);
+ // Once we've built TheCall, all of the expressions are properly owned.
+ QualType ResultTy = Method->getReturnType();
+ ExprValueKind VK = Expr::getValueKindForType(ResultTy);
+ ResultTy = ResultTy.getNonLValueExprType(Context);
+
+ CXXOperatorCallExpr *TheCall = new (Context)
+ CXXOperatorCallExpr(Context, OO_Call, NewFn.get(), MethodArgs, ResultTy,
+ VK, RParenLoc, FPOptions());
+
+ if (CheckCallReturnType(Method->getReturnType(), LParenLoc, TheCall, Method))
+ return true;
+
if (CheckFunctionCall(Method, TheCall, Proto))
return true;
diff --git a/lib/Sema/SemaStmt.cpp b/lib/Sema/SemaStmt.cpp
index f0d1947582..cc3c25cfb5 100644
--- a/lib/Sema/SemaStmt.cpp
+++ b/lib/Sema/SemaStmt.cpp
@@ -246,7 +246,7 @@ void Sema::DiagnoseUnusedExprResult(const Stmt *S) {
// we might want to make a more specific diagnostic. Check for one of these
// cases now.
unsigned DiagID = diag::warn_unused_expr;
- if (const ExprWithCleanups *Temps = dyn_cast<ExprWithCleanups>(E))
+ if (const FullExpr *Temps = dyn_cast<FullExpr>(E))
E = Temps->getSubExpr();
if (const CXXBindTemporaryExpr *TempExpr = dyn_cast<CXXBindTemporaryExpr>(E))
E = TempExpr->getSubExpr();
@@ -462,8 +462,8 @@ Sema::ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHSVal,
return StmtError();
}
- CaseStmt *CS = new (Context)
- CaseStmt(LHSVal.get(), RHSVal.get(), CaseLoc, DotDotDotLoc, ColonLoc);
+ auto *CS = CaseStmt::Create(Context, LHSVal.get(), RHSVal.get(),
+ CaseLoc, DotDotDotLoc, ColonLoc);
getCurFunction()->SwitchStack.back().getPointer()->addSwitchCase(CS);
return CS;
}
@@ -472,7 +472,7 @@ Sema::ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHSVal,
void Sema::ActOnCaseStmtBody(Stmt *caseStmt, Stmt *SubStmt) {
DiagnoseUnusedExprResult(SubStmt);
- CaseStmt *CS = static_cast<CaseStmt*>(caseStmt);
+ auto *CS = static_cast<CaseStmt *>(caseStmt);
CS->setSubStmt(SubStmt);
}
@@ -551,8 +551,9 @@ Sema::ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt,
false);
Expr *CondExpr = Cond.get().second;
- if (!Diags.isIgnored(diag::warn_comma_operator,
- CondExpr->getExprLoc()))
+ // Only call the CommaVisitor when not C89 due to differences in scope flags.
+ if ((getLangOpts().C99 || getLangOpts().CPlusPlus) &&
+ !Diags.isIgnored(diag::warn_comma_operator, CondExpr->getExprLoc()))
CommaVisitor(*this).Visit(CondExpr);
if (!elseStmt)
@@ -576,9 +577,8 @@ StmtResult Sema::BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
DiagnoseUnusedExprResult(thenStmt);
DiagnoseUnusedExprResult(elseStmt);
- return new (Context)
- IfStmt(Context, IfLoc, IsConstexpr, InitStmt, Cond.get().first,
- Cond.get().second, thenStmt, ElseLoc, elseStmt);
+ return IfStmt::Create(Context, IfLoc, IsConstexpr, InitStmt, Cond.get().first,
+ Cond.get().second, thenStmt, ElseLoc, elseStmt);
}
namespace {
@@ -631,8 +631,8 @@ static bool EqEnumVals(const std::pair<llvm::APSInt, EnumConstantDecl*>& lhs,
/// GetTypeBeforeIntegralPromotion - Returns the pre-promotion type of
/// potentially integral-promoted expression @p expr.
static QualType GetTypeBeforeIntegralPromotion(const Expr *&E) {
- if (const auto *CleanUps = dyn_cast<ExprWithCleanups>(E))
- E = CleanUps->getSubExpr();
+ if (const auto *FE = dyn_cast<FullExpr>(E))
+ E = FE->getSubExpr();
while (const auto *ImpCast = dyn_cast<ImplicitCastExpr>(E)) {
if (ImpCast->getCastKind() != CK_IntegralCast) break;
E = ImpCast->getSubExpr();
@@ -727,8 +727,7 @@ StmtResult Sema::ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
setFunctionHasBranchIntoScope();
- SwitchStmt *SS = new (Context)
- SwitchStmt(Context, InitStmt, Cond.get().first, CondExpr);
+ auto *SS = SwitchStmt::Create(Context, InitStmt, Cond.get().first, CondExpr);
getCurFunction()->SwitchStack.push_back(
FunctionScopeInfo::SwitchInfo(SS, false));
return SS;
@@ -946,8 +945,11 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
llvm::APSInt ConstantCondValue;
bool HasConstantCond = false;
if (!HasDependentValue && !TheDefaultStmt) {
- HasConstantCond = CondExpr->EvaluateAsInt(ConstantCondValue, Context,
+ Expr::EvalResult Result;
+ HasConstantCond = CondExpr->EvaluateAsInt(Result, Context,
Expr::SE_AllowSideEffects);
+ if (Result.Val.isInt())
+ ConstantCondValue = Result.Val.getInt();
assert(!HasConstantCond ||
(ConstantCondValue.getBitWidth() == CondWidth &&
ConstantCondValue.isSigned() == CondIsSigned));
@@ -1307,8 +1309,8 @@ StmtResult Sema::ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
if (isa<NullStmt>(Body))
getCurCompoundScope().setHasEmptyLoopBodies();
- return new (Context)
- WhileStmt(Context, CondVal.first, CondVal.second, Body, WhileLoc);
+ return WhileStmt::Create(Context, CondVal.first, CondVal.second, Body,
+ WhileLoc);
}
StmtResult
@@ -1328,6 +1330,11 @@ Sema::ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
return StmtError();
Cond = CondResult.get();
+ // Only call the CommaVisitor for C89 due to differences in scope flags.
+ if (Cond && !getLangOpts().C99 && !getLangOpts().CPlusPlus &&
+ !Diags.isIgnored(diag::warn_comma_operator, Cond->getExprLoc()))
+ CommaVisitor(*this).Visit(Cond);
+
DiagnoseUnusedExprResult(Body);
return new (Context) DoStmt(Body, Cond, DoLoc, WhileLoc, CondRParen);
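
The added block extends the comma-operator check to do/while conditions in C89 mode, mirroring the if-statement change earlier in this file. A small sketch of the kind of condition the CommaVisitor now inspects — illustrative only; whether it actually warns is subject to the usual -Wcomma heuristics:

    int step(void);
    void spin(void) {
      int x = 0;
      do
        ++x;
      while (step(), x < 10);  /* comma operator inside the do/while condition */
    }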
@@ -1409,7 +1416,11 @@ namespace {
void VisitDeclRefExpr(DeclRefExpr *E) {
VarDecl *VD = dyn_cast<VarDecl>(E->getDecl());
- if (!VD) return;
+ if (!VD) {
+ // Don't allow unhandled Decl types.
+ Simple = false;
+ return;
+ }
Ranges.push_back(E->getSourceRange());
@@ -3218,7 +3229,8 @@ Sema::ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
return StmtError();
RetValExp = ER.get();
}
- return new (Context) ReturnStmt(ReturnLoc, RetValExp, nullptr);
+ return ReturnStmt::Create(Context, ReturnLoc, RetValExp,
+ /* NRVOCandidate=*/nullptr);
}
if (HasDeducedReturnType) {
@@ -3344,8 +3356,8 @@ Sema::ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
return StmtError();
RetValExp = ER.get();
}
- ReturnStmt *Result = new (Context) ReturnStmt(ReturnLoc, RetValExp,
- NRVOCandidate);
+ auto *Result =
+ ReturnStmt::Create(Context, ReturnLoc, RetValExp, NRVOCandidate);
// If we need to check for the named return value optimization,
// or if we need to infer the return type,
@@ -3574,7 +3586,8 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
return StmtError();
RetValExp = ER.get();
}
- return new (Context) ReturnStmt(ReturnLoc, RetValExp, nullptr);
+ return ReturnStmt::Create(Context, ReturnLoc, RetValExp,
+ /* NRVOCandidate=*/nullptr);
}
// FIXME: Add a flag to the ScopeInfo to indicate whether we're performing
@@ -3669,7 +3682,8 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
}
}
- Result = new (Context) ReturnStmt(ReturnLoc, RetValExp, nullptr);
+ Result = ReturnStmt::Create(Context, ReturnLoc, RetValExp,
+ /* NRVOCandidate=*/nullptr);
} else if (!RetValExp && !HasDependentReturnType) {
FunctionDecl *FD = getCurFunctionDecl();
@@ -3691,7 +3705,8 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
else
Diag(ReturnLoc, DiagID) << getCurMethodDecl()->getDeclName() << 1/*meth*/;
- Result = new (Context) ReturnStmt(ReturnLoc);
+ Result = ReturnStmt::Create(Context, ReturnLoc, /* RetExpr=*/nullptr,
+ /* NRVOCandidate=*/nullptr);
} else {
assert(RetValExp || HasDependentReturnType);
const VarDecl *NRVOCandidate = nullptr;
@@ -3744,7 +3759,7 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
return StmtError();
RetValExp = ER.get();
}
- Result = new (Context) ReturnStmt(ReturnLoc, RetValExp, NRVOCandidate);
+ Result = ReturnStmt::Create(Context, ReturnLoc, RetValExp, NRVOCandidate);
}
// If we need to check for the named return value optimization, save the
diff --git a/lib/Sema/SemaStmtAsm.cpp b/lib/Sema/SemaStmtAsm.cpp
index 359bfb4464..d209266049 100644
--- a/lib/Sema/SemaStmtAsm.cpp
+++ b/lib/Sema/SemaStmtAsm.cpp
@@ -27,6 +27,58 @@
using namespace clang;
using namespace sema;
+/// Remove the outermost LValueToRValue cast from an expression.
+static void removeLValueToRValueCast(Expr *E) {
+ Expr *Parent = E;
+ Expr *ExprUnderCast = nullptr;
+ SmallVector<Expr *, 8> ParentsToUpdate;
+
+ while (true) {
+ ParentsToUpdate.push_back(Parent);
+ if (auto *ParenE = dyn_cast<ParenExpr>(Parent)) {
+ Parent = ParenE->getSubExpr();
+ continue;
+ }
+
+ Expr *Child = nullptr;
+ CastExpr *ParentCast = dyn_cast<CastExpr>(Parent);
+ if (ParentCast)
+ Child = ParentCast->getSubExpr();
+ else
+ return;
+
+ if (auto *CastE = dyn_cast<CastExpr>(Child))
+ if (CastE->getCastKind() == CK_LValueToRValue) {
+ ExprUnderCast = CastE->getSubExpr();
+ // LValueToRValue cast inside GCCAsmStmt requires an explicit cast.
+ ParentCast->setSubExpr(ExprUnderCast);
+ break;
+ }
+ Parent = Child;
+ }
+
+  // Update the parent expressions to have the same value kind as the
+  // underlying expression.
+ assert(ExprUnderCast &&
+ "Should be reachable only if LValueToRValue cast was found!");
+ auto ValueKind = ExprUnderCast->getValueKind();
+ for (Expr *E : ParentsToUpdate)
+ E->setValueKind(ValueKind);
+}
+
+/// Emit a warning about the use of "noop"-like casts on lvalues (a GNU
+/// extension) and fix the argument by removing the LValueToRValue cast from
+/// the expression.
+static void emitAndFixInvalidAsmCastLValue(const Expr *LVal, Expr *BadArgument,
+ Sema &S) {
+ if (!S.getLangOpts().HeinousExtensions) {
+ S.Diag(LVal->getBeginLoc(), diag::err_invalid_asm_cast_lvalue)
+ << BadArgument->getSourceRange();
+ } else {
+ S.Diag(LVal->getBeginLoc(), diag::warn_invalid_asm_cast_lvalue)
+ << BadArgument->getSourceRange();
+ }
+ removeLValueToRValueCast(BadArgument);
+}
+
/// CheckAsmLValue - GNU C has an extremely ugly extension whereby they silently
/// ignore "noop" casts in places where an lvalue is required by an inline asm.
/// We emulate this behavior when -fheinous-gnu-extensions is specified, but
@@ -34,7 +86,7 @@ using namespace sema;
///
/// This method checks to see if the argument is an acceptable l-value and
/// returns false if it is a case we can handle.
-static bool CheckAsmLValue(const Expr *E, Sema &S) {
+static bool CheckAsmLValue(Expr *E, Sema &S) {
// Type dependent expressions will be checked during instantiation.
if (E->isTypeDependent())
return false;
@@ -46,12 +98,7 @@ static bool CheckAsmLValue(const Expr *E, Sema &S) {
// are supposed to allow.
const Expr *E2 = E->IgnoreParenNoopCasts(S.Context);
if (E != E2 && E2->isLValue()) {
- if (!S.getLangOpts().HeinousExtensions)
- S.Diag(E2->getBeginLoc(), diag::err_invalid_asm_cast_lvalue)
- << E->getSourceRange();
- else
- S.Diag(E2->getBeginLoc(), diag::warn_invalid_asm_cast_lvalue)
- << E->getSourceRange();
+ emitAndFixInvalidAsmCastLValue(E2, E, S);
// Accept, even if we emitted an error diagnostic.
return false;
}
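
CheckAsmLValue now delegates to the shared emitAndFixInvalidAsmCastLValue helper, which both diagnoses the GNU "noop cast on an lvalue" pattern and strips the stray LValueToRValue cast so the operand stays usable. The pattern in question, as a sketch (accepted with a warning only under -fheinous-gnu-extensions; otherwise it is rejected):

    void f(void) {
      int i;
      /* The cast is a no-op on an int lvalue; GCC historically ignored it here. */
      __asm__("" : "+r"((int)i));
    }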
@@ -264,13 +311,7 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
break;
case Expr::MLV_LValueCast: {
const Expr *LVal = OutputExpr->IgnoreParenNoopCasts(Context);
- if (!getLangOpts().HeinousExtensions) {
- Diag(LVal->getBeginLoc(), diag::err_invalid_asm_cast_lvalue)
- << OutputExpr->getSourceRange();
- } else {
- Diag(LVal->getBeginLoc(), diag::warn_invalid_asm_cast_lvalue)
- << OutputExpr->getSourceRange();
- }
+ emitAndFixInvalidAsmCastLValue(LVal, OutputExpr, *this);
// Accept, even if we emitted an error diagnostic.
break;
}
@@ -337,11 +378,12 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
<< InputExpr->getSourceRange());
} else if (Info.requiresImmediateConstant() && !Info.allowsRegister()) {
if (!InputExpr->isValueDependent()) {
- llvm::APSInt Result;
- if (!InputExpr->EvaluateAsInt(Result, Context))
+ Expr::EvalResult EVResult;
+ if (!InputExpr->EvaluateAsInt(EVResult, Context))
return StmtError(
Diag(InputExpr->getBeginLoc(), diag::err_asm_immediate_expected)
<< Info.getConstraintStr() << InputExpr->getSourceRange());
+ llvm::APSInt Result = EVResult.Val.getInt();
if (!Info.isValidAsmImmediate(Result))
return StmtError(Diag(InputExpr->getBeginLoc(),
diag::err_invalid_asm_value_for_constraint)
diff --git a/lib/Sema/SemaStmtAttr.cpp b/lib/Sema/SemaStmtAttr.cpp
index c720aff064..353cd60c4a 100644
--- a/lib/Sema/SemaStmtAttr.cpp
+++ b/lib/Sema/SemaStmtAttr.cpp
@@ -16,7 +16,6 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Sema/DelayedDiagnostic.h"
#include "clang/Sema/Lookup.h"
-#include "clang/Sema/LoopHint.h"
#include "clang/Sema/ScopeInfo.h"
#include "llvm/ADT/StringExtras.h"
diff --git a/lib/Sema/SemaTemplate.cpp b/lib/Sema/SemaTemplate.cpp
index 61e51de349..c1763528df 100644
--- a/lib/Sema/SemaTemplate.cpp
+++ b/lib/Sema/SemaTemplate.cpp
@@ -3052,8 +3052,42 @@ static Expr *lookThroughRangesV3Condition(Preprocessor &PP, Expr *Cond) {
return Cond;
}
+namespace {
+
+// A PrinterHelper that prints more helpful diagnostics for some sub-expressions
+// within a failing boolean expression, such as substituting template parameters
+// for actual types.
+class FailedBooleanConditionPrinterHelper : public PrinterHelper {
+public:
+ explicit FailedBooleanConditionPrinterHelper(const PrintingPolicy &P)
+ : Policy(P) {}
+
+ bool handledStmt(Stmt *E, raw_ostream &OS) override {
+ const auto *DR = dyn_cast<DeclRefExpr>(E);
+ if (DR && DR->getQualifier()) {
+ // If this is a qualified name, expand the template arguments in nested
+ // qualifiers.
+ DR->getQualifier()->print(OS, Policy, true);
+ // Then print the decl itself.
+ const ValueDecl *VD = DR->getDecl();
+ OS << VD->getName();
+ if (const auto *IV = dyn_cast<VarTemplateSpecializationDecl>(VD)) {
+ // This is a template variable, print the expanded template arguments.
+ printTemplateArgumentList(OS, IV->getTemplateArgs().asArray(), Policy);
+ }
+ return true;
+ }
+ return false;
+ }
+
+private:
+ const PrintingPolicy Policy;
+};
+
+} // end anonymous namespace
+
std::pair<Expr *, std::string>
-Sema::findFailedBooleanCondition(Expr *Cond, bool AllowTopLevelCond) {
+Sema::findFailedBooleanCondition(Expr *Cond) {
Cond = lookThroughRangesV3Condition(PP, Cond);
// Separate out all of the terms in a conjunction.
@@ -3082,18 +3116,14 @@ Sema::findFailedBooleanCondition(Expr *Cond, bool AllowTopLevelCond) {
break;
}
}
-
- if (!FailedCond) {
- if (!AllowTopLevelCond)
- return { nullptr, "" };
-
+ if (!FailedCond)
FailedCond = Cond->IgnoreParenImpCasts();
- }
std::string Description;
{
llvm::raw_string_ostream Out(Description);
- FailedCond->printPretty(Out, nullptr, getPrintingPolicy());
+ FailedBooleanConditionPrinterHelper Helper(getPrintingPolicy());
+ FailedCond->printPretty(Out, &Helper, getPrintingPolicy());
}
return { FailedCond, Description };
}
@@ -3177,9 +3207,7 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
Expr *FailedCond;
std::string FailedDescription;
std::tie(FailedCond, FailedDescription) =
- findFailedBooleanCondition(
- TemplateArgs[0].getSourceExpression(),
- /*AllowTopLevelCond=*/true);
+ findFailedBooleanCondition(TemplateArgs[0].getSourceExpression());
// Remove the old SFINAE diagnostic.
PartialDiagnosticAt OldDiag =
@@ -4434,7 +4462,7 @@ SubstDefaultTemplateArgument(Sema &SemaRef,
// If the argument type is dependent, instantiate it now based
// on the previously-computed template arguments.
- if (ArgType->getType()->isDependentType()) {
+ if (ArgType->getType()->isInstantiationDependentType()) {
Sema::InstantiatingTemplate Inst(SemaRef, TemplateLoc,
Param, Template, Converted,
SourceRange(TemplateLoc, RAngleLoc));
@@ -8107,7 +8135,7 @@ bool Sema::CheckFunctionTemplateSpecialization(
if (OldMD && OldMD->isConst()) {
const FunctionProtoType *FPT = FT->castAs<FunctionProtoType>();
FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
- EPI.TypeQuals |= Qualifiers::Const;
+ EPI.TypeQuals.addConst();
FT = Context.getFunctionType(FPT->getReturnType(),
FPT->getParamTypes(), EPI);
}
@@ -9173,10 +9201,8 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
if (!HasNoEffect) {
// Instantiate static data member or variable template.
Prev->setTemplateSpecializationKind(TSK, D.getIdentifierLoc());
- if (PrevTemplate) {
- // Merge attributes.
- ProcessDeclAttributeList(S, Prev, D.getDeclSpec().getAttributes());
- }
+ // Merge attributes.
+ ProcessDeclAttributeList(S, Prev, D.getDeclSpec().getAttributes());
if (TSK == TSK_ExplicitInstantiationDefinition)
InstantiateVariableDefinition(D.getIdentifierLoc(), Prev);
}
@@ -9629,7 +9655,7 @@ Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
Expr *FailedCond;
std::string FailedDescription;
std::tie(FailedCond, FailedDescription) =
- findFailedBooleanCondition(Cond, /*AllowTopLevelCond=*/true);
+ findFailedBooleanCondition(Cond);
Diag(FailedCond->getExprLoc(),
diag::err_typename_nested_not_found_requirement)
diff --git a/lib/Sema/SemaTemplateDeduction.cpp b/lib/Sema/SemaTemplateDeduction.cpp
index 192ab8eb24..155d842c58 100644
--- a/lib/Sema/SemaTemplateDeduction.cpp
+++ b/lib/Sema/SemaTemplateDeduction.cpp
@@ -178,6 +178,8 @@ getDeducedParameterFromExpr(TemplateDeductionInfo &Info, Expr *E) {
while (true) {
if (ImplicitCastExpr *IC = dyn_cast<ImplicitCastExpr>(E))
E = IC->getSubExpr();
+ else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(E))
+ E = CE->getSubExpr();
else if (SubstNonTypeTemplateParmExpr *Subst =
dyn_cast<SubstNonTypeTemplateParmExpr>(E))
E = Subst->getReplacement();
@@ -3076,7 +3078,7 @@ Sema::SubstituteExplicitTemplateArguments(
// "pointer to cv-qualifier-seq X" between the optional cv-qualifer-seq
// and the end of the function-definition, member-declarator, or
// declarator.
- unsigned ThisTypeQuals = 0;
+ Qualifiers ThisTypeQuals;
CXXRecordDecl *ThisContext = nullptr;
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Function)) {
ThisContext = Method->getParent();
@@ -4655,8 +4657,7 @@ AddImplicitObjectParameterType(ASTContext &Context,
// The standard doesn't say explicitly, but we pick the appropriate kind of
// reference type based on [over.match.funcs]p4.
QualType ArgTy = Context.getTypeDeclType(Method->getParent());
- ArgTy = Context.getQualifiedType(ArgTy,
- Qualifiers::fromCVRMask(Method->getTypeQualifiers()));
+ ArgTy = Context.getQualifiedType(ArgTy, Method->getTypeQualifiers());
if (Method->getRefQualifier() == RQ_RValue)
ArgTy = Context.getRValueReferenceType(ArgTy);
else
@@ -5225,6 +5226,8 @@ MarkUsedTemplateParameters(ASTContext &Ctx,
while (true) {
if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E))
E = ICE->getSubExpr();
+ else if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(E))
+ E = CE->getSubExpr();
else if (const SubstNonTypeTemplateParmExpr *Subst =
dyn_cast<SubstNonTypeTemplateParmExpr>(E))
E = Subst->getReplacement();
diff --git a/lib/Sema/SemaTemplateInstantiate.cpp b/lib/Sema/SemaTemplateInstantiate.cpp
index 5666cf04a2..96abeed824 100644
--- a/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/lib/Sema/SemaTemplateInstantiate.cpp
@@ -675,7 +675,7 @@ Optional<TemplateDeductionInfo *> Sema::isSFINAEContext() const {
// context, depending on what else is on the stack.
if (isa<TypeAliasTemplateDecl>(Active->Entity))
break;
- // Fall through.
+ LLVM_FALLTHROUGH;
case CodeSynthesisContext::DefaultFunctionArgumentInstantiation:
case CodeSynthesisContext::ExceptionSpecInstantiation:
// This is a template instantiation, so there is no SFINAE.
@@ -907,7 +907,7 @@ namespace {
QualType TransformFunctionProtoType(TypeLocBuilder &TLB,
FunctionProtoTypeLoc TL,
CXXRecordDecl *ThisContext,
- unsigned ThisTypeQuals,
+ Qualifiers ThisTypeQuals,
Fn TransformExceptionSpec);
ParmVarDecl *TransformFunctionTypeParam(ParmVarDecl *OldParm,
@@ -1167,7 +1167,7 @@ TemplateInstantiator::TransformPredefinedExpr(PredefinedExpr *E) {
if (!E->isTypeDependent())
return E;
- return getSema().BuildPredefinedExpr(E->getLocation(), E->getIdentType());
+ return getSema().BuildPredefinedExpr(E->getLocation(), E->getIdentKind());
}
ExprResult
@@ -1427,7 +1427,7 @@ template<typename Fn>
QualType TemplateInstantiator::TransformFunctionProtoType(TypeLocBuilder &TLB,
FunctionProtoTypeLoc TL,
CXXRecordDecl *ThisContext,
- unsigned ThisTypeQuals,
+ Qualifiers ThisTypeQuals,
Fn TransformExceptionSpec) {
// We need a local instantiation scope for this function prototype.
LocalInstantiationScope Scope(SemaRef, /*CombineWithOuterScope=*/true);
@@ -1666,7 +1666,7 @@ TypeSourceInfo *Sema::SubstFunctionDeclType(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
- unsigned ThisTypeQuals) {
+ Qualifiers ThisTypeQuals) {
assert(!CodeSynthesisContexts.empty() &&
"Cannot perform an instantiation without some context on the "
"instantiation stack");
@@ -2148,7 +2148,7 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
NamedDecl *ND = dyn_cast<NamedDecl>(I->NewDecl);
CXXRecordDecl *ThisContext =
dyn_cast_or_null<CXXRecordDecl>(ND->getDeclContext());
- CXXThisScopeRAII ThisScope(*this, ThisContext, /*TypeQuals*/0,
+ CXXThisScopeRAII ThisScope(*this, ThisContext, Qualifiers(),
ND && ND->isCXXInstanceMember());
Attr *NewAttr =
@@ -2343,7 +2343,7 @@ bool Sema::InstantiateInClassInitializer(
// Instantiate the initializer.
ActOnStartCXXInClassMemberInitializer();
- CXXThisScopeRAII ThisScope(*this, Instantiation->getParent(), /*TypeQuals=*/0);
+ CXXThisScopeRAII ThisScope(*this, Instantiation->getParent(), Qualifiers());
ExprResult NewInit = SubstInitializer(OldInit, TemplateArgs,
/*CXXDirectInit=*/false);
@@ -2574,10 +2574,14 @@ Sema::InstantiateClassMembers(SourceLocation PointOfInstantiation,
for (auto *D : Instantiation->decls()) {
bool SuppressNew = false;
if (auto *Function = dyn_cast<FunctionDecl>(D)) {
- if (FunctionDecl *Pattern
- = Function->getInstantiatedFromMemberFunction()) {
- MemberSpecializationInfo *MSInfo
- = Function->getMemberSpecializationInfo();
+ if (FunctionDecl *Pattern =
+ Function->getInstantiatedFromMemberFunction()) {
+
+ if (Function->hasAttr<ExcludeFromExplicitInstantiationAttr>())
+ continue;
+
+ MemberSpecializationInfo *MSInfo =
+ Function->getMemberSpecializationInfo();
assert(MSInfo && "No member specialization information?");
if (MSInfo->getTemplateSpecializationKind()
== TSK_ExplicitSpecialization)
@@ -2618,6 +2622,9 @@ Sema::InstantiateClassMembers(SourceLocation PointOfInstantiation,
continue;
if (Var->isStaticDataMember()) {
+ if (Var->hasAttr<ExcludeFromExplicitInstantiationAttr>())
+ continue;
+
MemberSpecializationInfo *MSInfo = Var->getMemberSpecializationInfo();
assert(MSInfo && "No member specialization information?");
if (MSInfo->getTemplateSpecializationKind()
@@ -2649,6 +2656,9 @@ Sema::InstantiateClassMembers(SourceLocation PointOfInstantiation,
}
}
} else if (auto *Record = dyn_cast<CXXRecordDecl>(D)) {
+ if (Record->hasAttr<ExcludeFromExplicitInstantiationAttr>())
+ continue;
+
// Always skip the injected-class-name, along with any
// redeclarations of nested classes, since both would cause us
// to try to instantiate the members of a class twice.
diff --git a/lib/Sema/SemaTemplateInstantiateDecl.cpp b/lib/Sema/SemaTemplateInstantiateDecl.cpp
index 5f2b05c089..31353e45ba 100644
--- a/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -295,7 +295,7 @@ static void instantiateOMPDeclareSimdDeclAttr(
PVD, FD->getParamDecl(PVD->getFunctionScopeIndex()));
return S.SubstExpr(E, TemplateArgs);
}
- Sema::CXXThisScopeRAII ThisScope(S, ThisContext, /*TypeQuals=*/0,
+ Sema::CXXThisScopeRAII ThisScope(S, ThisContext, Qualifiers(),
FD->isCXXInstanceMember());
return S.SubstExpr(E, TemplateArgs);
};
@@ -355,7 +355,7 @@ void Sema::InstantiateAttrsForDecl(
// applicable to template declaration, we'll need to add them here.
CXXThisScopeRAII ThisScope(
*this, dyn_cast_or_null<CXXRecordDecl>(ND->getDeclContext()),
- /*TypeQuals*/ 0, ND->isCXXInstanceMember());
+ Qualifiers(), ND->isCXXInstanceMember());
Attr *NewAttr = sema::instantiateTemplateAttributeForDecl(
TmplAttr, Context, *this, TemplateArgs);
@@ -365,6 +365,20 @@ void Sema::InstantiateAttrsForDecl(
}
}
+static Sema::RetainOwnershipKind
+attrToRetainOwnershipKind(const Attr *A) {
+ switch (A->getKind()) {
+ case clang::attr::CFConsumed:
+ return Sema::RetainOwnershipKind::CF;
+ case clang::attr::OSConsumed:
+ return Sema::RetainOwnershipKind::OS;
+ case clang::attr::NSConsumed:
+ return Sema::RetainOwnershipKind::NS;
+ default:
+ llvm_unreachable("Wrong argument supplied");
+ }
+}
+
void Sema::InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Tmpl, Decl *New,
LateInstantiatedAttrVec *LateAttrs,
@@ -438,11 +452,12 @@ void Sema::InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
continue;
}
- if (isa<NSConsumedAttr>(TmplAttr) || isa<CFConsumedAttr>(TmplAttr)) {
- AddNSConsumedAttr(TmplAttr->getRange(), New,
- TmplAttr->getSpellingListIndex(),
- isa<NSConsumedAttr>(TmplAttr),
- /*template instantiation*/ true);
+ if (isa<NSConsumedAttr>(TmplAttr) || isa<OSConsumedAttr>(TmplAttr) ||
+ isa<CFConsumedAttr>(TmplAttr)) {
+ AddXConsumedAttr(New, TmplAttr->getRange(),
+ TmplAttr->getSpellingListIndex(),
+ attrToRetainOwnershipKind(TmplAttr),
+ /*template instantiation=*/true);
continue;
}
@@ -459,7 +474,7 @@ void Sema::InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
NamedDecl *ND = dyn_cast<NamedDecl>(New);
CXXRecordDecl *ThisContext =
dyn_cast_or_null<CXXRecordDecl>(ND->getDeclContext());
- CXXThisScopeRAII ThisScope(*this, ThisContext, /*TypeQuals*/0,
+ CXXThisScopeRAII ThisScope(*this, ThisContext, Qualifiers(),
ND && ND->isCXXInstanceMember());
Attr *NewAttr = sema::instantiateTemplateAttribute(TmplAttr, Context,
@@ -748,6 +763,9 @@ Decl *TemplateDeclInstantiator::VisitVarDecl(VarDecl *D,
Var->setImplicit(D->isImplicit());
+ if (Var->isStaticLocal())
+ SemaRef.CheckStaticLocalForDllExport(Var);
+
return Var;
}
@@ -1799,7 +1817,9 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
// If the original function was part of a friend declaration,
// inherit its namespace state and add it to the owner.
if (isFriend) {
- PrincipalDecl->setObjectOfFriendDecl();
+ Function->setObjectOfFriendDecl();
+ if (FunctionTemplateDecl *FT = Function->getDescribedFunctionTemplate())
+ FT->setObjectOfFriendDecl();
DC->makeDeclVisibleInContext(PrincipalDecl);
bool QueuedInstantiation = false;
@@ -2684,26 +2704,28 @@ Decl *TemplateDeclInstantiator::VisitUsingPackDecl(UsingPackDecl *D) {
}
Decl *TemplateDeclInstantiator::VisitClassScopeFunctionSpecializationDecl(
- ClassScopeFunctionSpecializationDecl *Decl) {
+ ClassScopeFunctionSpecializationDecl *Decl) {
CXXMethodDecl *OldFD = Decl->getSpecialization();
CXXMethodDecl *NewFD =
cast_or_null<CXXMethodDecl>(VisitCXXMethodDecl(OldFD, nullptr, true));
if (!NewFD)
return nullptr;
- LookupResult Previous(SemaRef, NewFD->getNameInfo(), Sema::LookupOrdinaryName,
- Sema::ForExternalRedeclaration);
-
- TemplateArgumentListInfo TemplateArgs;
- TemplateArgumentListInfo *TemplateArgsPtr = nullptr;
+ TemplateArgumentListInfo ExplicitTemplateArgs;
+ TemplateArgumentListInfo *ExplicitTemplateArgsPtr = nullptr;
if (Decl->hasExplicitTemplateArgs()) {
- TemplateArgs = Decl->templateArgs();
- TemplateArgsPtr = &TemplateArgs;
+ if (SemaRef.Subst(Decl->templateArgs().getArgumentArray(),
+ Decl->templateArgs().size(), ExplicitTemplateArgs,
+ TemplateArgs))
+ return nullptr;
+ ExplicitTemplateArgsPtr = &ExplicitTemplateArgs;
}
+ LookupResult Previous(SemaRef, NewFD->getNameInfo(), Sema::LookupOrdinaryName,
+ Sema::ForExternalRedeclaration);
SemaRef.LookupQualifiedName(Previous, SemaRef.CurContext);
- if (SemaRef.CheckFunctionTemplateSpecialization(NewFD, TemplateArgsPtr,
- Previous)) {
+ if (SemaRef.CheckFunctionTemplateSpecialization(
+ NewFD, ExplicitTemplateArgsPtr, Previous)) {
NewFD->setInvalidDecl();
return NewFD;
}
@@ -2800,7 +2822,7 @@ Decl *TemplateDeclInstantiator::VisitOMPDeclareReductionDecl(
cast<DeclRefExpr>(D->getCombinerOut())->getDecl(),
cast<DeclRefExpr>(NewDRD->getCombinerOut())->getDecl());
auto *ThisContext = dyn_cast_or_null<CXXRecordDecl>(Owner);
- Sema::CXXThisScopeRAII ThisScope(SemaRef, ThisContext, /*TypeQuals*/ 0,
+ Sema::CXXThisScopeRAII ThisScope(SemaRef, ThisContext, Qualifiers(),
ThisContext);
SubstCombiner = SemaRef.SubstExpr(D->getCombiner(), TemplateArgs).get();
SemaRef.ActOnOpenMPDeclareReductionCombinerEnd(NewDRD, SubstCombiner);
@@ -3419,7 +3441,7 @@ TemplateDeclInstantiator::SubstFunctionType(FunctionDecl *D,
assert(Params.empty() && "parameter vector is non-empty at start");
CXXRecordDecl *ThisContext = nullptr;
- unsigned ThisTypeQuals = 0;
+ Qualifiers ThisTypeQuals;
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
ThisContext = cast<CXXRecordDecl>(Owner);
ThisTypeQuals = Method->getTypeQualifiers();
diff --git a/lib/Sema/SemaTemplateVariadic.cpp b/lib/Sema/SemaTemplateVariadic.cpp
index 7d1d4c5d7e..3338cec5eb 100644
--- a/lib/Sema/SemaTemplateVariadic.cpp
+++ b/lib/Sema/SemaTemplateVariadic.cpp
@@ -976,6 +976,7 @@ ExprResult Sema::ActOnSizeofParameterPackExpr(Scope *S,
PDiag(diag::note_parameter_pack_here));
ParameterPack = Corrected.getCorrectionDecl();
}
+ break;
case LookupResult::FoundOverloaded:
case LookupResult::FoundUnresolvedValue:
diff --git a/lib/Sema/SemaType.cpp b/lib/Sema/SemaType.cpp
index 27dbf70498..bd4a0e1407 100644
--- a/lib/Sema/SemaType.cpp
+++ b/lib/Sema/SemaType.cpp
@@ -116,6 +116,7 @@ static void diagnoseBadTypeAttribute(Sema &S, const ParsedAttr &attr,
case ParsedAttr::AT_Pascal: \
case ParsedAttr::AT_SwiftCall: \
case ParsedAttr::AT_VectorCall: \
+ case ParsedAttr::AT_AArch64VectorPcs: \
case ParsedAttr::AT_MSABI: \
case ParsedAttr::AT_SysVABI: \
case ParsedAttr::AT_Pcs: \
@@ -182,11 +183,15 @@ namespace {
SmallVector<TypeAttrPair, 8> AttrsForTypes;
bool AttrsForTypesSorted = true;
+ /// Flag to indicate we parsed a noderef attribute. This is used for
+ /// validating that noderef was used on a pointer or array.
+ bool parsedNoDeref;
+
public:
TypeProcessingState(Sema &sema, Declarator &declarator)
- : sema(sema), declarator(declarator),
- chunkIndex(declarator.getNumTypeObjects()),
- trivial(true), hasSavedAttrs(false) {}
+ : sema(sema), declarator(declarator),
+ chunkIndex(declarator.getNumTypeObjects()), trivial(true),
+ hasSavedAttrs(false), parsedNoDeref(false) {}
Sema &getSema() const {
return sema;
@@ -277,6 +282,10 @@ namespace {
llvm_unreachable("no Attr* for AttributedType*");
}
+ void setParsedNoDeref(bool parsed) { parsedNoDeref = parsed; }
+
+ bool didParseNoDeref() const { return parsedNoDeref; }
+
~TypeProcessingState() {
if (trivial) return;
@@ -1864,8 +1873,7 @@ static QualType inferARCLifetimeForPointee(Sema &S, QualType type,
}
static std::string getFunctionQualifiersAsString(const FunctionProtoType *FnTy){
- std::string Quals =
- Qualifiers::fromCVRMask(FnTy->getTypeQuals()).getAsString();
+ std::string Quals = FnTy->getTypeQuals().getAsString();
switch (FnTy->getRefQualifier()) {
case RQ_None:
@@ -1907,7 +1915,7 @@ static bool checkQualifiedFunction(Sema &S, QualType T, SourceLocation Loc,
QualifiedFunctionKind QFK) {
// Does T refer to a function type with a cv-qualifier or a ref-qualifier?
const FunctionProtoType *FPT = T->getAs<FunctionProtoType>();
- if (!FPT || (FPT->getTypeQuals() == 0 && FPT->getRefQualifier() == RQ_None))
+ if (!FPT || (FPT->getTypeQuals().empty() && FPT->getRefQualifier() == RQ_None))
return false;
S.Diag(Loc, diag::err_compound_qualified_function_type)
@@ -3886,6 +3894,11 @@ static bool hasOuterPointerLikeChunk(const Declarator &D, unsigned endIndex) {
return false;
}
+static bool IsNoDerefableChunk(DeclaratorChunk Chunk) {
+ return (Chunk.Kind == DeclaratorChunk::Pointer ||
+ Chunk.Kind == DeclaratorChunk::Array);
+}
+
template<typename AttrT>
static AttrT *createSimpleAttr(ASTContext &Ctx, ParsedAttr &Attr) {
Attr.setUsedAsTypeAttr();
@@ -3936,7 +3949,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// Does T refer to a function type with a cv-qualifier or a ref-qualifier?
bool IsQualifiedFunction = T->isFunctionProtoType() &&
- (T->castAs<FunctionProtoType>()->getTypeQuals() != 0 ||
+ (!T->castAs<FunctionProtoType>()->getTypeQuals().empty() ||
T->castAs<FunctionProtoType>()->getRefQualifier() != RQ_None);
// If T is 'decltype(auto)', the only declarators we can have are parens
@@ -4279,6 +4292,9 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
}
}
+ bool ExpectNoDerefChunk =
+ state.getCurrentAttributes().hasAttribute(ParsedAttr::AT_NoDeref);
+
// Walk the DeclTypeInfo, building the recursive type as we go.
// DeclTypeInfos are ordered from the identifier out, which is
// opposite of what we want :).
@@ -4324,7 +4340,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
inferPointerNullability(SimplePointerKind::Pointer, DeclType.Loc,
DeclType.EndLoc, DeclType.getAttrs());
- if (LangOpts.ObjC1 && T->getAs<ObjCObjectType>()) {
+ if (LangOpts.ObjC && T->getAs<ObjCObjectType>()) {
T = Context.getObjCObjectPointerType(T);
if (DeclType.Ptr.TypeQuals)
T = S.BuildQualifiedType(T, DeclType.Loc, DeclType.Ptr.TypeQuals);
@@ -4682,7 +4698,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
EPI.ExtInfo = EI;
EPI.Variadic = FTI.isVariadic;
EPI.HasTrailingReturn = FTI.hasTrailingReturnType();
- EPI.TypeQuals = FTI.TypeQuals;
+ EPI.TypeQuals.addCVRUQualifiers(FTI.TypeQuals);
EPI.RefQualifier = !FTI.hasRefQualifier()? RQ_None
: FTI.RefQualifierIsLValueRef? RQ_LValue
: RQ_RValue;
@@ -4809,7 +4825,24 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
Exceptions,
EPI.ExceptionSpec);
- T = Context.getFunctionType(T, ParamTys, EPI);
+ const auto &Spec = D.getCXXScopeSpec();
+ // OpenCLCPlusPlus: A class member function has an address space.
+ if (state.getSema().getLangOpts().OpenCLCPlusPlus &&
+ ((!Spec.isEmpty() &&
+ Spec.getScopeRep()->getKind() == NestedNameSpecifier::TypeSpec) ||
+ state.getDeclarator().getContext() ==
+ DeclaratorContext::MemberContext)) {
+ LangAS CurAS = EPI.TypeQuals.getAddressSpace();
+ // If a class member function's address space is not set, set it to
+ // __generic.
+ LangAS AS =
+ (CurAS == LangAS::Default ? LangAS::opencl_generic : CurAS);
+ EPI.TypeQuals.addAddressSpace(AS);
+ T = Context.getFunctionType(T, ParamTys, EPI);
+ T = state.getSema().Context.getAddrSpaceQualType(T, AS);
+ } else {
+ T = Context.getFunctionType(T, ParamTys, EPI);
+ }
}
break;
}
@@ -4888,8 +4921,22 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// See if there are any attributes on this declarator chunk.
processTypeAttrs(state, T, TAL_DeclChunk, DeclType.getAttrs());
+
+ if (DeclType.Kind != DeclaratorChunk::Paren) {
+ if (ExpectNoDerefChunk) {
+ if (!IsNoDerefableChunk(DeclType))
+ S.Diag(DeclType.Loc, diag::warn_noderef_on_non_pointer_or_array);
+ ExpectNoDerefChunk = false;
+ }
+
+ ExpectNoDerefChunk = state.didParseNoDeref();
+ }
}
+ if (ExpectNoDerefChunk)
+ S.Diag(state.getDeclarator().getBeginLoc(),
+ diag::warn_noderef_on_non_pointer_or_array);
+
// GNU warning -Wstrict-prototypes
// Warn if a function declaration is without a prototype.
// This warning is issued for all kinds of unprototyped function
@@ -5000,7 +5047,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// Strip the cv-qualifiers and ref-qualifiers from the type.
FunctionProtoType::ExtProtoInfo EPI = FnTy->getExtProtoInfo();
- EPI.TypeQuals = 0;
+ EPI.TypeQuals.removeCVRQualifiers();
EPI.RefQualifier = RQ_None;
T = Context.getFunctionType(FnTy->getReturnType(), FnTy->getParamTypes(),
@@ -5238,7 +5285,7 @@ TypeSourceInfo *Sema::GetTypeForDeclaratorCast(Declarator &D, QualType FromTy) {
TypeSourceInfo *ReturnTypeInfo = nullptr;
QualType declSpecTy = GetDeclSpecTypeForDeclarator(state, ReturnTypeInfo);
- if (getLangOpts().ObjC1) {
+ if (getLangOpts().ObjC) {
Qualifiers::ObjCLifetime ownership = Context.getInnerObjCOwnership(FromTy);
if (ownership != Qualifiers::OCL_None)
transferARCOwnership(state, declSpecTy, ownership);
@@ -6653,6 +6700,8 @@ static Attr *getCCTypeAttr(ASTContext &Ctx, ParsedAttr &Attr) {
return createSimpleAttr<SwiftCallAttr>(Ctx, Attr);
case ParsedAttr::AT_VectorCall:
return createSimpleAttr<VectorCallAttr>(Ctx, Attr);
+ case ParsedAttr::AT_AArch64VectorPcs:
+ return createSimpleAttr<AArch64VectorPcsAttr>(Ctx, Attr);
case ParsedAttr::AT_Pcs: {
// The attribute may have had a fixit applied where we treated an
// identifier as a string literal. The contents of the string are valid,
@@ -7177,7 +7226,8 @@ static void deduceOpenCLImplicitAddrSpace(TypeProcessingState &State,
bool IsPointee =
ChunkIndex > 0 &&
(D.getTypeObject(ChunkIndex - 1).Kind == DeclaratorChunk::Pointer ||
- D.getTypeObject(ChunkIndex - 1).Kind == DeclaratorChunk::BlockPointer);
+ D.getTypeObject(ChunkIndex - 1).Kind == DeclaratorChunk::BlockPointer ||
+ D.getTypeObject(ChunkIndex - 1).Kind == DeclaratorChunk::Reference);
bool IsFuncReturnType =
ChunkIndex > 0 &&
D.getTypeObject(ChunkIndex - 1).Kind == DeclaratorChunk::Function;
@@ -7197,10 +7247,13 @@ static void deduceOpenCLImplicitAddrSpace(TypeProcessingState &State,
!IsPointee) ||
// Do not deduce addr space of the void type, e.g. in f(void), otherwise
// it will fail some sema check.
- (T->isVoidType() && !IsPointee))
+ (T->isVoidType() && !IsPointee) ||
+ // Do not deduce address spaces for dependent types because they might end
+ // up instantiating to a type with an explicit address space qualifier.
+ T->isDependentType())
return;
- LangAS ImpAddr;
+ LangAS ImpAddr = LangAS::Default;
// Put OpenCL automatic variable in private address space.
// OpenCL v1.2 s6.5:
// The default address space name for arguments to a function in a
@@ -7222,7 +7275,9 @@ static void deduceOpenCLImplicitAddrSpace(TypeProcessingState &State,
if (IsPointee) {
ImpAddr = LangAS::opencl_generic;
} else {
- if (D.getContext() == DeclaratorContext::FileContext) {
+ if (D.getContext() == DeclaratorContext::TemplateArgContext) {
+ // Do not deduce address space for non-pointee type in template arg.
+ } else if (D.getContext() == DeclaratorContext::FileContext) {
ImpAddr = LangAS::opencl_global;
} else {
if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static ||
@@ -7262,6 +7317,9 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
// sure we visit every element once. Copy the attributes list, and iterate
// over that.
ParsedAttributesView AttrsCopy{attrs};
+
+ state.setParsedNoDeref(false);
+
for (ParsedAttr &attr : AttrsCopy) {
// Skip attributes that were marked to be invalid.
@@ -7273,7 +7331,7 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
// not appertain to a DeclaratorChunk. If we handle them as type
// attributes, accept them in that position and diagnose the GCC
// incompatibility.
- if (attr.getScopeName() && attr.getScopeName()->isStr("gnu")) {
+ if (attr.isGNUScope()) {
bool IsTypeAttr = attr.isTypeAttr();
if (TAL == TAL_DeclChunk) {
state.getSema().Diag(attr.getLoc(),
@@ -7359,6 +7417,15 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
HandleLifetimeBoundAttr(state, type, attr);
break;
+ case ParsedAttr::AT_NoDeref: {
+ ASTContext &Ctx = state.getSema().Context;
+ type = state.getAttributedType(createSimpleAttr<NoDerefAttr>(Ctx, attr),
+ type, type);
+ attr.setUsedAsTypeAttr();
+ state.setParsedNoDeref(true);
+ break;
+ }
+
MS_TYPE_ATTRS_CASELIST:
if (!handleMSPointerTypeQualifierAttr(state, attr, type))
attr.setUsedAsTypeAttr();
diff --git a/lib/Sema/TreeTransform.h b/lib/Sema/TreeTransform.h
index a75d307563..3f4b21eb55 100644
--- a/lib/Sema/TreeTransform.h
+++ b/lib/Sema/TreeTransform.h
@@ -597,7 +597,7 @@ public:
QualType TransformFunctionProtoType(TypeLocBuilder &TLB,
FunctionProtoTypeLoc TL,
CXXRecordDecl *ThisContext,
- unsigned ThisTypeQuals,
+ Qualifiers ThisTypeQuals,
Fn TransformExceptionSpec);
bool TransformExceptionSpec(SourceLocation Loc,
@@ -684,15 +684,13 @@ public:
OMPClause *Transform ## Class(Class *S);
#include "clang/Basic/OpenMPKinds.def"
- /// Build a new qualified type given its unqualified type and type
- /// qualifiers.
+ /// Build a new qualified type given its unqualified type and type location.
///
/// By default, this routine adds type qualifiers only to types that can
/// have qualifiers, and silently suppresses those qualifiers that are not
/// permitted. Subclasses may override this routine to provide different
/// behavior.
- QualType RebuildQualifiedType(QualType T, SourceLocation Loc,
- Qualifiers Quals);
+ QualType RebuildQualifiedType(QualType T, QualifiedTypeLoc TL);
/// Build a new pointer type given its pointee type.
///
@@ -2096,8 +2094,8 @@ public:
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
ExprResult RebuildPredefinedExpr(SourceLocation Loc,
- PredefinedExpr::IdentType IT) {
- return getSema().BuildPredefinedExpr(Loc, IT);
+ PredefinedExpr::IdentKind IK) {
+ return getSema().BuildPredefinedExpr(Loc, IK);
}
/// Build a new expression that references a declaration.
@@ -3344,8 +3342,8 @@ ExprResult TreeTransform<Derived>::TransformInitializer(Expr *Init,
if (!Init)
return Init;
- if (ExprWithCleanups *ExprTemp = dyn_cast<ExprWithCleanups>(Init))
- Init = ExprTemp->getSubExpr();
+ if (auto *FE = dyn_cast<FullExpr>(Init))
+ Init = FE->getSubExpr();
if (auto *AIL = dyn_cast<ArrayInitLoopExpr>(Init))
Init = AIL->getCommonExpr();
@@ -4228,8 +4226,9 @@ TreeTransform<Derived>::TransformTypeWithDeducedTST(TypeSourceInfo *DI) {
return nullptr;
if (QTL) {
- Result = getDerived().RebuildQualifiedType(
- Result, QTL.getBeginLoc(), QTL.getType().getLocalQualifiers());
+ Result = getDerived().RebuildQualifiedType(Result, QTL);
+ if (Result.isNull())
+ return nullptr;
TLB.TypeWasModifiedSafely(Result);
}
@@ -4240,13 +4239,14 @@ template<typename Derived>
QualType
TreeTransform<Derived>::TransformQualifiedType(TypeLocBuilder &TLB,
QualifiedTypeLoc T) {
- Qualifiers Quals = T.getType().getLocalQualifiers();
-
QualType Result = getDerived().TransformType(TLB, T.getUnqualifiedLoc());
if (Result.isNull())
return QualType();
- Result = getDerived().RebuildQualifiedType(Result, T.getBeginLoc(), Quals);
+ Result = getDerived().RebuildQualifiedType(Result, T);
+
+ if (Result.isNull())
+ return QualType();
// RebuildQualifiedType might have updated the type, but not in a way
// that invalidates the TypeLoc. (There's no location information for
@@ -4256,15 +4256,29 @@ TreeTransform<Derived>::TransformQualifiedType(TypeLocBuilder &TLB,
return Result;
}
-template<typename Derived>
+template <typename Derived>
QualType TreeTransform<Derived>::RebuildQualifiedType(QualType T,
- SourceLocation Loc,
- Qualifiers Quals) {
+ QualifiedTypeLoc TL) {
+
+ SourceLocation Loc = TL.getBeginLoc();
+ Qualifiers Quals = TL.getType().getLocalQualifiers();
+
+ if (((T.getAddressSpace() != LangAS::Default &&
+ Quals.getAddressSpace() != LangAS::Default)) &&
+ T.getAddressSpace() != Quals.getAddressSpace()) {
+ SemaRef.Diag(Loc, diag::err_address_space_mismatch_templ_inst)
+ << TL.getType() << T;
+ return QualType();
+ }
+
// C++ [dcl.fct]p7:
// [When] adding cv-qualifications on top of the function type [...] the
// cv-qualifiers are ignored.
- if (T->isFunctionType())
+ if (T->isFunctionType()) {
+ T = SemaRef.getASTContext().getAddrSpaceQualType(T,
+ Quals.getAddressSpace());
return T;
+ }
// C++ [dcl.ref]p1:
// when the cv-qualifiers are introduced through the use of a typedef-name
@@ -5231,7 +5245,7 @@ TreeTransform<Derived>::TransformFunctionProtoType(TypeLocBuilder &TLB,
SmallVector<QualType, 4> ExceptionStorage;
TreeTransform *This = this; // Work around gcc.gnu.org/PR56135.
return getDerived().TransformFunctionProtoType(
- TLB, TL, nullptr, 0,
+ TLB, TL, nullptr, Qualifiers(),
[&](FunctionProtoType::ExceptionSpecInfo &ESI, bool &Changed) {
return This->TransformExceptionSpec(TL.getBeginLoc(), ESI,
ExceptionStorage, Changed);
@@ -5241,7 +5255,7 @@ TreeTransform<Derived>::TransformFunctionProtoType(TypeLocBuilder &TLB,
template<typename Derived> template<typename Fn>
QualType TreeTransform<Derived>::TransformFunctionProtoType(
TypeLocBuilder &TLB, FunctionProtoTypeLoc TL, CXXRecordDecl *ThisContext,
- unsigned ThisTypeQuals, Fn TransformExceptionSpec) {
+ Qualifiers ThisTypeQuals, Fn TransformExceptionSpec) {
// Transform the parameters and return type.
//
@@ -5284,6 +5298,13 @@ QualType TreeTransform<Derived>::TransformFunctionProtoType(
if (ResultType.isNull())
return QualType();
+  // Return type cannot be qualified with an address space.
+ if (ResultType.getAddressSpace() != LangAS::Default) {
+ SemaRef.Diag(TL.getReturnLoc().getBeginLoc(),
+ diag::err_attribute_address_function_type);
+ return QualType();
+ }
+
if (getDerived().TransformFunctionTypeParams(
TL.getBeginLoc(), TL.getParams(),
TL.getTypePtr()->param_type_begin(),
@@ -6770,6 +6791,9 @@ TreeTransform<Derived>::TransformDoStmt(DoStmt *S) {
template<typename Derived>
StmtResult
TreeTransform<Derived>::TransformForStmt(ForStmt *S) {
+ if (getSema().getLangOpts().OpenMP)
+ getSema().startOpenMPLoop();
+
// Transform the initialization statement
StmtResult Init = getDerived().TransformStmt(S->getInit());
if (Init.isInvalid())
@@ -8437,6 +8461,33 @@ OMPClause *TreeTransform<Derived>::TransformOMPUnifiedAddressClause(
}
template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPUnifiedSharedMemoryClause(
+ OMPUnifiedSharedMemoryClause *C) {
+ llvm_unreachable(
+ "unified_shared_memory clause cannot appear in dependent context");
+}
+
+template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPReverseOffloadClause(
+ OMPReverseOffloadClause *C) {
+ llvm_unreachable("reverse_offload clause cannot appear in dependent context");
+}
+
+template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPDynamicAllocatorsClause(
+ OMPDynamicAllocatorsClause *C) {
+ llvm_unreachable(
+ "dynamic_allocators clause cannot appear in dependent context");
+}
+
+template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPAtomicDefaultMemOrderClause(
+ OMPAtomicDefaultMemOrderClause *C) {
+ llvm_unreachable(
+ "atomic_default_mem_order clause cannot appear in dependent context");
+}
+
+template <typename Derived>
OMPClause *
TreeTransform<Derived>::TransformOMPPrivateClause(OMPPrivateClause *C) {
llvm::SmallVector<Expr *, 16> Vars;
@@ -8896,12 +8947,18 @@ TreeTransform<Derived>::TransformOMPIsDevicePtrClause(OMPIsDevicePtrClause *C) {
//===----------------------------------------------------------------------===//
template<typename Derived>
ExprResult
+TreeTransform<Derived>::TransformConstantExpr(ConstantExpr *E) {
+ return TransformExpr(E->getSubExpr());
+}
+
+template<typename Derived>
+ExprResult
TreeTransform<Derived>::TransformPredefinedExpr(PredefinedExpr *E) {
if (!E->isTypeDependent())
return E;
return getDerived().RebuildPredefinedExpr(E->getLocation(),
- E->getIdentType());
+ E->getIdentKind());
}
template<typename Derived>
@@ -10970,7 +11027,7 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
SmallVector<QualType, 4> ExceptionStorage;
TreeTransform *This = this; // Work around gcc.gnu.org/PR56135.
QualType NewCallOpType = TransformFunctionProtoType(
- NewCallOpTLBuilder, OldCallOpFPTL, nullptr, 0,
+ NewCallOpTLBuilder, OldCallOpFPTL, nullptr, Qualifiers(),
[&](FunctionProtoType::ExceptionSpecInfo &ESI, bool &Changed) {
return This->TransformExceptionSpec(OldCallOpFPTL.getBeginLoc(), ESI,
ExceptionStorage, Changed);
diff --git a/lib/Serialization/ASTCommon.cpp b/lib/Serialization/ASTCommon.cpp
index 0be9a3bfd6..ca826d83d4 100644
--- a/lib/Serialization/ASTCommon.cpp
+++ b/lib/Serialization/ASTCommon.cpp
@@ -213,6 +213,11 @@ serialization::TypeIdxFromBuiltin(const BuiltinType *BT) {
ID = PREDEF_TYPE_##Id##_ID; \
break;
#include "clang/Basic/OpenCLImageTypes.def"
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id: \
+ ID = PREDEF_TYPE_##Id##_ID; \
+ break;
+#include "clang/Basic/OpenCLExtensionTypes.def"
case BuiltinType::OCLSampler:
ID = PREDEF_TYPE_SAMPLER_ID;
break;
diff --git a/lib/Serialization/ASTReader.cpp b/lib/Serialization/ASTReader.cpp
index f8d98eeeb6..558100d45c 100644
--- a/lib/Serialization/ASTReader.cpp
+++ b/lib/Serialization/ASTReader.cpp
@@ -61,7 +61,6 @@
#include "clang/Basic/TargetOptions.h"
#include "clang/Basic/TokenKinds.h"
#include "clang/Basic/Version.h"
-#include "clang/Frontend/PCHContainerOperations.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Lex/MacroInfo.h"
@@ -81,6 +80,7 @@
#include "clang/Serialization/Module.h"
#include "clang/Serialization/ModuleFileExtension.h"
#include "clang/Serialization/ModuleManager.h"
+#include "clang/Serialization/PCHContainerOperations.h"
#include "clang/Serialization/SerializationDiagnostic.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
@@ -5388,7 +5388,6 @@ bool ASTReader::ParsePreprocessorOptions(const RecordData &Record,
PPOpts.UsePredefines = Record[Idx++];
PPOpts.DetailedRecord = Record[Idx++];
PPOpts.ImplicitPCHInclude = ReadString(Record, Idx);
- PPOpts.ImplicitPTHInclude = ReadString(Record, Idx);
PPOpts.ObjCXXARCStandardLibrary =
static_cast<ObjCXXARCStandardLibraryKind>(Record[Idx++]);
SuggestedPredefines.clear();
@@ -6051,7 +6050,7 @@ QualType ASTReader::readTypeRecord(unsigned Index) {
EPI.Variadic = Record[Idx++];
EPI.HasTrailingReturn = Record[Idx++];
- EPI.TypeQuals = Record[Idx++];
+ EPI.TypeQuals = Qualifiers::fromOpaqueValue(Record[Idx++]);
EPI.RefQualifier = static_cast<RefQualifierKind>(Record[Idx++]);
SmallVector<QualType, 8> ExceptionStorage;
readExceptionSpec(*Loc.F, ExceptionStorage, EPI.ExceptionSpec, Record, Idx);
@@ -6963,6 +6962,11 @@ QualType ASTReader::GetType(TypeID ID) {
T = Context.SingletonId; \
break;
#include "clang/Basic/OpenCLImageTypes.def"
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case PREDEF_TYPE_##Id##_ID: \
+ T = Context.Id##Ty; \
+ break;
+#include "clang/Basic/OpenCLExtensionTypes.def"
case PREDEF_TYPE_SAMPLER_ID:
T = Context.OCLSamplerTy;
break;
@@ -11723,7 +11727,19 @@ OMPClause *OMPClauseReader::readClause() {
case OMPC_unified_address:
C = new (Context) OMPUnifiedAddressClause();
break;
- case OMPC_private:
+ case OMPC_unified_shared_memory:
+ C = new (Context) OMPUnifiedSharedMemoryClause();
+ break;
+ case OMPC_reverse_offload:
+ C = new (Context) OMPReverseOffloadClause();
+ break;
+ case OMPC_dynamic_allocators:
+ C = new (Context) OMPDynamicAllocatorsClause();
+ break;
+ case OMPC_atomic_default_mem_order:
+ C = new (Context) OMPAtomicDefaultMemOrderClause();
+ break;
+ case OMPC_private:
C = OMPPrivateClause::CreateEmpty(Context, Record.readInt());
break;
case OMPC_firstprivate:
@@ -11953,6 +11969,23 @@ void OMPClauseReader::VisitOMPNogroupClause(OMPNogroupClause *) {}
void OMPClauseReader::VisitOMPUnifiedAddressClause(OMPUnifiedAddressClause *) {}
+void OMPClauseReader::VisitOMPUnifiedSharedMemoryClause(
+ OMPUnifiedSharedMemoryClause *) {}
+
+void OMPClauseReader::VisitOMPReverseOffloadClause(OMPReverseOffloadClause *) {}
+
+void
+OMPClauseReader::VisitOMPDynamicAllocatorsClause(OMPDynamicAllocatorsClause *) {
+}
+
+void OMPClauseReader::VisitOMPAtomicDefaultMemOrderClause(
+ OMPAtomicDefaultMemOrderClause *C) {
+ C->setAtomicDefaultMemOrderKind(
+ static_cast<OpenMPAtomicDefaultMemOrderClauseKind>(Record.readInt()));
+ C->setLParenLoc(Record.readSourceLocation());
+ C->setAtomicDefaultMemOrderKindKwLoc(Record.readSourceLocation());
+}
+
void OMPClauseReader::VisitOMPPrivateClause(OMPPrivateClause *C) {
C->setLParenLoc(Record.readSourceLocation());
unsigned NumVars = C->varlist_size();
diff --git a/lib/Serialization/ASTReaderDecl.cpp b/lib/Serialization/ASTReaderDecl.cpp
index 6d541aa988..8c1710f660 100644
--- a/lib/Serialization/ASTReaderDecl.cpp
+++ b/lib/Serialization/ASTReaderDecl.cpp
@@ -48,7 +48,6 @@
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Sema/IdentifierResolver.h"
-#include "clang/Sema/SemaDiagnostic.h"
#include "clang/Serialization/ASTBitCodes.h"
#include "clang/Serialization/ASTReader.h"
#include "clang/Serialization/ContinuousRangeMap.h"
@@ -1365,6 +1364,7 @@ ASTDeclReader::RedeclarableResult ASTDeclReader::VisitVarDeclImpl(VarDecl *VD) {
VD->NonParmVarDeclBits.IsInitCapture = Record.readInt();
VD->NonParmVarDeclBits.PreviousDeclInSameBlockScope = Record.readInt();
VD->NonParmVarDeclBits.ImplicitParamKind = Record.readInt();
+ VD->NonParmVarDeclBits.EscapingByref = Record.readInt();
}
auto VarLinkage = Linkage(Record.readInt());
VD->setCachedLinkage(VarLinkage);
@@ -2912,25 +2912,30 @@ static bool hasSameOverloadableAttrs(const FunctionDecl *A,
// Note that pass_object_size attributes are represented in the function's
// ExtParameterInfo, so we don't need to check them here.
- // Return false if any of the enable_if expressions of A and B are different.
llvm::FoldingSetNodeID Cand1ID, Cand2ID;
auto AEnableIfAttrs = A->specific_attrs<EnableIfAttr>();
auto BEnableIfAttrs = B->specific_attrs<EnableIfAttr>();
- auto AEnableIf = AEnableIfAttrs.begin();
- auto BEnableIf = BEnableIfAttrs.begin();
- for (; AEnableIf != AEnableIfAttrs.end() && BEnableIf != BEnableIfAttrs.end();
- ++BEnableIf, ++AEnableIf) {
+
+ for (auto Pair : zip_longest(AEnableIfAttrs, BEnableIfAttrs)) {
+ Optional<EnableIfAttr *> Cand1A = std::get<0>(Pair);
+ Optional<EnableIfAttr *> Cand2A = std::get<1>(Pair);
+
+ // Return false if the number of enable_if attributes is different.
+ if (!Cand1A || !Cand2A)
+ return false;
+
Cand1ID.clear();
Cand2ID.clear();
- AEnableIf->getCond()->Profile(Cand1ID, A->getASTContext(), true);
- BEnableIf->getCond()->Profile(Cand2ID, B->getASTContext(), true);
+ (*Cand1A)->getCond()->Profile(Cand1ID, A->getASTContext(), true);
+ (*Cand2A)->getCond()->Profile(Cand2ID, B->getASTContext(), true);
+
+ // Return false if any of the enable_if expressions of A and B are
+ // different.
if (Cand1ID != Cand2ID)
return false;
}
-
- // Return false if the number of enable_if attributes was different.
- return AEnableIf == AEnableIfAttrs.end() && BEnableIf == BEnableIfAttrs.end();
+ return true;
}
/// Determine whether the two declarations refer to the same entity.
diff --git a/lib/Serialization/ASTReaderStmt.cpp b/lib/Serialization/ASTReaderStmt.cpp
index a80cd81c3a..88e7bb6471 100644
--- a/lib/Serialization/ASTReaderStmt.cpp
+++ b/lib/Serialization/ASTReaderStmt.cpp
@@ -154,7 +154,7 @@ void ASTStmtReader::VisitStmt(Stmt *S) {
void ASTStmtReader::VisitNullStmt(NullStmt *S) {
VisitStmt(S);
S->setSemiLoc(ReadSourceLocation());
- S->HasLeadingEmptyMacro = Record.readInt();
+ S->NullStmtBits.HasLeadingEmptyMacro = Record.readInt();
}
void ASTStmtReader::VisitCompoundStmt(CompoundStmt *S) {
@@ -164,7 +164,7 @@ void ASTStmtReader::VisitCompoundStmt(CompoundStmt *S) {
while (NumStmts--)
Stmts.push_back(Record.readSubStmt());
S->setStmts(Stmts);
- S->LBraceLoc = ReadSourceLocation();
+ S->CompoundStmtBits.LBraceLoc = ReadSourceLocation();
S->RBraceLoc = ReadSourceLocation();
}
@@ -177,10 +177,13 @@ void ASTStmtReader::VisitSwitchCase(SwitchCase *S) {
void ASTStmtReader::VisitCaseStmt(CaseStmt *S) {
VisitSwitchCase(S);
+ bool CaseStmtIsGNURange = Record.readInt();
S->setLHS(Record.readSubExpr());
- S->setRHS(Record.readSubExpr());
S->setSubStmt(Record.readSubStmt());
- S->setEllipsisLoc(ReadSourceLocation());
+ if (CaseStmtIsGNURange) {
+ S->setRHS(Record.readSubExpr());
+ S->setEllipsisLoc(ReadSourceLocation());
+ }
}
void ASTStmtReader::VisitDefaultStmt(DefaultStmt *S) {
@@ -199,38 +202,59 @@ void ASTStmtReader::VisitLabelStmt(LabelStmt *S) {
void ASTStmtReader::VisitAttributedStmt(AttributedStmt *S) {
VisitStmt(S);
+ // NumAttrs in AttributedStmt is set when creating an empty
+ // AttributedStmt in AttributedStmt::CreateEmpty, since it is needed
+ // to allocate the right amount of space for the trailing Attr *.
uint64_t NumAttrs = Record.readInt();
AttrVec Attrs;
Record.readAttributes(Attrs);
(void)NumAttrs;
- assert(NumAttrs == S->NumAttrs);
+ assert(NumAttrs == S->AttributedStmtBits.NumAttrs);
assert(NumAttrs == Attrs.size());
std::copy(Attrs.begin(), Attrs.end(), S->getAttrArrayPtr());
S->SubStmt = Record.readSubStmt();
- S->AttrLoc = ReadSourceLocation();
+ S->AttributedStmtBits.AttrLoc = ReadSourceLocation();
}
void ASTStmtReader::VisitIfStmt(IfStmt *S) {
VisitStmt(S);
+
S->setConstexpr(Record.readInt());
- S->setInit(Record.readSubStmt());
- S->setConditionVariable(Record.getContext(), ReadDeclAs<VarDecl>());
+ bool HasElse = Record.readInt();
+ bool HasVar = Record.readInt();
+ bool HasInit = Record.readInt();
+
S->setCond(Record.readSubExpr());
S->setThen(Record.readSubStmt());
- S->setElse(Record.readSubStmt());
+ if (HasElse)
+ S->setElse(Record.readSubStmt());
+ if (HasVar)
+ S->setConditionVariable(Record.getContext(), ReadDeclAs<VarDecl>());
+ if (HasInit)
+ S->setInit(Record.readSubStmt());
+
S->setIfLoc(ReadSourceLocation());
- S->setElseLoc(ReadSourceLocation());
+ if (HasElse)
+ S->setElseLoc(ReadSourceLocation());
}
void ASTStmtReader::VisitSwitchStmt(SwitchStmt *S) {
VisitStmt(S);
- S->setInit(Record.readSubStmt());
- S->setConditionVariable(Record.getContext(), ReadDeclAs<VarDecl>());
+
+ bool HasInit = Record.readInt();
+ bool HasVar = Record.readInt();
+ bool AllEnumCasesCovered = Record.readInt();
+ if (AllEnumCasesCovered)
+ S->setAllEnumCasesCovered();
+
S->setCond(Record.readSubExpr());
S->setBody(Record.readSubStmt());
+ if (HasInit)
+ S->setInit(Record.readSubStmt());
+ if (HasVar)
+ S->setConditionVariable(Record.getContext(), ReadDeclAs<VarDecl>());
+
S->setSwitchLoc(ReadSourceLocation());
- if (Record.readInt())
- S->setAllEnumCasesCovered();
SwitchCase *PrevSC = nullptr;
for (auto E = Record.size(); Record.getIdx() != E; ) {
@@ -246,10 +270,14 @@ void ASTStmtReader::VisitSwitchStmt(SwitchStmt *S) {
void ASTStmtReader::VisitWhileStmt(WhileStmt *S) {
VisitStmt(S);
- S->setConditionVariable(Record.getContext(), ReadDeclAs<VarDecl>());
+
+ bool HasVar = Record.readInt();
S->setCond(Record.readSubExpr());
S->setBody(Record.readSubStmt());
+ if (HasVar)
+ S->setConditionVariable(Record.getContext(), ReadDeclAs<VarDecl>());
+
S->setWhileLoc(ReadSourceLocation());
}
@@ -300,9 +328,14 @@ void ASTStmtReader::VisitBreakStmt(BreakStmt *S) {
void ASTStmtReader::VisitReturnStmt(ReturnStmt *S) {
VisitStmt(S);
+
+ bool HasNRVOCandidate = Record.readInt();
+
S->setRetValue(Record.readSubExpr());
+ if (HasNRVOCandidate)
+ S->setNRVOCandidate(ReadDeclAs<VarDecl>());
+
S->setReturnLoc(ReadSourceLocation());
- S->setNRVOCandidate(ReadDeclAs<VarDecl>());
}
void ASTStmtReader::VisitDeclStmt(DeclStmt *S) {
@@ -491,11 +524,19 @@ void ASTStmtReader::VisitExpr(Expr *E) {
"Incorrect expression field count");
}
+void ASTStmtReader::VisitConstantExpr(ConstantExpr *E) {
+ VisitExpr(E);
+ E->setSubExpr(Record.readSubExpr());
+}
+
void ASTStmtReader::VisitPredefinedExpr(PredefinedExpr *E) {
VisitExpr(E);
+ bool HasFunctionName = Record.readInt();
+ E->PredefinedExprBits.HasFunctionName = HasFunctionName;
+ E->PredefinedExprBits.Kind = Record.readInt();
E->setLocation(ReadSourceLocation());
- E->Type = (PredefinedExpr::IdentType)Record.readInt();
- E->FnName = cast_or_null<StringLiteral>(Record.readSubExpr());
+ if (HasFunctionName)
+ E->setFunctionName(cast<StringLiteral>(Record.readSubExpr()));
}
void ASTStmtReader::VisitDeclRefExpr(DeclRefExpr *E) {
@@ -554,22 +595,35 @@ void ASTStmtReader::VisitImaginaryLiteral(ImaginaryLiteral *E) {
void ASTStmtReader::VisitStringLiteral(StringLiteral *E) {
VisitExpr(E);
- unsigned Len = Record.readInt();
- assert(Record.peekInt() == E->getNumConcatenated() &&
- "Wrong number of concatenated tokens!");
- Record.skipInts(1);
- auto kind = static_cast<StringLiteral::StringKind>(Record.readInt());
- bool isPascal = Record.readInt();
-
- // Read string data
- auto B = &Record.peekInt();
- SmallString<16> Str(B, B + Len);
- E->setString(Record.getContext(), Str, kind, isPascal);
- Record.skipInts(Len);
- // Read source locations
- for (unsigned I = 0, N = E->getNumConcatenated(); I != N; ++I)
+ // NumConcatenated, Length and CharByteWidth are set by the empty
+ // ctor since they are needed to allocate storage for the trailing objects.
+ unsigned NumConcatenated = Record.readInt();
+ unsigned Length = Record.readInt();
+ unsigned CharByteWidth = Record.readInt();
+ assert((NumConcatenated == E->getNumConcatenated()) &&
+ "Wrong number of concatenated tokens!");
+ assert((Length == E->getLength()) && "Wrong Length!");
+ assert((CharByteWidth == E->getCharByteWidth()) && "Wrong character width!");
+ E->StringLiteralBits.Kind = Record.readInt();
+ E->StringLiteralBits.IsPascal = Record.readInt();
+
+ // The character width is originally computed via mapCharByteWidth.
+  // Check that the deserialized character width is consistent with the result
+ // of calling mapCharByteWidth.
+ assert((CharByteWidth ==
+ StringLiteral::mapCharByteWidth(Record.getContext().getTargetInfo(),
+ E->getKind())) &&
+ "Wrong character width!");
+
+ // Deserialize the trailing array of SourceLocation.
+ for (unsigned I = 0; I < NumConcatenated; ++I)
E->setStrTokenLoc(I, ReadSourceLocation());
+
+ // Deserialize the trailing array of char holding the string data.
+ char *StrData = E->getStrDataAsChar();
+ for (unsigned I = 0; I < Length * CharByteWidth; ++I)
+ StrData[I] = Record.readInt();
}
void ASTStmtReader::VisitCharacterLiteral(CharacterLiteral *E) {
@@ -589,10 +643,9 @@ void ASTStmtReader::VisitParenExpr(ParenExpr *E) {
void ASTStmtReader::VisitParenListExpr(ParenListExpr *E) {
VisitExpr(E);
unsigned NumExprs = Record.readInt();
- E->Exprs = new (Record.getContext()) Stmt*[NumExprs];
- for (unsigned i = 0; i != NumExprs; ++i)
- E->Exprs[i] = Record.readSubStmt();
- E->NumExprs = NumExprs;
+ assert((NumExprs == E->getNumExprs()) && "Wrong NumExprs!");
+ for (unsigned I = 0; I != NumExprs; ++I)
+ E->getTrailingObjects<Stmt *>()[I] = Record.readSubStmt();
E->LParenLoc = ReadSourceLocation();
E->RParenLoc = ReadSourceLocation();
}
@@ -678,11 +731,13 @@ void ASTStmtReader::VisitOMPArraySectionExpr(OMPArraySectionExpr *E) {
void ASTStmtReader::VisitCallExpr(CallExpr *E) {
VisitExpr(E);
- E->setNumArgs(Record.getContext(), Record.readInt());
+ unsigned NumArgs = Record.readInt();
+ assert((NumArgs == E->getNumArgs()) && "Wrong NumArgs!");
E->setRParenLoc(ReadSourceLocation());
E->setCallee(Record.readSubExpr());
- for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
+ for (unsigned I = 0; I != NumArgs; ++I)
E->setArg(I, Record.readSubExpr());
+ E->setADLCallKind(static_cast<CallExpr::ADLCallKind>(Record.readInt()));
}
void ASTStmtReader::VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
@@ -1419,21 +1474,21 @@ void ASTStmtReader::VisitCXXThisExpr(CXXThisExpr *E) {
void ASTStmtReader::VisitCXXThrowExpr(CXXThrowExpr *E) {
VisitExpr(E);
- E->ThrowLoc = ReadSourceLocation();
- E->Op = Record.readSubExpr();
- E->IsThrownVariableInScope = Record.readInt();
+ E->CXXThrowExprBits.ThrowLoc = ReadSourceLocation();
+ E->Operand = Record.readSubExpr();
+ E->CXXThrowExprBits.IsThrownVariableInScope = Record.readInt();
}
void ASTStmtReader::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
VisitExpr(E);
E->Param = ReadDeclAs<ParmVarDecl>();
- E->Loc = ReadSourceLocation();
+ E->CXXDefaultArgExprBits.Loc = ReadSourceLocation();
}
void ASTStmtReader::VisitCXXDefaultInitExpr(CXXDefaultInitExpr *E) {
VisitExpr(E);
E->Field = ReadDeclAs<FieldDecl>();
- E->Loc = ReadSourceLocation();
+ E->CXXDefaultInitExprBits.Loc = ReadSourceLocation();
}
void ASTStmtReader::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
@@ -1474,13 +1529,13 @@ void ASTStmtReader::VisitCXXNewExpr(CXXNewExpr *E) {
void ASTStmtReader::VisitCXXDeleteExpr(CXXDeleteExpr *E) {
VisitExpr(E);
- E->GlobalDelete = Record.readInt();
- E->ArrayForm = Record.readInt();
- E->ArrayFormAsWritten = Record.readInt();
- E->UsualArrayDeleteWantsSize = Record.readInt();
+ E->CXXDeleteExprBits.GlobalDelete = Record.readInt();
+ E->CXXDeleteExprBits.ArrayForm = Record.readInt();
+ E->CXXDeleteExprBits.ArrayFormAsWritten = Record.readInt();
+ E->CXXDeleteExprBits.UsualArrayDeleteWantsSize = Record.readInt();
E->OperatorDelete = ReadDeclAs<FunctionDecl>();
E->Argument = Record.readSubExpr();
- E->Loc = ReadSourceLocation();
+ E->CXXDeleteExprBits.Loc = ReadSourceLocation();
}
void ASTStmtReader::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) {
@@ -1856,6 +1911,8 @@ void ASTStmtReader::VisitOMPLoopDirective(OMPLoopDirective *D) {
D->setCombinedCond(Record.readSubExpr());
D->setCombinedNextLowerBound(Record.readSubExpr());
D->setCombinedNextUpperBound(Record.readSubExpr());
+ D->setCombinedDistCond(Record.readSubExpr());
+ D->setCombinedParForInDistCond(Record.readSubExpr());
}
SmallVector<Expr *, 4> Sub;
unsigned CollapsedNum = D->getCollapsedNumber();
@@ -2261,7 +2318,9 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case STMT_CASE:
- S = new (Context) CaseStmt(Empty);
+ S = CaseStmt::CreateEmpty(
+ Context,
+ /*CaseStmtIsGNURange*/ Record[ASTStmtReader::NumStmtFields + 3]);
break;
case STMT_DEFAULT:
@@ -2279,15 +2338,24 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case STMT_IF:
- S = new (Context) IfStmt(Empty);
+ S = IfStmt::CreateEmpty(
+ Context,
+ /* HasElse=*/Record[ASTStmtReader::NumStmtFields + 1],
+ /* HasVar=*/Record[ASTStmtReader::NumStmtFields + 2],
+ /* HasInit=*/Record[ASTStmtReader::NumStmtFields + 3]);
break;
case STMT_SWITCH:
- S = new (Context) SwitchStmt(Empty);
+ S = SwitchStmt::CreateEmpty(
+ Context,
+ /* HasInit=*/Record[ASTStmtReader::NumStmtFields],
+ /* HasVar=*/Record[ASTStmtReader::NumStmtFields + 1]);
break;
case STMT_WHILE:
- S = new (Context) WhileStmt(Empty);
+ S = WhileStmt::CreateEmpty(
+ Context,
+ /* HasVar=*/Record[ASTStmtReader::NumStmtFields]);
break;
case STMT_DO:
@@ -2315,7 +2383,8 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case STMT_RETURN:
- S = new (Context) ReturnStmt(Empty);
+ S = ReturnStmt::CreateEmpty(
+ Context, /* HasNRVOCandidate=*/Record[ASTStmtReader::NumStmtFields]);
break;
case STMT_DECL:
@@ -2331,12 +2400,18 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case STMT_CAPTURED:
- S = CapturedStmt::CreateDeserialized(Context,
- Record[ASTStmtReader::NumStmtFields]);
+ S = CapturedStmt::CreateDeserialized(
+ Context, Record[ASTStmtReader::NumStmtFields]);
+ break;
+
+ case EXPR_CONSTANT:
+ S = new (Context) ConstantExpr(Empty);
break;
case EXPR_PREDEFINED:
- S = new (Context) PredefinedExpr(Empty);
+ S = PredefinedExpr::CreateEmpty(
+ Context,
+ /*HasFunctionName*/ Record[ASTStmtReader::NumExprFields]);
break;
case EXPR_DECL_REF:
@@ -2362,8 +2437,11 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case EXPR_STRING_LITERAL:
- S = StringLiteral::CreateEmpty(Context,
- Record[ASTStmtReader::NumExprFields + 1]);
+ S = StringLiteral::CreateEmpty(
+ Context,
+ /* NumConcatenated=*/Record[ASTStmtReader::NumExprFields],
+ /* Length=*/Record[ASTStmtReader::NumExprFields + 1],
+ /* CharByteWidth=*/Record[ASTStmtReader::NumExprFields + 2]);
break;
case EXPR_CHARACTER_LITERAL:
@@ -2375,7 +2453,9 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case EXPR_PAREN_LIST:
- S = new (Context) ParenListExpr(Empty);
+ S = ParenListExpr::CreateEmpty(
+ Context,
+ /* NumExprs=*/Record[ASTStmtReader::NumExprFields]);
break;
case EXPR_UNARY_OPERATOR:
@@ -2401,7 +2481,9 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case EXPR_CALL:
- S = new (Context) CallExpr(Context, Stmt::CallExprClass, Empty);
+ S = new (Context) CallExpr(
+ Context, /* NumArgs=*/Record[ASTStmtReader::NumExprFields],
+ Empty);
break;
case EXPR_MEMBER: {
@@ -2991,11 +3073,15 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
}
case EXPR_CXX_OPERATOR_CALL:
- S = new (Context) CXXOperatorCallExpr(Context, Empty);
+ S = new (Context) CXXOperatorCallExpr(
+ Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields],
+ Empty);
break;
case EXPR_CXX_MEMBER_CALL:
- S = new (Context) CXXMemberCallExpr(Context, Empty);
+ S = new (Context) CXXMemberCallExpr(
+ Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields],
+ Empty);
break;
case EXPR_CXX_CONSTRUCT:
@@ -3035,7 +3121,8 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case EXPR_USER_DEFINED_LITERAL:
- S = new (Context) UserDefinedLiteral(Context, Empty);
+ S = new (Context) UserDefinedLiteral(
+ Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields], Empty);
break;
case EXPR_CXX_STD_INITIALIZER_LIST:
@@ -3205,7 +3292,8 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case EXPR_CUDA_KERNEL_CALL:
- S = new (Context) CUDAKernelCallExpr(Context, Empty);
+ S = new (Context) CUDAKernelCallExpr(
+ Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields], Empty);
break;
case EXPR_ASTYPE:
diff --git a/lib/Serialization/ASTWriter.cpp b/lib/Serialization/ASTWriter.cpp
index 570a1e1643..45de91f76f 100644
--- a/lib/Serialization/ASTWriter.cpp
+++ b/lib/Serialization/ASTWriter.cpp
@@ -310,7 +310,7 @@ void ASTTypeWriter::VisitFunctionProtoType(const FunctionProtoType *T) {
Record.push_back(T->isVariadic());
Record.push_back(T->hasTrailingReturn());
- Record.push_back(T->getTypeQuals());
+ Record.push_back(T->getTypeQuals().getAsOpaqueValue());
Record.push_back(static_cast<unsigned>(T->getRefQualifier()));
addExceptionSpec(T, Record);
@@ -1695,7 +1695,6 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
// Detailed record is important since it is used for the module cache hash.
Record.push_back(PPOpts.DetailedRecord);
AddString(PPOpts.ImplicitPCHInclude, Record);
- AddString(PPOpts.ImplicitPTHInclude, Record);
Record.push_back(static_cast<unsigned>(PPOpts.ObjCXXARCStandardLibrary));
Stream.EmitRecord(PREPROCESSOR_OPTIONS, Record);
@@ -5014,13 +5013,16 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
WriteFPPragmaOptions(SemaRef.getFPOptions());
WriteOpenCLExtensions(SemaRef);
WriteOpenCLExtensionTypes(SemaRef);
- WriteOpenCLExtensionDecls(SemaRef);
WriteCUDAPragmas(SemaRef);
// If we're emitting a module, write out the submodule information.
if (WritingModule)
WriteSubmodules(WritingModule);
+  // We need to have information about submodules to correctly deserialize
+  // decls from the OpenCLExtensionDecls block.
+ WriteOpenCLExtensionDecls(SemaRef);
+
Stream.EmitRecord(SPECIAL_TYPES, SpecialTypes);
// Write the record containing external, unnamed definitions.
@@ -6932,3 +6934,19 @@ void OMPClauseWriter::VisitOMPIsDevicePtrClause(OMPIsDevicePtrClause *C) {
}
void OMPClauseWriter::VisitOMPUnifiedAddressClause(OMPUnifiedAddressClause *) {}
+
+void OMPClauseWriter::VisitOMPUnifiedSharedMemoryClause(
+ OMPUnifiedSharedMemoryClause *) {}
+
+void OMPClauseWriter::VisitOMPReverseOffloadClause(OMPReverseOffloadClause *) {}
+
+void
+OMPClauseWriter::VisitOMPDynamicAllocatorsClause(OMPDynamicAllocatorsClause *) {
+}
+
+void OMPClauseWriter::VisitOMPAtomicDefaultMemOrderClause(
+ OMPAtomicDefaultMemOrderClause *C) {
+ Record.push_back(C->getAtomicDefaultMemOrderKind());
+ Record.AddSourceLocation(C->getLParenLoc());
+ Record.AddSourceLocation(C->getAtomicDefaultMemOrderKindKwLoc());
+}
diff --git a/lib/Serialization/ASTWriterDecl.cpp b/lib/Serialization/ASTWriterDecl.cpp
index e120f14b81..5ec11dda8a 100644
--- a/lib/Serialization/ASTWriterDecl.cpp
+++ b/lib/Serialization/ASTWriterDecl.cpp
@@ -938,6 +938,7 @@ void ASTDeclWriter::VisitVarDecl(VarDecl *D) {
Record.push_back(static_cast<unsigned>(IPD->getParameterKind()));
else
Record.push_back(0);
+ Record.push_back(D->isEscapingByref());
}
Record.push_back(D->getLinkageInternal());
@@ -1008,6 +1009,7 @@ void ASTDeclWriter::VisitVarDecl(VarDecl *D) {
!D->isInitCapture() &&
!D->isPreviousDeclInSameBlockScope() &&
!(D->hasAttr<BlocksAttr>() && D->getType()->getAsCXXRecordDecl()) &&
+ !D->isEscapingByref() &&
D->getStorageDuration() != SD_Static &&
!D->getMemberSpecializationInfo())
AbbrevToUse = Writer.getDeclVarAbbrev();
@@ -2072,6 +2074,7 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(0)); // isInitCapture
Abv->Add(BitCodeAbbrevOp(0)); // isPrevDeclInSameScope
Abv->Add(BitCodeAbbrevOp(0)); // ImplicitParamKind
+ Abv->Add(BitCodeAbbrevOp(0)); // EscapingByref
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // Linkage
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // IsInitICE (local)
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // VarKind (local enum)
diff --git a/lib/Serialization/ASTWriterStmt.cpp b/lib/Serialization/ASTWriterStmt.cpp
index 21051d866d..3a14c39fa1 100644
--- a/lib/Serialization/ASTWriterStmt.cpp
+++ b/lib/Serialization/ASTWriterStmt.cpp
@@ -73,7 +73,7 @@ void ASTStmtWriter::VisitStmt(Stmt *S) {
void ASTStmtWriter::VisitNullStmt(NullStmt *S) {
VisitStmt(S);
Record.AddSourceLocation(S->getSemiLoc());
- Record.push_back(S->HasLeadingEmptyMacro);
+ Record.push_back(S->NullStmtBits.HasLeadingEmptyMacro);
Code = serialization::STMT_NULL;
}
@@ -96,10 +96,13 @@ void ASTStmtWriter::VisitSwitchCase(SwitchCase *S) {
void ASTStmtWriter::VisitCaseStmt(CaseStmt *S) {
VisitSwitchCase(S);
+ Record.push_back(S->caseStmtIsGNURange());
Record.AddStmt(S->getLHS());
- Record.AddStmt(S->getRHS());
Record.AddStmt(S->getSubStmt());
- Record.AddSourceLocation(S->getEllipsisLoc());
+ if (S->caseStmtIsGNURange()) {
+ Record.AddStmt(S->getRHS());
+ Record.AddSourceLocation(S->getEllipsisLoc());
+ }
Code = serialization::STMT_CASE;
}
@@ -128,25 +131,50 @@ void ASTStmtWriter::VisitAttributedStmt(AttributedStmt *S) {
void ASTStmtWriter::VisitIfStmt(IfStmt *S) {
VisitStmt(S);
+
+ bool HasElse = S->getElse() != nullptr;
+ bool HasVar = S->getConditionVariableDeclStmt() != nullptr;
+ bool HasInit = S->getInit() != nullptr;
+
Record.push_back(S->isConstexpr());
- Record.AddStmt(S->getInit());
- Record.AddDeclRef(S->getConditionVariable());
+ Record.push_back(HasElse);
+ Record.push_back(HasVar);
+ Record.push_back(HasInit);
+
Record.AddStmt(S->getCond());
Record.AddStmt(S->getThen());
- Record.AddStmt(S->getElse());
+ if (HasElse)
+ Record.AddStmt(S->getElse());
+ if (HasVar)
+ Record.AddDeclRef(S->getConditionVariable());
+ if (HasInit)
+ Record.AddStmt(S->getInit());
+
Record.AddSourceLocation(S->getIfLoc());
- Record.AddSourceLocation(S->getElseLoc());
+ if (HasElse)
+ Record.AddSourceLocation(S->getElseLoc());
+
Code = serialization::STMT_IF;
}
void ASTStmtWriter::VisitSwitchStmt(SwitchStmt *S) {
VisitStmt(S);
- Record.AddStmt(S->getInit());
- Record.AddDeclRef(S->getConditionVariable());
+
+ bool HasInit = S->getInit() != nullptr;
+ bool HasVar = S->getConditionVariableDeclStmt() != nullptr;
+ Record.push_back(HasInit);
+ Record.push_back(HasVar);
+ Record.push_back(S->isAllEnumCasesCovered());
+
Record.AddStmt(S->getCond());
Record.AddStmt(S->getBody());
+ if (HasInit)
+ Record.AddStmt(S->getInit());
+ if (HasVar)
+ Record.AddDeclRef(S->getConditionVariable());
+
Record.AddSourceLocation(S->getSwitchLoc());
- Record.push_back(S->isAllEnumCasesCovered());
+
for (SwitchCase *SC = S->getSwitchCaseList(); SC;
SC = SC->getNextSwitchCase())
Record.push_back(Writer.RecordSwitchCaseID(SC));
@@ -155,9 +183,15 @@ void ASTStmtWriter::VisitSwitchStmt(SwitchStmt *S) {
void ASTStmtWriter::VisitWhileStmt(WhileStmt *S) {
VisitStmt(S);
- Record.AddDeclRef(S->getConditionVariable());
+
+ bool HasVar = S->getConditionVariableDeclStmt() != nullptr;
+ Record.push_back(HasVar);
+
Record.AddStmt(S->getCond());
Record.AddStmt(S->getBody());
+ if (HasVar)
+ Record.AddDeclRef(S->getConditionVariable());
+
Record.AddSourceLocation(S->getWhileLoc());
Code = serialization::STMT_WHILE;
}
@@ -215,9 +249,15 @@ void ASTStmtWriter::VisitBreakStmt(BreakStmt *S) {
void ASTStmtWriter::VisitReturnStmt(ReturnStmt *S) {
VisitStmt(S);
+
+ bool HasNRVOCandidate = S->getNRVOCandidate() != nullptr;
+ Record.push_back(HasNRVOCandidate);
+
Record.AddStmt(S->getRetValue());
+ if (HasNRVOCandidate)
+ Record.AddDeclRef(S->getNRVOCandidate());
+
Record.AddSourceLocation(S->getReturnLoc());
- Record.AddDeclRef(S->getNRVOCandidate());
Code = serialization::STMT_RETURN;
}
@@ -386,11 +426,21 @@ void ASTStmtWriter::VisitExpr(Expr *E) {
Record.push_back(E->getObjectKind());
}
+void ASTStmtWriter::VisitConstantExpr(ConstantExpr *E) {
+ VisitExpr(E);
+ Record.AddStmt(E->getSubExpr());
+ Code = serialization::EXPR_CONSTANT;
+}
+
void ASTStmtWriter::VisitPredefinedExpr(PredefinedExpr *E) {
VisitExpr(E);
+
+ bool HasFunctionName = E->getFunctionName() != nullptr;
+ Record.push_back(HasFunctionName);
+ Record.push_back(E->getIdentKind()); // FIXME: stable encoding
Record.AddSourceLocation(E->getLocation());
- Record.push_back(E->getIdentType()); // FIXME: stable encoding
- Record.AddStmt(E->getFunctionName());
+ if (HasFunctionName)
+ Record.AddStmt(E->getFunctionName());
Code = serialization::EXPR_PREDEFINED;
}
@@ -468,17 +518,23 @@ void ASTStmtWriter::VisitImaginaryLiteral(ImaginaryLiteral *E) {
void ASTStmtWriter::VisitStringLiteral(StringLiteral *E) {
VisitExpr(E);
- Record.push_back(E->getByteLength());
+
+ // Store the various bits of data of StringLiteral.
Record.push_back(E->getNumConcatenated());
+ Record.push_back(E->getLength());
+ Record.push_back(E->getCharByteWidth());
Record.push_back(E->getKind());
Record.push_back(E->isPascal());
- // FIXME: String data should be stored as a blob at the end of the
- // StringLiteral. However, we can't do so now because we have no
- // provision for coping with abbreviations when we're jumping around
- // the AST file during deserialization.
- Record.append(E->getBytes().begin(), E->getBytes().end());
+
+ // Store the trailing array of SourceLocation.
for (unsigned I = 0, N = E->getNumConcatenated(); I != N; ++I)
Record.AddSourceLocation(E->getStrTokenLoc(I));
+
+ // Store the trailing array of char holding the string data.
+ StringRef StrData = E->getBytes();
+ for (unsigned I = 0, N = E->getByteLength(); I != N; ++I)
+ Record.push_back(StrData[I]);
+
Code = serialization::EXPR_STRING_LITERAL;
}
@@ -503,11 +559,11 @@ void ASTStmtWriter::VisitParenExpr(ParenExpr *E) {
void ASTStmtWriter::VisitParenListExpr(ParenListExpr *E) {
VisitExpr(E);
- Record.push_back(E->NumExprs);
- for (unsigned i=0; i != E->NumExprs; ++i)
- Record.AddStmt(E->Exprs[i]);
- Record.AddSourceLocation(E->LParenLoc);
- Record.AddSourceLocation(E->RParenLoc);
+ Record.push_back(E->getNumExprs());
+ for (auto *SubStmt : E->exprs())
+ Record.AddStmt(SubStmt);
+ Record.AddSourceLocation(E->getLParenLoc());
+ Record.AddSourceLocation(E->getRParenLoc());
Code = serialization::EXPR_PAREN_LIST;
}
@@ -595,6 +651,7 @@ void ASTStmtWriter::VisitCallExpr(CallExpr *E) {
for (CallExpr::arg_iterator Arg = E->arg_begin(), ArgEnd = E->arg_end();
Arg != ArgEnd; ++Arg)
Record.AddStmt(*Arg);
+ Record.push_back(static_cast<unsigned>(E->getADLCallKind()));
Code = serialization::EXPR_CALL;
}
@@ -605,8 +662,8 @@ void ASTStmtWriter::VisitMemberExpr(MemberExpr *E) {
if (E->hasQualifier())
Record.AddNestedNameSpecifierLoc(E->getQualifierLoc());
- Record.push_back(E->HasTemplateKWAndArgsInfo);
- if (E->HasTemplateKWAndArgsInfo) {
+ Record.push_back(E->hasTemplateKWAndArgsInfo());
+ if (E->hasTemplateKWAndArgsInfo()) {
Record.AddSourceLocation(E->getTemplateKeywordLoc());
unsigned NumTemplateArgs = E->getNumTemplateArgs();
Record.push_back(NumTemplateArgs);
@@ -1450,7 +1507,7 @@ void ASTStmtWriter::VisitCXXDeleteExpr(CXXDeleteExpr *E) {
Record.push_back(E->doesUsualArrayDeleteWantSize());
Record.AddDeclRef(E->getOperatorDelete());
Record.AddStmt(E->getArgument());
- Record.AddSourceLocation(E->getSourceRange().getBegin());
+ Record.AddSourceLocation(E->getBeginLoc());
Code = serialization::EXPR_CXX_DELETE;
}
@@ -1854,6 +1911,8 @@ void ASTStmtWriter::VisitOMPLoopDirective(OMPLoopDirective *D) {
Record.AddStmt(D->getCombinedCond());
Record.AddStmt(D->getCombinedNextLowerBound());
Record.AddStmt(D->getCombinedNextUpperBound());
+ Record.AddStmt(D->getCombinedDistCond());
+ Record.AddStmt(D->getCombinedParForInDistCond());
}
for (auto I : D->counters()) {
Record.AddStmt(I);
diff --git a/lib/Serialization/CMakeLists.txt b/lib/Serialization/CMakeLists.txt
index 95b33c388c..a312cb91eb 100644
--- a/lib/Serialization/CMakeLists.txt
+++ b/lib/Serialization/CMakeLists.txt
@@ -17,6 +17,7 @@ add_clang_library(clangSerialization
Module.cpp
ModuleFileExtension.cpp
ModuleManager.cpp
+ PCHContainerOperations.cpp
ADDITIONAL_HEADERS
ASTCommon.h
diff --git a/lib/Serialization/GlobalModuleIndex.cpp b/lib/Serialization/GlobalModuleIndex.cpp
index 3733638d29..e7642a3892 100644
--- a/lib/Serialization/GlobalModuleIndex.cpp
+++ b/lib/Serialization/GlobalModuleIndex.cpp
@@ -12,12 +12,12 @@
//===----------------------------------------------------------------------===//
#include "ASTReaderInternals.h"
-#include "clang/Frontend/PCHContainerOperations.h"
#include "clang/Basic/FileManager.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Serialization/ASTBitCodes.h"
#include "clang/Serialization/GlobalModuleIndex.h"
#include "clang/Serialization/Module.h"
+#include "clang/Serialization/PCHContainerOperations.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallString.h"
diff --git a/lib/Serialization/ModuleManager.cpp b/lib/Serialization/ModuleManager.cpp
index fccfa88ab9..54e0c08c5b 100644
--- a/lib/Serialization/ModuleManager.cpp
+++ b/lib/Serialization/ModuleManager.cpp
@@ -16,12 +16,11 @@
#include "clang/Basic/FileManager.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/MemoryBufferCache.h"
-#include "clang/Basic/VirtualFileSystem.h"
-#include "clang/Frontend/PCHContainerOperations.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/ModuleMap.h"
#include "clang/Serialization/GlobalModuleIndex.h"
#include "clang/Serialization/Module.h"
+#include "clang/Serialization/PCHContainerOperations.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -33,6 +32,7 @@
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include <algorithm>
#include <cassert>
#include <memory>
@@ -150,7 +150,7 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type,
if (NewModule->Kind == MK_ImplicitModule) {
std::string TimestampFilename = NewModule->getTimestampFilename();
- vfs::Status Status;
+ llvm::vfs::Status Status;
// A cached stat value would be fine as well.
if (!FileMgr.getNoncachedStatValue(TimestampFilename, Status))
NewModule->InputFilesValidationTimestamp =
diff --git a/lib/Frontend/PCHContainerOperations.cpp b/lib/Serialization/PCHContainerOperations.cpp
index 340e8ce63f..fbc613efeb 100644
--- a/lib/Frontend/PCHContainerOperations.cpp
+++ b/lib/Serialization/PCHContainerOperations.cpp
@@ -1,4 +1,4 @@
-//===--- Frontend/PCHContainerOperations.cpp - PCH Containers ---*- C++ -*-===//
+//=== Serialization/PCHContainerOperations.cpp - PCH Containers -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Frontend/PCHContainerOperations.h"
+#include "clang/Serialization/PCHContainerOperations.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/Lex/ModuleLoader.h"
#include "llvm/Bitcode/BitstreamReader.h"
diff --git a/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.cpp b/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.cpp
deleted file mode 100644
index 3dec8a58c9..0000000000
--- a/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.cpp
+++ /dev/null
@@ -1,24 +0,0 @@
-//=- AllocationDiagnostics.cpp - Config options for allocation diags *- C++ -*-//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Declares the configuration functions for leaks/allocation diagnostics.
-//
-//===--------------------------
-
-#include "AllocationDiagnostics.h"
-
-namespace clang {
-namespace ento {
-
-bool shouldIncludeAllocationSiteInLeakDiagnostics(AnalyzerOptions &AOpts) {
- return AOpts.getBooleanOption("leak-diagnostics-reference-allocation",
- false);
-}
-
-}}
diff --git a/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.h b/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.h
deleted file mode 100644
index 62b7fab073..0000000000
--- a/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.h
+++ /dev/null
@@ -1,31 +0,0 @@
-//=--- AllocationDiagnostics.h - Config options for allocation diags *- C++ -*-//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Declares the configuration functions for leaks/allocation diagnostics.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_ALLOCATIONDIAGNOSTICS_H
-#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_ALLOCATIONDIAGNOSTICS_H
-
-#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
-
-namespace clang { namespace ento {
-
-/// Returns true if leak diagnostics should directly reference
-/// the allocatin site (where possible).
-///
-/// The default is false.
-///
-bool shouldIncludeAllocationSiteInLeakDiagnostics(AnalyzerOptions &AOpts);
-
-}}
-
-#endif
-
diff --git a/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp b/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp
index cc306329dc..f9cf97e508 100644
--- a/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp
@@ -45,8 +45,8 @@ class AnalysisOrderChecker
check::LiveSymbols> {
bool isCallbackEnabled(AnalyzerOptions &Opts, StringRef CallbackName) const {
- return Opts.getBooleanOption("*", false, this) ||
- Opts.getBooleanOption(CallbackName, false, this);
+ return Opts.getCheckerBooleanOption("*", false, this) ||
+ Opts.getCheckerBooleanOption(CallbackName, false, this);
}
bool isCallbackEnabled(CheckerContext &C, StringRef CallbackName) const {
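
The checker-scoped option accessors are now spelled getCheckerBooleanOption, getCheckerIntegerOption and getCheckerStringOption; passing the checker itself scopes the lookup to that checker's own option namespace. A minimal sketch of reading a boolean option from a callback, with an assumed option name (illustrative only):

    AnalyzerOptions &Opts = C.getAnalysisManager().getAnalyzerOptions();
    // "Aggressive" is a hypothetical per-checker option, not one from this patch.
    bool Aggressive =
        Opts.getCheckerBooleanOption("Aggressive", /*Default=*/false, this);
    if (Aggressive) {
      // ... noisier / more expensive checking ...
    }
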
diff --git a/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp b/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
index a21e10d948..d7f305aea9 100644
--- a/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
+++ b/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
@@ -214,7 +214,7 @@ void NilArgChecker::generateBugReport(ExplodedNode *N,
auto R = llvm::make_unique<BugReport>(*BT, Msg, N);
R->addRange(Range);
- bugreporter::trackNullOrUndefValue(N, E, *R);
+ bugreporter::trackExpressionValue(N, E, *R);
C.emitReport(std::move(R));
}
@@ -578,7 +578,7 @@ void CFRetainReleaseChecker::checkPreCall(const CallEvent &Call,
auto report = llvm::make_unique<BugReport>(BT, OS.str(), N);
report->addRange(Call.getArgSourceRange(0));
- bugreporter::trackNullOrUndefValue(N, Call.getArgExpr(0), *report);
+ bugreporter::trackExpressionValue(N, Call.getArgExpr(0), *report);
C.emitReport(std::move(report));
return;
}
@@ -800,7 +800,7 @@ void VariadicMethodTypeChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
//===----------------------------------------------------------------------===//
// The map from container symbol to the container count symbol.
-// We currently will remember the last countainer count symbol encountered.
+// We currently will remember the last container count symbol encountered.
REGISTER_MAP_WITH_PROGRAMSTATE(ContainerCountMap, SymbolRef, SymbolRef)
REGISTER_MAP_WITH_PROGRAMSTATE(ContainerNonEmptyMap, SymbolRef, bool)
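
bugreporter::trackNullOrUndefValue is renamed to trackExpressionValue throughout; it still takes the error node, the expression whose value should be tracked, and the report. A minimal sketch of the reporting pattern, assuming a CheckerContext C, a BugType BT and an interesting expression E (illustrative only):

    if (ExplodedNode *N = C.generateErrorNode()) {
      auto R = llvm::make_unique<BugReport>(BT, "Suspicious argument value", N);
      R->addRange(E->getSourceRange());
      // Attach the visitors that explain where E's value came from.
      bugreporter::trackExpressionValue(N, E, *R);
      C.emitReport(std::move(R));
    }
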
diff --git a/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp b/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
index 0e781d08e2..3541b7f269 100644
--- a/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
@@ -101,9 +101,10 @@ bool BuiltinFunctionChecker::evalCall(const CallExpr *CE,
// This must be resolvable at compile time, so we defer to the constant
// evaluator for a value.
SVal V = UnknownVal();
- llvm::APSInt Result;
- if (CE->EvaluateAsInt(Result, C.getASTContext(), Expr::SE_NoSideEffects)) {
+ Expr::EvalResult EVResult;
+ if (CE->EvaluateAsInt(EVResult, C.getASTContext(), Expr::SE_NoSideEffects)) {
// Make sure the result has the correct type.
+ llvm::APSInt Result = EVResult.Val.getInt();
SValBuilder &SVB = C.getSValBuilder();
BasicValueFactory &BVF = SVB.getBasicValueFactory();
BVF.getAPSIntType(CE->getType()).apply(Result);
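
Expr::EvaluateAsInt now reports through an Expr::EvalResult out-parameter instead of a bare llvm::APSInt, and callers pull the integer out of EvalResult.Val. The updated pattern in isolation, with illustrative names:

    Expr::EvalResult EVResult;
    if (E->EvaluateAsInt(EVResult, Ctx, Expr::SE_NoSideEffects)) {
      llvm::APSInt Value = EVResult.Val.getInt();
      // ... use Value, e.g. check its sign or magnitude ...
    }
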
diff --git a/lib/StaticAnalyzer/Checkers/CMakeLists.txt b/lib/StaticAnalyzer/Checkers/CMakeLists.txt
index 306813b880..47dbac634a 100644
--- a/lib/StaticAnalyzer/Checkers/CMakeLists.txt
+++ b/lib/StaticAnalyzer/Checkers/CMakeLists.txt
@@ -3,7 +3,6 @@ set(LLVM_LINK_COMPONENTS
)
add_clang_library(clangStaticAnalyzerCheckers
- AllocationDiagnostics.cpp
AnalysisOrderChecker.cpp
AnalyzerStatsChecker.cpp
ArrayBoundChecker.cpp
@@ -35,6 +34,7 @@ add_clang_library(clangStaticAnalyzerCheckers
DivZeroChecker.cpp
DynamicTypePropagation.cpp
DynamicTypeChecker.cpp
+ EnumCastOutOfRangeChecker.cpp
ExprInspectionChecker.cpp
FixedAddressChecker.cpp
GCDAntipatternChecker.cpp
@@ -52,7 +52,7 @@ add_clang_library(clangStaticAnalyzerCheckers
MallocOverflowSecurityChecker.cpp
MallocSizeofChecker.cpp
MmapWriteExecChecker.cpp
- MisusedMovedObjectChecker.cpp
+ MoveChecker.cpp
MPI-Checker/MPIBugReporter.cpp
MPI-Checker/MPIChecker.cpp
MPI-Checker/MPIFunctionClassifier.cpp
diff --git a/lib/StaticAnalyzer/Checkers/CStringChecker.cpp b/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
index 12a576e5d8..ed68df93be 100644
--- a/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
@@ -124,6 +124,7 @@ public:
void evalStdCopyBackward(CheckerContext &C, const CallExpr *CE) const;
void evalStdCopyCommon(CheckerContext &C, const CallExpr *CE) const;
void evalMemset(CheckerContext &C, const CallExpr *CE) const;
+ void evalBzero(CheckerContext &C, const CallExpr *CE) const;
// Utility methods
std::pair<ProgramStateRef , ProgramStateRef >
@@ -158,7 +159,7 @@ public:
static bool SummarizeRegion(raw_ostream &os, ASTContext &Ctx,
const MemRegion *MR);
- static bool memsetAux(const Expr *DstBuffer, const Expr *CharE,
+ static bool memsetAux(const Expr *DstBuffer, SVal CharE,
const Expr *Size, CheckerContext &C,
ProgramStateRef &State);
@@ -553,7 +554,8 @@ void CStringChecker::emitNullArgBug(CheckerContext &C, ProgramStateRef State,
BuiltinBug *BT = static_cast<BuiltinBug *>(BT_Null.get());
auto Report = llvm::make_unique<BugReport>(*BT, WarningMsg, N);
Report->addRange(S->getSourceRange());
- bugreporter::trackNullOrUndefValue(N, S, *Report);
+ if (const auto *Ex = dyn_cast<Expr>(S))
+ bugreporter::trackExpressionValue(N, Ex, *Report);
C.emitReport(std::move(Report));
}
}
@@ -1004,11 +1006,10 @@ bool CStringChecker::SummarizeRegion(raw_ostream &os, ASTContext &Ctx,
}
}
-bool CStringChecker::memsetAux(const Expr *DstBuffer, const Expr *CharE,
+bool CStringChecker::memsetAux(const Expr *DstBuffer, SVal CharVal,
const Expr *Size, CheckerContext &C,
ProgramStateRef &State) {
SVal MemVal = C.getSVal(DstBuffer);
- SVal CharVal = C.getSVal(CharE);
SVal SizeVal = C.getSVal(Size);
const MemRegion *MR = MemVal.getAsRegion();
if (!MR)
@@ -2183,13 +2184,59 @@ void CStringChecker::evalMemset(CheckerContext &C, const CallExpr *CE) const {
// According to the values of the arguments, bind the value of the second
// argument to the destination buffer and set string length, or just
// invalidate the destination buffer.
- if (!memsetAux(Mem, CharE, Size, C, State))
+ if (!memsetAux(Mem, C.getSVal(CharE), Size, C, State))
return;
State = State->BindExpr(CE, LCtx, MemVal);
C.addTransition(State);
}
+void CStringChecker::evalBzero(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() != 2)
+ return;
+
+ CurrentFunctionDescription = "memory clearance function";
+
+ const Expr *Mem = CE->getArg(0);
+ const Expr *Size = CE->getArg(1);
+ SVal Zero = C.getSValBuilder().makeZeroVal(C.getASTContext().IntTy);
+
+ ProgramStateRef State = C.getState();
+
+ // See if the size argument is zero.
+ SVal SizeVal = C.getSVal(Size);
+ QualType SizeTy = Size->getType();
+
+ ProgramStateRef StateZeroSize, StateNonZeroSize;
+ std::tie(StateZeroSize, StateNonZeroSize) =
+ assumeZero(C, State, SizeVal, SizeTy);
+
+ // If the size is zero, there won't be any actual memory access,
+ // In this case we just return.
+ if (StateZeroSize && !StateNonZeroSize) {
+ C.addTransition(StateZeroSize);
+ return;
+ }
+
+ // Get the value of the memory area.
+ SVal MemVal = C.getSVal(Mem);
+
+ // Ensure the memory area is not null.
+ // If it is NULL there will be a NULL pointer dereference.
+ State = checkNonNull(C, StateNonZeroSize, Mem, MemVal);
+ if (!State)
+ return;
+
+ State = CheckBufferAccess(C, State, Size, Mem);
+ if (!State)
+ return;
+
+ if (!memsetAux(Mem, Zero, Size, C, State))
+ return;
+
+ C.addTransition(State);
+}
+
static bool isCPPStdLibraryFunction(const FunctionDecl *FD, StringRef Name) {
IdentifierInfo *II = FD->getIdentifier();
if (!II)
@@ -2223,7 +2270,8 @@ bool CStringChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
evalFunction = &CStringChecker::evalMemcmp;
else if (C.isCLibraryFunction(FDecl, "memmove"))
evalFunction = &CStringChecker::evalMemmove;
- else if (C.isCLibraryFunction(FDecl, "memset"))
+ else if (C.isCLibraryFunction(FDecl, "memset") ||
+ C.isCLibraryFunction(FDecl, "explicit_memset"))
evalFunction = &CStringChecker::evalMemset;
else if (C.isCLibraryFunction(FDecl, "strcpy"))
evalFunction = &CStringChecker::evalStrcpy;
@@ -2261,6 +2309,9 @@ bool CStringChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
evalFunction = &CStringChecker::evalStdCopy;
else if (isCPPStdLibraryFunction(FDecl, "copy_backward"))
evalFunction = &CStringChecker::evalStdCopyBackward;
+ else if (C.isCLibraryFunction(FDecl, "bzero") ||
+ C.isCLibraryFunction(FDecl, "explicit_bzero"))
+ evalFunction = &CStringChecker::evalBzero;
// If the callee isn't a string function, let another checker handle it.
if (!evalFunction)
@@ -2384,9 +2435,6 @@ void CStringChecker::checkLiveSymbols(ProgramStateRef state,
void CStringChecker::checkDeadSymbols(SymbolReaper &SR,
CheckerContext &C) const {
- if (!SR.hasDeadSymbols())
- return;
-
ProgramStateRef state = C.getState();
CStringLengthTy Entries = state->get<CStringLength>();
if (Entries.isEmpty())
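
With the new evalBzero callback, bzero and explicit_bzero are modeled like memset with a zero value: a nonzero size requires a non-null destination and a sufficiently large buffer, and on success the buffer contents and string length are updated accordingly (explicit_memset is likewise routed to evalMemset). A hypothetical snippet of the calls this covers, not taken from the patch's tests:

    #include <cstddef>
    extern "C" void bzero(void *s, std::size_t n);
    extern "C" void explicit_bzero(void *s, std::size_t n);

    void clear_buffers(char *p) {
      char buf[8];
      bzero(buf, sizeof(buf));   // modeled like memset(buf, 0, 8)
      explicit_bzero(buf, 16);   // may be flagged: write past the 8-byte buffer
      bzero(p, 8);               // may be flagged when p can be null
    }
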
diff --git a/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp b/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
index 1f9bdeb059..400b719cfd 100644
--- a/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
@@ -108,7 +108,7 @@ void CallAndMessageChecker::emitBadCall(BugType *BT, CheckerContext &C,
R->addRange(BadE->getSourceRange());
if (BadE->isGLValue())
BadE = bugreporter::getDerefExpr(BadE);
- bugreporter::trackNullOrUndefValue(N, BadE, *R);
+ bugreporter::trackExpressionValue(N, BadE, *R);
}
C.emitReport(std::move(R));
}
@@ -185,9 +185,9 @@ bool CallAndMessageChecker::uninitRefOrPointer(
LazyInit_BT(BD, BT);
auto R = llvm::make_unique<BugReport>(*BT, Os.str(), N);
R->addRange(ArgRange);
- if (ArgEx) {
- bugreporter::trackNullOrUndefValue(N, ArgEx, *R);
- }
+ if (ArgEx)
+ bugreporter::trackExpressionValue(N, ArgEx, *R);
+
C.emitReport(std::move(R));
}
return true;
@@ -196,6 +196,7 @@ bool CallAndMessageChecker::uninitRefOrPointer(
return false;
}
+namespace {
class FindUninitializedField {
public:
SmallVector<const FieldDecl *, 10> FieldChain;
@@ -234,6 +235,7 @@ public:
return false;
}
};
+} // namespace
bool CallAndMessageChecker::PreVisitProcessArg(CheckerContext &C,
SVal V,
@@ -262,7 +264,7 @@ bool CallAndMessageChecker::PreVisitProcessArg(CheckerContext &C,
R->addRange(ArgRange);
if (ArgEx)
- bugreporter::trackNullOrUndefValue(N, ArgEx, *R);
+ bugreporter::trackExpressionValue(N, ArgEx, *R);
C.emitReport(std::move(R));
}
return true;
@@ -305,7 +307,7 @@ bool CallAndMessageChecker::PreVisitProcessArg(CheckerContext &C,
R->addRange(ArgRange);
if (ArgEx)
- bugreporter::trackNullOrUndefValue(N, ArgEx, *R);
+ bugreporter::trackExpressionValue(N, ArgEx, *R);
// FIXME: enhance track back for uninitialized value for arbitrary
// memregions
C.emitReport(std::move(R));
@@ -365,7 +367,7 @@ void CallAndMessageChecker::checkPreStmt(const CXXDeleteExpr *DE,
Desc = "Argument to 'delete' is uninitialized";
BugType *BT = BT_cxx_delete_undef.get();
auto R = llvm::make_unique<BugReport>(*BT, Desc, N);
- bugreporter::trackNullOrUndefValue(N, DE, *R);
+ bugreporter::trackExpressionValue(N, DE, *R);
C.emitReport(std::move(R));
return;
}
@@ -494,7 +496,7 @@ void CallAndMessageChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
// FIXME: getTrackNullOrUndefValueVisitor can't handle "super" yet.
if (const Expr *ReceiverE = ME->getInstanceReceiver())
- bugreporter::trackNullOrUndefValue(N, ReceiverE, *R);
+ bugreporter::trackExpressionValue(N, ReceiverE, *R);
C.emitReport(std::move(R));
}
return;
@@ -535,7 +537,7 @@ void CallAndMessageChecker::emitNilReceiverBug(CheckerContext &C,
report->addRange(ME->getReceiverRange());
// FIXME: This won't track "self" in messages to super.
if (const Expr *receiver = ME->getInstanceReceiver()) {
- bugreporter::trackNullOrUndefValue(N, receiver, *report);
+ bugreporter::trackExpressionValue(N, receiver, *report);
}
C.emitReport(std::move(report));
}
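
Besides the rename to trackExpressionValue, the file-local FindUninitializedField helper is now wrapped in an anonymous namespace, giving it internal linkage so that an identically named helper in another checker's translation unit cannot collide or violate the ODR. Schematically, under assumed file names:

    // FooChecker.cpp
    namespace { struct Helper { int Depth; }; }    // TU-local type

    // BarChecker.cpp
    namespace { struct Helper { bool Seen; }; }    // distinct type, no ODR clash
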
diff --git a/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp b/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
index 202233acff..87d7d90ee2 100644
--- a/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
+++ b/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
@@ -188,7 +188,7 @@ void WalkAST::VisitForStmt(ForStmt *FS) {
}
//===----------------------------------------------------------------------===//
-// Check: floating poing variable used as loop counter.
+// Check: floating point variable used as loop counter.
// Originally: <rdar://problem/6336718>
// Implements: CERT security coding advisory FLP-30.
//===----------------------------------------------------------------------===//
@@ -597,9 +597,10 @@ void WalkAST::checkCall_mkstemp(const CallExpr *CE, const FunctionDecl *FD) {
unsigned suffix = 0;
if (ArgSuffix.second >= 0) {
const Expr *suffixEx = CE->getArg((unsigned)ArgSuffix.second);
- llvm::APSInt Result;
- if (!suffixEx->EvaluateAsInt(Result, BR.getContext()))
+ Expr::EvalResult EVResult;
+ if (!suffixEx->EvaluateAsInt(EVResult, BR.getContext()))
return;
+ llvm::APSInt Result = EVResult.Val.getInt();
// FIXME: Issue a warning.
if (Result.isNegative())
return;
diff --git a/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp b/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
index 7862a4c256..e5f2937300 100644
--- a/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
+++ b/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
@@ -169,7 +169,7 @@ public:
/// This callback should be used by the checkers to aggressively clean
/// up/reduce the checker state, which is important for reducing the overall
/// memory usage. Specifically, if a checker keeps symbol specific information
- /// in the sate, it can and should be dropped after the symbol becomes dead.
+ /// in the state, it can and should be dropped after the symbol becomes dead.
/// In addition, reporting a bug as soon as the checker becomes dead leads to
/// more precise diagnostics. (For example, one should report that a malloced
/// variable is not freed right after it goes out of scope.)
diff --git a/lib/StaticAnalyzer/Checkers/ClangCheckers.cpp b/lib/StaticAnalyzer/Checkers/ClangCheckers.cpp
index fb9e366c3d..d12e421d31 100644
--- a/lib/StaticAnalyzer/Checkers/ClangCheckers.cpp
+++ b/lib/StaticAnalyzer/Checkers/ClangCheckers.cpp
@@ -25,7 +25,7 @@ using namespace ento;
void ento::registerBuiltinCheckers(CheckerRegistry &registry) {
#define GET_CHECKERS
-#define CHECKER(FULLNAME,CLASS,DESCFILE,HELPTEXT,GROUPINDEX,HIDDEN) \
+#define CHECKER(FULLNAME, CLASS, HELPTEXT) \
registry.addChecker(register##CLASS, FULLNAME, HELPTEXT);
#include "clang/StaticAnalyzer/Checkers/Checkers.inc"
#undef GET_CHECKERS
diff --git a/lib/StaticAnalyzer/Checkers/ClangSACheckers.h b/lib/StaticAnalyzer/Checkers/ClangSACheckers.h
index d6e96f27a7..cd42cd6cd3 100644
--- a/lib/StaticAnalyzer/Checkers/ClangSACheckers.h
+++ b/lib/StaticAnalyzer/Checkers/ClangSACheckers.h
@@ -24,7 +24,7 @@ class CheckerManager;
class CheckerRegistry;
#define GET_CHECKERS
-#define CHECKER(FULLNAME,CLASS,CXXFILE,HELPTEXT,GROUPINDEX,HIDDEN) \
+#define CHECKER(FULLNAME, CLASS, HELPTEXT) \
void register##CLASS(CheckerManager &mgr);
#include "clang/StaticAnalyzer/Checkers/Checkers.inc"
#undef CHECKER
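
The CHECKER record emitted into Checkers.inc now carries only the full checker name, the implementing class and the help text; the description file, group index and hidden flag are gone, so both expansion sites shrink to a three-parameter macro. A sketch of how an entry is consumed under the new shape (the sample entry is illustrative, not copied from the generated file):

    #define GET_CHECKERS
    #define CHECKER(FULLNAME, CLASS, HELPTEXT)                                  \
      registry.addChecker(register##CLASS, FULLNAME, HELPTEXT);
    #include "clang/StaticAnalyzer/Checkers/Checkers.inc"
    #undef CHECKER
    #undef GET_CHECKERS

    // A hypothetical generated entry such as
    //   CHECKER("core.DivideZero", DivZeroChecker, "Check for division by zero")
    // expands to
    //   registry.addChecker(registerDivZeroChecker, "core.DivideZero",
    //                       "Check for division by zero");
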
diff --git a/lib/StaticAnalyzer/Checkers/CloneChecker.cpp b/lib/StaticAnalyzer/Checkers/CloneChecker.cpp
index ee517ed977..427b9c4ad2 100644
--- a/lib/StaticAnalyzer/Checkers/CloneChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CloneChecker.cpp
@@ -42,7 +42,7 @@ public:
void reportClones(BugReporter &BR, AnalysisManager &Mgr,
std::vector<CloneDetector::CloneGroup> &CloneGroups) const;
- /// Reports only suspicious clones to the user along with informaton
+ /// Reports only suspicious clones to the user along with information
/// that explain why they are suspicious.
void reportSuspiciousClones(
BugReporter &BR, AnalysisManager &Mgr,
@@ -63,18 +63,18 @@ void CloneChecker::checkEndOfTranslationUnit(const TranslationUnitDecl *TU,
// At this point, every statement in the translation unit has been analyzed by
// the CloneDetector. The only thing left to do is to report the found clones.
- int MinComplexity = Mgr.getAnalyzerOptions().getOptionAsInteger(
+ int MinComplexity = Mgr.getAnalyzerOptions().getCheckerIntegerOption(
"MinimumCloneComplexity", 50, this);
assert(MinComplexity >= 0);
- bool ReportSuspiciousClones = Mgr.getAnalyzerOptions().getBooleanOption(
- "ReportSuspiciousClones", true, this);
+ bool ReportSuspiciousClones = Mgr.getAnalyzerOptions()
+ .getCheckerBooleanOption("ReportSuspiciousClones", true, this);
- bool ReportNormalClones = Mgr.getAnalyzerOptions().getBooleanOption(
+ bool ReportNormalClones = Mgr.getAnalyzerOptions().getCheckerBooleanOption(
"ReportNormalClones", true, this);
- StringRef IgnoredFilesPattern = Mgr.getAnalyzerOptions().getOptionAsString(
- "IgnoredFilesPattern", "", this);
+ StringRef IgnoredFilesPattern = Mgr.getAnalyzerOptions()
+ .getCheckerStringOption("IgnoredFilesPattern", "", this);
// Let the CloneDetector create a list of clones from all the analyzed
// statements. We don't filter for matching variable patterns at this point
diff --git a/lib/StaticAnalyzer/Checkers/ConversionChecker.cpp b/lib/StaticAnalyzer/Checkers/ConversionChecker.cpp
index 17ec2c2887..208f94451c 100644
--- a/lib/StaticAnalyzer/Checkers/ConversionChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ConversionChecker.cpp
@@ -14,8 +14,10 @@
// of expressions. A warning is reported when:
// * a negative value is implicitly converted to an unsigned value in an
// assignment, comparison or multiplication.
-// * assignment / initialization when source value is greater than the max
-// value of target
+// * assignment / initialization when the source value is greater than the max
+// value of the target integer type
+// * assignment / initialization when the source integer is above the range
+// where the target floating point type can represent all integers
//
// Many compilers and tools have similar checks that are based on semantic
// analysis. Those checks are sound but have poor precision. ConversionChecker
@@ -28,6 +30,9 @@
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "llvm/ADT/APFloat.h"
+
+#include <climits>
using namespace clang;
using namespace ento;
@@ -40,11 +45,9 @@ public:
private:
mutable std::unique_ptr<BuiltinBug> BT;
- // Is there loss of precision
bool isLossOfPrecision(const ImplicitCastExpr *Cast, QualType DestType,
CheckerContext &C) const;
- // Is there loss of sign
bool isLossOfSign(const ImplicitCastExpr *Cast, CheckerContext &C) const;
void reportBug(ExplodedNode *N, CheckerContext &C, const char Msg[]) const;
@@ -132,19 +135,51 @@ bool ConversionChecker::isLossOfPrecision(const ImplicitCastExpr *Cast,
QualType SubType = Cast->IgnoreParenImpCasts()->getType();
- if (!DestType->isIntegerType() || !SubType->isIntegerType())
+ if (!DestType->isRealType() || !SubType->isIntegerType())
return false;
- if (C.getASTContext().getIntWidth(DestType) >=
- C.getASTContext().getIntWidth(SubType))
+ const bool isFloat = DestType->isFloatingType();
+
+ const auto &AC = C.getASTContext();
+
+ // We will find the largest RepresentsUntilExp value such that the DestType
+ // can exactly represent all nonnegative integers below 2^RepresentsUntilExp.
+ unsigned RepresentsUntilExp;
+
+ if (isFloat) {
+ const llvm::fltSemantics &Sema = AC.getFloatTypeSemantics(DestType);
+ RepresentsUntilExp = llvm::APFloat::semanticsPrecision(Sema);
+ } else {
+ RepresentsUntilExp = AC.getIntWidth(DestType);
+ if (RepresentsUntilExp == 1) {
+ // This is just casting a number to bool, probably not a bug.
+ return false;
+ }
+ if (DestType->isSignedIntegerType())
+ RepresentsUntilExp--;
+ }
+
+ if (RepresentsUntilExp >= sizeof(unsigned long long) * CHAR_BIT) {
+ // Avoid overflow in our later calculations.
return false;
+ }
+
+ unsigned CorrectedSrcWidth = AC.getIntWidth(SubType);
+ if (SubType->isSignedIntegerType())
+ CorrectedSrcWidth--;
- unsigned W = C.getASTContext().getIntWidth(DestType);
- if (W == 1 || W >= 64U)
+ if (RepresentsUntilExp >= CorrectedSrcWidth) {
+ // Simple case: the destination can store all values of the source type.
return false;
+ }
- unsigned long long MaxVal = 1ULL << W;
+ unsigned long long MaxVal = 1ULL << RepresentsUntilExp;
+ if (isFloat) {
+ // If this is a floating point type, it can also represent MaxVal exactly.
+ MaxVal++;
+ }
return C.isGreaterOrEqual(Cast->getSubExpr(), MaxVal);
+ // TODO: maybe also check negative values with too large magnitude.
}
bool ConversionChecker::isLossOfSign(const ImplicitCastExpr *Cast,
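
The precision check now also covers integer-to-floating conversions: a floating type whose significand has p bits represents every integer up to and including 2^p exactly, so a source value known to exceed that bound may be rounded on assignment. For a typical 32-bit float, p is 24 and the threshold is 16777216. A hypothetical example of the code this is meant to catch (illustrative only):

    void store_as_float(unsigned long long n) {
      if (n > 100000000) {   // n is known to exceed 2^24 + 1 on this path
        float f = n;         // may warn: loss of precision in the conversion
        (void)f;
      }
    }
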
diff --git a/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp b/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
index 76a9a7c567..7446eadf34 100644
--- a/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
@@ -329,9 +329,8 @@ public:
return;
if (const Expr *E = V->getInit()) {
- while (const ExprWithCleanups *exprClean =
- dyn_cast<ExprWithCleanups>(E))
- E = exprClean->getSubExpr();
+ while (const FullExpr *FE = dyn_cast<FullExpr>(E))
+ E = FE->getSubExpr();
// Look through transitive assignments, e.g.:
// int x = y = 0;
diff --git a/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp b/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
index 810a33ed40..60027f4a8b 100644
--- a/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
+++ b/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
@@ -182,7 +182,9 @@ public:
llvm::errs() << "[config]\n";
for (unsigned I = 0, E = Keys.size(); I != E; ++I)
- llvm::errs() << Keys[I]->getKey() << " = " << Keys[I]->second << '\n';
+ llvm::errs() << Keys[I]->getKey() << " = "
+ << (Keys[I]->second.empty() ? "\"\"" : Keys[I]->second)
+ << '\n';
llvm::errs() << "[stats]\n" << "num-entries = " << Keys.size() << '\n';
}
diff --git a/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp b/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
index 152b937bb0..368d5ce357 100644
--- a/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
@@ -111,6 +111,12 @@ static bool suppressReport(const Expr *E) {
return E->getType().getQualifiers().hasAddressSpace();
}
+static bool isDeclRefExprToReference(const Expr *E) {
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
+ return DRE->getDecl()->getType()->isReferenceType();
+ return false;
+}
+
void DereferenceChecker::reportBug(ProgramStateRef State, const Stmt *S,
CheckerContext &C) const {
// Generate an error node.
@@ -154,7 +160,7 @@ void DereferenceChecker::reportBug(ProgramStateRef State, const Stmt *S,
}
case Stmt::MemberExprClass: {
const MemberExpr *M = cast<MemberExpr>(S);
- if (M->isArrow() || bugreporter::isDeclRefExprToReference(M->getBase())) {
+ if (M->isArrow() || isDeclRefExprToReference(M->getBase())) {
os << "Access to field '" << M->getMemberNameInfo()
<< "' results in a dereference of a null pointer";
AddDerefSource(os, Ranges, M->getBase()->IgnoreParenCasts(),
@@ -177,7 +183,7 @@ void DereferenceChecker::reportBug(ProgramStateRef State, const Stmt *S,
auto report = llvm::make_unique<BugReport>(
*BT_null, buf.empty() ? BT_null->getDescription() : StringRef(buf), N);
- bugreporter::trackNullOrUndefValue(N, bugreporter::getDerefExpr(S), *report);
+ bugreporter::trackExpressionValue(N, bugreporter::getDerefExpr(S), *report);
for (SmallVectorImpl<SourceRange>::iterator
I = Ranges.begin(), E = Ranges.end(); I!=E; ++I)
@@ -197,8 +203,7 @@ void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S,
auto report =
llvm::make_unique<BugReport>(*BT_undef, BT_undef->getDescription(), N);
- bugreporter::trackNullOrUndefValue(N, bugreporter::getDerefExpr(S),
- *report);
+ bugreporter::trackExpressionValue(N, bugreporter::getDerefExpr(S), *report);
C.emitReport(std::move(report));
}
return;
diff --git a/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp b/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
index bc39c92ea9..5e10fa99fb 100644
--- a/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
@@ -32,6 +32,13 @@ public:
};
} // end anonymous namespace
+static const Expr *getDenomExpr(const ExplodedNode *N) {
+ const Stmt *S = N->getLocationAs<PreStmt>()->getStmt();
+ if (const auto *BE = dyn_cast<BinaryOperator>(S))
+ return BE->getRHS();
+ return nullptr;
+}
+
void DivZeroChecker::reportBug(
const char *Msg, ProgramStateRef StateZero, CheckerContext &C,
std::unique_ptr<BugReporterVisitor> Visitor) const {
@@ -41,7 +48,7 @@ void DivZeroChecker::reportBug(
auto R = llvm::make_unique<BugReport>(*BT, Msg, N);
R->addVisitor(std::move(Visitor));
- bugreporter::trackNullOrUndefValue(N, bugreporter::GetDenomExpr(N), *R);
+ bugreporter::trackExpressionValue(N, getDenomExpr(N), *R);
C.emitReport(std::move(R));
}
}
diff --git a/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp b/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
index b5a3c7187f..f83a0ec075 100644
--- a/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
+++ b/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
@@ -123,11 +123,6 @@ void DynamicTypePropagation::checkDeadSymbols(SymbolReaper &SR,
}
}
- if (!SR.hasDeadSymbols()) {
- C.addTransition(State);
- return;
- }
-
MostSpecializedTypeArgsMapTy TyArgMap =
State->get<MostSpecializedTypeArgsMap>();
for (MostSpecializedTypeArgsMapTy::iterator I = TyArgMap.begin(),
diff --git a/lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp b/lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp
new file mode 100644
index 0000000000..f3a35daf07
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp
@@ -0,0 +1,128 @@
+//===- EnumCastOutOfRangeChecker.cpp ---------------------------*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The EnumCastOutOfRangeChecker is responsible for checking integer to
+// enumeration casts that could result in undefined values. This could happen
+// if the value that we cast from is out of the value range of the enumeration.
+// Reference:
+// [ISO/IEC 14882-2014] ISO/IEC 14882-2014.
+// Programming Languages — C++, Fourth Edition. 2014.
+// C++ Standard, [dcl.enum], in paragraph 8, which defines the range of an enum
+// C++ Standard, [expr.static.cast], paragraph 10, which defines the behaviour
+// of casting an integer value that is out of range
+// SEI CERT C++ Coding Standard, INT50-CPP. Do not cast to an out-of-range
+// enumeration value
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+// This evaluator checks two SVals for equality. The first SVal is provided via
+// the constructor, the second is the parameter of the overloaded () operator.
+// It uses the in-built ConstraintManager to resolve the equlity to possible or
+// not possible ProgramStates.
+class ConstraintBasedEQEvaluator {
+ const DefinedOrUnknownSVal CompareValue;
+ const ProgramStateRef PS;
+ SValBuilder &SVB;
+
+public:
+ ConstraintBasedEQEvaluator(CheckerContext &C,
+ const DefinedOrUnknownSVal CompareValue)
+ : CompareValue(CompareValue), PS(C.getState()), SVB(C.getSValBuilder()) {}
+
+ bool operator()(const llvm::APSInt &EnumDeclInitValue) {
+ DefinedOrUnknownSVal EnumDeclValue = SVB.makeIntVal(EnumDeclInitValue);
+ DefinedOrUnknownSVal ElemEqualsValueToCast =
+ SVB.evalEQ(PS, EnumDeclValue, CompareValue);
+
+ return static_cast<bool>(PS->assume(ElemEqualsValueToCast, true));
+ }
+};
+
+// This checker checks CastExpr statements.
+// If the value provided to the cast is one of the values the enumeration can
+// represent, the said value matches the enumeration. If the checker can
+// establish the impossibility of matching it gives a warning.
+// Being conservative, it does not warn if there is slight possibility the
+// value can be matching.
+class EnumCastOutOfRangeChecker : public Checker<check::PreStmt<CastExpr>> {
+ mutable std::unique_ptr<BuiltinBug> EnumValueCastOutOfRange;
+ void reportWarning(CheckerContext &C) const;
+
+public:
+ void checkPreStmt(const CastExpr *CE, CheckerContext &C) const;
+};
+
+using EnumValueVector = llvm::SmallVector<llvm::APSInt, 6>;
+
+// Collects all of the values an enum can represent (as SVals).
+EnumValueVector getDeclValuesForEnum(const EnumDecl *ED) {
+ EnumValueVector DeclValues(
+ std::distance(ED->enumerator_begin(), ED->enumerator_end()));
+ llvm::transform(ED->enumerators(), DeclValues.begin(),
+ [](const EnumConstantDecl *D) { return D->getInitVal(); });
+ return DeclValues;
+}
+} // namespace
+
+void EnumCastOutOfRangeChecker::reportWarning(CheckerContext &C) const {
+ if (const ExplodedNode *N = C.generateNonFatalErrorNode()) {
+ if (!EnumValueCastOutOfRange)
+ EnumValueCastOutOfRange.reset(
+ new BuiltinBug(this, "Enum cast out of range",
+ "The value provided to the cast expression is not in "
+ "the valid range of values for the enum"));
+ C.emitReport(llvm::make_unique<BugReport>(
+ *EnumValueCastOutOfRange, EnumValueCastOutOfRange->getDescription(),
+ N));
+ }
+}
+
+void EnumCastOutOfRangeChecker::checkPreStmt(const CastExpr *CE,
+ CheckerContext &C) const {
+ // Get the value of the expression to cast.
+ const llvm::Optional<DefinedOrUnknownSVal> ValueToCast =
+ C.getSVal(CE->getSubExpr()).getAs<DefinedOrUnknownSVal>();
+
+ // If the value cannot be reasoned about (not even a DefinedOrUnknownSVal),
+ // don't analyze further.
+ if (!ValueToCast)
+ return;
+
+ const QualType T = CE->getType();
+ // Check whether the cast type is an enum.
+ if (!T->isEnumeralType())
+ return;
+
+ // If the cast is an enum, get its declaration.
+ // If the isEnumeralType() returned true, then the declaration must exist
+ // even if it is a stub declaration. It is up to the getDeclValuesForEnum()
+ // function to handle this.
+ const EnumDecl *ED = T->castAs<EnumType>()->getDecl();
+
+ EnumValueVector DeclValues = getDeclValuesForEnum(ED);
+ // Check if any of the enum values possibly match.
+ bool PossibleValueMatch = llvm::any_of(
+ DeclValues, ConstraintBasedEQEvaluator(C, *ValueToCast));
+
+ // If there is no value that can possibly match any of the enum values, then
+ // warn.
+ if (!PossibleValueMatch)
+ reportWarning(C);
+}
+
+void ento::registerEnumCastOutOfRangeChecker(CheckerManager &mgr) {
+ mgr.registerChecker<EnumCastOutOfRangeChecker>();
+}
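
For illustration, the kind of cast the new checker is meant to flag: a value that cannot be equal to any enumerator of the destination enum. A hypothetical example, not taken from the patch or its tests:

    enum class Month { Jan = 1, Feb, Mar, Apr, May, Jun,
                       Jul, Aug, Sep, Oct, Nov, Dec };   // enumerators 1..12

    void pick(int raw) {
      Month ok  = static_cast<Month>(7);    // equals Jul: no warning
      Month bad = static_cast<Month>(42);   // may warn: matches no enumerator
      Month dep = static_cast<Month>(raw);  // unknown value: stays silent
      (void)ok; (void)bad; (void)dep;
    }
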
diff --git a/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp b/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
index 7c53b2d21a..0752dba49c 100644
--- a/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
@@ -337,6 +337,7 @@ void ExprInspectionChecker::analyzerDenote(const CallExpr *CE,
C.addTransition(C.getState()->set<DenotedSymbols>(Sym, E));
}
+namespace {
class SymbolExpressor
: public SymExprVisitor<SymbolExpressor, Optional<std::string>> {
ProgramStateRef State;
@@ -369,6 +370,7 @@ public:
return None;
}
};
+} // namespace
void ExprInspectionChecker::analyzerExpress(const CallExpr *CE,
CheckerContext &C) const {
diff --git a/lib/StaticAnalyzer/Checkers/IteratorChecker.cpp b/lib/StaticAnalyzer/Checkers/IteratorChecker.cpp
index 7a71751342..0b8f677a4a 100644
--- a/lib/StaticAnalyzer/Checkers/IteratorChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/IteratorChecker.cpp
@@ -238,14 +238,17 @@ class IteratorChecker
void handleEraseAfter(CheckerContext &C, const SVal &Iter) const;
void handleEraseAfter(CheckerContext &C, const SVal &Iter1,
const SVal &Iter2) const;
+ void verifyIncrement(CheckerContext &C, const SVal &Iter) const;
+ void verifyDecrement(CheckerContext &C, const SVal &Iter) const;
void verifyRandomIncrOrDecr(CheckerContext &C, OverloadedOperatorKind Op,
- const SVal &RetVal, const SVal &LHS,
- const SVal &RHS) const;
+ const SVal &LHS, const SVal &RHS) const;
void verifyMatch(CheckerContext &C, const SVal &Iter,
const MemRegion *Cont) const;
void verifyMatch(CheckerContext &C, const SVal &Iter1,
const SVal &Iter2) const;
-
+ IteratorPosition advancePosition(CheckerContext &C, OverloadedOperatorKind Op,
+ const IteratorPosition &Pos,
+ const SVal &Distance) const;
void reportOutOfRangeBug(const StringRef &Message, const SVal &Val,
CheckerContext &C, ExplodedNode *ErrNode) const;
void reportMismatchedBug(const StringRef &Message, const SVal &Val1,
@@ -388,7 +391,9 @@ ProgramStateRef setContainerData(ProgramStateRef State, const MemRegion *Cont,
bool hasLiveIterators(ProgramStateRef State, const MemRegion *Cont);
bool isBoundThroughLazyCompoundVal(const Environment &Env,
const MemRegion *Reg);
-bool isOutOfRange(ProgramStateRef State, const IteratorPosition &Pos);
+bool isPastTheEnd(ProgramStateRef State, const IteratorPosition &Pos);
+bool isAheadOfRange(ProgramStateRef State, const IteratorPosition &Pos);
+bool isBehindPastTheEnd(ProgramStateRef State, const IteratorPosition &Pos);
bool isZero(ProgramStateRef State, const NonLoc &Val);
} // namespace
@@ -422,29 +427,46 @@ void IteratorChecker::checkPreCall(const CallEvent &Call,
verifyAccess(C, Call.getArgSVal(0));
}
}
- if (ChecksEnabled[CK_IteratorRangeChecker] &&
- isRandomIncrOrDecrOperator(Func->getOverloadedOperator())) {
- if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
- // Check for out-of-range incrementions and decrementions
- if (Call.getNumArgs() >= 1) {
- verifyRandomIncrOrDecr(C, Func->getOverloadedOperator(),
- Call.getReturnValue(),
- InstCall->getCXXThisVal(), Call.getArgSVal(0));
+ if (ChecksEnabled[CK_IteratorRangeChecker]) {
+ if (isIncrementOperator(Func->getOverloadedOperator())) {
+ // Check for out-of-range incrementions
+ if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
+ verifyIncrement(C, InstCall->getCXXThisVal());
+ } else {
+ if (Call.getNumArgs() >= 1) {
+ verifyIncrement(C, Call.getArgSVal(0));
+ }
}
- } else {
- if (Call.getNumArgs() >= 2) {
- verifyRandomIncrOrDecr(C, Func->getOverloadedOperator(),
- Call.getReturnValue(), Call.getArgSVal(0),
- Call.getArgSVal(1));
+ } else if (isDecrementOperator(Func->getOverloadedOperator())) {
+ // Check for out-of-range decrementions
+ if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
+ verifyDecrement(C, InstCall->getCXXThisVal());
+ } else {
+ if (Call.getNumArgs() >= 1) {
+ verifyDecrement(C, Call.getArgSVal(0));
+ }
+ }
+ } else if (isRandomIncrOrDecrOperator(Func->getOverloadedOperator())) {
+ if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
+ // Check for out-of-range incrementions and decrementions
+ if (Call.getNumArgs() >= 1) {
+ verifyRandomIncrOrDecr(C, Func->getOverloadedOperator(),
+ InstCall->getCXXThisVal(),
+ Call.getArgSVal(0));
+ }
+ } else {
+ if (Call.getNumArgs() >= 2) {
+ verifyRandomIncrOrDecr(C, Func->getOverloadedOperator(),
+ Call.getArgSVal(0), Call.getArgSVal(1));
+ }
+ }
+ } else if (isDereferenceOperator(Func->getOverloadedOperator())) {
+ // Check for dereference of out-of-range iterators
+ if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
+ verifyDereference(C, InstCall->getCXXThisVal());
+ } else {
+ verifyDereference(C, Call.getArgSVal(0));
}
- }
- } else if (ChecksEnabled[CK_IteratorRangeChecker] &&
- isDereferenceOperator(Func->getOverloadedOperator())) {
- // Check for dereference of out-of-range iterators
- if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
- verifyDereference(C, InstCall->getCXXThisVal());
- } else {
- verifyDereference(C, Call.getArgSVal(0));
}
} else if (ChecksEnabled[CK_MismatchedIteratorChecker] &&
isComparisonOperator(Func->getOverloadedOperator())) {
@@ -529,7 +551,7 @@ void IteratorChecker::checkPreCall(const CallEvent &Call,
//
// In this case the first two arguments to f() must be iterators must belong
// to the same container and the last to also to the same container but
- // not neccessarily to the same as the first two.
+ // not necessarily to the same as the first two.
if (!ChecksEnabled[CK_MismatchedIteratorChecker])
return;
@@ -895,11 +917,12 @@ void IteratorChecker::verifyDereference(CheckerContext &C,
const SVal &Val) const {
auto State = C.getState();
const auto *Pos = getIteratorPosition(State, Val);
- if (Pos && isOutOfRange(State, *Pos)) {
+ if (Pos && isPastTheEnd(State, *Pos)) {
auto *N = C.generateNonFatalErrorNode(State);
if (!N)
return;
- reportOutOfRangeBug("Iterator accessed outside of its range.", Val, C, N);
+ reportOutOfRangeBug("Past-the-end iterator dereferenced.", Val, C, N);
+ return;
}
}
@@ -924,14 +947,9 @@ void IteratorChecker::handleIncrement(CheckerContext &C, const SVal &RetVal,
if (Pos) {
auto &SymMgr = C.getSymbolManager();
auto &BVF = SymMgr.getBasicVals();
- auto &SVB = C.getSValBuilder();
- const auto OldOffset = Pos->getOffset();
- auto NewOffset =
- SVB.evalBinOp(State, BO_Add,
- nonloc::SymbolVal(OldOffset),
- nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))),
- SymMgr.getType(OldOffset)).getAsSymbol();
- auto NewPos = Pos->setTo(NewOffset);
+ const auto NewPos =
+ advancePosition(C, OO_Plus, *Pos,
+ nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))));
State = setIteratorPosition(State, Iter, NewPos);
State = setIteratorPosition(State, RetVal, Postfix ? *Pos : NewPos);
C.addTransition(State);
@@ -947,14 +965,9 @@ void IteratorChecker::handleDecrement(CheckerContext &C, const SVal &RetVal,
if (Pos) {
auto &SymMgr = C.getSymbolManager();
auto &BVF = SymMgr.getBasicVals();
- auto &SVB = C.getSValBuilder();
- const auto OldOffset = Pos->getOffset();
- auto NewOffset =
- SVB.evalBinOp(State, BO_Sub,
- nonloc::SymbolVal(OldOffset),
- nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))),
- SymMgr.getType(OldOffset)).getAsSymbol();
- auto NewPos = Pos->setTo(NewOffset);
+ const auto NewPos =
+ advancePosition(C, OO_Minus, *Pos,
+ nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))));
State = setIteratorPosition(State, Iter, NewPos);
State = setIteratorPosition(State, RetVal, Postfix ? *Pos : NewPos);
C.addTransition(State);
@@ -1020,78 +1033,71 @@ void IteratorChecker::handleRandomIncrOrDecr(CheckerContext &C,
value = &val;
}
- auto &SymMgr = C.getSymbolManager();
- auto &SVB = C.getSValBuilder();
- auto BinOp = (Op == OO_Plus || Op == OO_PlusEqual) ? BO_Add : BO_Sub;
- const auto OldOffset = Pos->getOffset();
- SymbolRef NewOffset;
- if (const auto intValue = value->getAs<nonloc::ConcreteInt>()) {
- // For concrete integers we can calculate the new position
- NewOffset = SVB.evalBinOp(State, BinOp, nonloc::SymbolVal(OldOffset),
- *intValue,
- SymMgr.getType(OldOffset)).getAsSymbol();
- } else {
- // For other symbols create a new symbol to keep expressions simple
- const auto &LCtx = C.getLocationContext();
- NewOffset = SymMgr.conjureSymbol(nullptr, LCtx, SymMgr.getType(OldOffset),
- C.blockCount());
- State = assumeNoOverflow(State, NewOffset, 4);
- }
- auto NewPos = Pos->setTo(NewOffset);
auto &TgtVal = (Op == OO_PlusEqual || Op == OO_MinusEqual) ? LHS : RetVal;
- State = setIteratorPosition(State, TgtVal, NewPos);
+ State =
+ setIteratorPosition(State, TgtVal, advancePosition(C, Op, *Pos, *value));
C.addTransition(State);
}
+void IteratorChecker::verifyIncrement(CheckerContext &C,
+ const SVal &Iter) const {
+ auto &BVF = C.getSValBuilder().getBasicValueFactory();
+ verifyRandomIncrOrDecr(C, OO_Plus, Iter,
+ nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))));
+}
+
+void IteratorChecker::verifyDecrement(CheckerContext &C,
+ const SVal &Iter) const {
+ auto &BVF = C.getSValBuilder().getBasicValueFactory();
+ verifyRandomIncrOrDecr(C, OO_Minus, Iter,
+ nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))));
+}
+
void IteratorChecker::verifyRandomIncrOrDecr(CheckerContext &C,
OverloadedOperatorKind Op,
- const SVal &RetVal,
const SVal &LHS,
const SVal &RHS) const {
auto State = C.getState();
// If the iterator is initially inside its range, then the operation is valid
const auto *Pos = getIteratorPosition(State, LHS);
- if (!Pos || !isOutOfRange(State, *Pos))
+ if (!Pos)
return;
- auto value = RHS;
- if (auto loc = RHS.getAs<Loc>()) {
- value = State->getRawSVal(*loc);
+ auto Value = RHS;
+ if (auto ValAsLoc = RHS.getAs<Loc>()) {
+ Value = State->getRawSVal(*ValAsLoc);
}
- // Incremention or decremention by 0 is never bug
- if (isZero(State, value.castAs<NonLoc>()))
+ if (Value.isUnknown())
return;
- auto &SymMgr = C.getSymbolManager();
- auto &SVB = C.getSValBuilder();
- auto BinOp = (Op == OO_Plus || Op == OO_PlusEqual) ? BO_Add : BO_Sub;
- const auto OldOffset = Pos->getOffset();
- const auto intValue = value.getAs<nonloc::ConcreteInt>();
- if (!intValue)
+ // Incremention or decremention by 0 is never a bug.
+ if (isZero(State, Value.castAs<NonLoc>()))
return;
- auto NewOffset = SVB.evalBinOp(State, BinOp, nonloc::SymbolVal(OldOffset),
- *intValue,
- SymMgr.getType(OldOffset)).getAsSymbol();
- auto NewPos = Pos->setTo(NewOffset);
-
- // If out of range, the only valid operation is to step into the range
- if (isOutOfRange(State, NewPos)) {
+ // The result may be the past-end iterator of the container, but any other
+ // out of range position is undefined behaviour
+ if (isAheadOfRange(State, advancePosition(C, Op, *Pos, Value))) {
auto *N = C.generateNonFatalErrorNode(State);
if (!N)
return;
- reportOutOfRangeBug("Iterator accessed past its end.", LHS, C, N);
+ reportOutOfRangeBug("Iterator decremented ahead of its valid range.", LHS,
+ C, N);
+ }
+ if (isBehindPastTheEnd(State, advancePosition(C, Op, *Pos, Value))) {
+ auto *N = C.generateNonFatalErrorNode(State);
+ if (!N)
+ return;
+ reportOutOfRangeBug("Iterator incremented behind the past-the-end "
+ "iterator.", LHS, C, N);
}
}
void IteratorChecker::verifyMatch(CheckerContext &C, const SVal &Iter,
const MemRegion *Cont) const {
// Verify match between a container and the container of an iterator
- while (const auto *CBOR = Cont->getAs<CXXBaseObjectRegion>()) {
- Cont = CBOR->getSuperRegion();
- }
+ Cont = Cont->getMostDerivedObjectRegion();
auto State = C.getState();
const auto *Pos = getIteratorPosition(State, Iter);
@@ -1125,9 +1131,7 @@ void IteratorChecker::handleBegin(CheckerContext &C, const Expr *CE,
if (!ContReg)
return;
- while (const auto *CBOR = ContReg->getAs<CXXBaseObjectRegion>()) {
- ContReg = CBOR->getSuperRegion();
- }
+ ContReg = ContReg->getMostDerivedObjectRegion();
// If the container already has a begin symbol then use it. Otherwise first
// create a new one.
@@ -1151,9 +1155,7 @@ void IteratorChecker::handleEnd(CheckerContext &C, const Expr *CE,
if (!ContReg)
return;
- while (const auto *CBOR = ContReg->getAs<CXXBaseObjectRegion>()) {
- ContReg = CBOR->getSuperRegion();
- }
+ ContReg = ContReg->getMostDerivedObjectRegion();
// If the container already has an end symbol then use it. Otherwise first
// create a new one.
@@ -1174,9 +1176,7 @@ void IteratorChecker::handleEnd(CheckerContext &C, const Expr *CE,
void IteratorChecker::assignToContainer(CheckerContext &C, const Expr *CE,
const SVal &RetVal,
const MemRegion *Cont) const {
- while (const auto *CBOR = Cont->getAs<CXXBaseObjectRegion>()) {
- Cont = CBOR->getSuperRegion();
- }
+ Cont = Cont->getMostDerivedObjectRegion();
auto State = C.getState();
auto &SymMgr = C.getSymbolManager();
@@ -1194,9 +1194,7 @@ void IteratorChecker::handleAssign(CheckerContext &C, const SVal &Cont,
if (!ContReg)
return;
- while (const auto *CBOR = ContReg->getAs<CXXBaseObjectRegion>()) {
- ContReg = CBOR->getSuperRegion();
- }
+ ContReg = ContReg->getMostDerivedObjectRegion();
// Assignment of a new value to a container always invalidates all its
// iterators
@@ -1211,13 +1209,11 @@ void IteratorChecker::handleAssign(CheckerContext &C, const SVal &Cont,
if (!OldCont.isUndef()) {
const auto *OldContReg = OldCont.getAsRegion();
if (OldContReg) {
- while (const auto *CBOR = OldContReg->getAs<CXXBaseObjectRegion>()) {
- OldContReg = CBOR->getSuperRegion();
- }
+ OldContReg = OldContReg->getMostDerivedObjectRegion();
const auto OldCData = getContainerData(State, OldContReg);
if (OldCData) {
if (const auto OldEndSym = OldCData->getEnd()) {
- // If we already assigned an "end" symbol to the old conainer, then
+ // If we already assigned an "end" symbol to the old container, then
// first reassign all iterator positions to the new container which
// are not past the container (thus not greater or equal to the
// current "end" symbol).
@@ -1273,9 +1269,7 @@ void IteratorChecker::handleClear(CheckerContext &C, const SVal &Cont) const {
if (!ContReg)
return;
- while (const auto *CBOR = ContReg->getAs<CXXBaseObjectRegion>()) {
- ContReg = CBOR->getSuperRegion();
- }
+ ContReg = ContReg->getMostDerivedObjectRegion();
// The clear() operation invalidates all the iterators, except the past-end
// iterators of list-like containers
@@ -1302,9 +1296,7 @@ void IteratorChecker::handlePushBack(CheckerContext &C,
if (!ContReg)
return;
- while (const auto *CBOR = ContReg->getAs<CXXBaseObjectRegion>()) {
- ContReg = CBOR->getSuperRegion();
- }
+ ContReg = ContReg->getMostDerivedObjectRegion();
// For deque-like containers invalidate all iterator positions
auto State = C.getState();
@@ -1341,9 +1333,7 @@ void IteratorChecker::handlePopBack(CheckerContext &C, const SVal &Cont) const {
if (!ContReg)
return;
- while (const auto *CBOR = ContReg->getAs<CXXBaseObjectRegion>()) {
- ContReg = CBOR->getSuperRegion();
- }
+ ContReg = ContReg->getMostDerivedObjectRegion();
auto State = C.getState();
const auto CData = getContainerData(State, ContReg);
@@ -1381,9 +1371,7 @@ void IteratorChecker::handlePushFront(CheckerContext &C,
if (!ContReg)
return;
- while (const auto *CBOR = ContReg->getAs<CXXBaseObjectRegion>()) {
- ContReg = CBOR->getSuperRegion();
- }
+ ContReg = ContReg->getMostDerivedObjectRegion();
// For deque-like containers invalidate all iterator positions
auto State = C.getState();
@@ -1416,9 +1404,7 @@ void IteratorChecker::handlePopFront(CheckerContext &C,
if (!ContReg)
return;
- while (const auto *CBOR = ContReg->getAs<CXXBaseObjectRegion>()) {
- ContReg = CBOR->getSuperRegion();
- }
+ ContReg = ContReg->getMostDerivedObjectRegion();
auto State = C.getState();
const auto CData = getContainerData(State, ContReg);
@@ -1566,6 +1552,35 @@ void IteratorChecker::handleEraseAfter(CheckerContext &C, const SVal &Iter1,
C.addTransition(State);
}
+IteratorPosition IteratorChecker::advancePosition(CheckerContext &C,
+ OverloadedOperatorKind Op,
+ const IteratorPosition &Pos,
+ const SVal &Distance) const {
+ auto State = C.getState();
+ auto &SymMgr = C.getSymbolManager();
+ auto &SVB = C.getSValBuilder();
+
+ assert ((Op == OO_Plus || Op == OO_PlusEqual ||
+ Op == OO_Minus || Op == OO_MinusEqual) &&
+ "Advance operator must be one of +, -, += and -=.");
+ auto BinOp = (Op == OO_Plus || Op == OO_PlusEqual) ? BO_Add : BO_Sub;
+ if (const auto IntDist = Distance.getAs<nonloc::ConcreteInt>()) {
+ // For concrete integers we can calculate the new position
+ return Pos.setTo(SVB.evalBinOp(State, BinOp,
+ nonloc::SymbolVal(Pos.getOffset()), *IntDist,
+ SymMgr.getType(Pos.getOffset()))
+ .getAsSymbol());
+ } else {
+ // For other symbols create a new symbol to keep expressions simple
+ const auto &LCtx = C.getLocationContext();
+ const auto NewPosSym = SymMgr.conjureSymbol(nullptr, LCtx,
+ SymMgr.getType(Pos.getOffset()),
+ C.blockCount());
+ State = assumeNoOverflow(State, NewPosSym, 4);
+ return Pos.setTo(NewPosSym);
+ }
+}
+
void IteratorChecker::reportOutOfRangeBug(const StringRef &Message,
const SVal &Val, CheckerContext &C,
ExplodedNode *ErrNode) const {
@@ -1605,7 +1620,8 @@ void IteratorChecker::reportInvalidatedBug(const StringRef &Message,
namespace {
bool isLess(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2);
-bool isGreaterOrEqual(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2);
+bool isGreater(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2);
+bool isEqual(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2);
bool compare(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2,
BinaryOperator::Opcode Opc);
bool compare(ProgramStateRef State, NonLoc NL1, NonLoc NL2,
@@ -2015,7 +2031,8 @@ ProgramStateRef setContainerData(ProgramStateRef State, const MemRegion *Cont,
const IteratorPosition *getIteratorPosition(ProgramStateRef State,
const SVal &Val) {
- if (const auto Reg = Val.getAsRegion()) {
+ if (auto Reg = Val.getAsRegion()) {
+ Reg = Reg->getMostDerivedObjectRegion();
return State->get<IteratorRegionMap>(Reg);
} else if (const auto Sym = Val.getAsSymbol()) {
return State->get<IteratorSymbolMap>(Sym);
@@ -2028,7 +2045,8 @@ const IteratorPosition *getIteratorPosition(ProgramStateRef State,
const IteratorPosition *getIteratorPosition(ProgramStateRef State,
RegionOrSymbol RegOrSym) {
if (RegOrSym.is<const MemRegion *>()) {
- return State->get<IteratorRegionMap>(RegOrSym.get<const MemRegion *>());
+ auto Reg = RegOrSym.get<const MemRegion *>()->getMostDerivedObjectRegion();
+ return State->get<IteratorRegionMap>(Reg);
} else if (RegOrSym.is<SymbolRef>()) {
return State->get<IteratorSymbolMap>(RegOrSym.get<SymbolRef>());
}
@@ -2037,7 +2055,8 @@ const IteratorPosition *getIteratorPosition(ProgramStateRef State,
ProgramStateRef setIteratorPosition(ProgramStateRef State, const SVal &Val,
const IteratorPosition &Pos) {
- if (const auto Reg = Val.getAsRegion()) {
+ if (auto Reg = Val.getAsRegion()) {
+ Reg = Reg->getMostDerivedObjectRegion();
return State->set<IteratorRegionMap>(Reg, Pos);
} else if (const auto Sym = Val.getAsSymbol()) {
return State->set<IteratorSymbolMap>(Sym, Pos);
@@ -2051,8 +2070,8 @@ ProgramStateRef setIteratorPosition(ProgramStateRef State,
RegionOrSymbol RegOrSym,
const IteratorPosition &Pos) {
if (RegOrSym.is<const MemRegion *>()) {
- return State->set<IteratorRegionMap>(RegOrSym.get<const MemRegion *>(),
- Pos);
+ auto Reg = RegOrSym.get<const MemRegion *>()->getMostDerivedObjectRegion();
+ return State->set<IteratorRegionMap>(Reg, Pos);
} else if (RegOrSym.is<SymbolRef>()) {
return State->set<IteratorSymbolMap>(RegOrSym.get<SymbolRef>(), Pos);
}
@@ -2060,7 +2079,8 @@ ProgramStateRef setIteratorPosition(ProgramStateRef State,
}
ProgramStateRef removeIteratorPosition(ProgramStateRef State, const SVal &Val) {
- if (const auto Reg = Val.getAsRegion()) {
+ if (auto Reg = Val.getAsRegion()) {
+ Reg = Reg->getMostDerivedObjectRegion();
return State->remove<IteratorRegionMap>(Reg);
} else if (const auto Sym = Val.getAsSymbol()) {
return State->remove<IteratorSymbolMap>(Sym);
@@ -2294,14 +2314,27 @@ bool isZero(ProgramStateRef State, const NonLoc &Val) {
BO_EQ);
}
-bool isOutOfRange(ProgramStateRef State, const IteratorPosition &Pos) {
+bool isPastTheEnd(ProgramStateRef State, const IteratorPosition &Pos) {
const auto *Cont = Pos.getContainer();
const auto *CData = getContainerData(State, Cont);
if (!CData)
return false;
- // Out of range means less than the begin symbol or greater or equal to the
- // end symbol.
+ const auto End = CData->getEnd();
+ if (End) {
+ if (isEqual(State, Pos.getOffset(), End)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool isAheadOfRange(ProgramStateRef State, const IteratorPosition &Pos) {
+ const auto *Cont = Pos.getContainer();
+ const auto *CData = getContainerData(State, Cont);
+ if (!CData)
+ return false;
const auto Beg = CData->getBegin();
if (Beg) {
@@ -2310,9 +2343,18 @@ bool isOutOfRange(ProgramStateRef State, const IteratorPosition &Pos) {
}
}
+ return false;
+}
+
+bool isBehindPastTheEnd(ProgramStateRef State, const IteratorPosition &Pos) {
+ const auto *Cont = Pos.getContainer();
+ const auto *CData = getContainerData(State, Cont);
+ if (!CData)
+ return false;
+
const auto End = CData->getEnd();
if (End) {
- if (isGreaterOrEqual(State, Pos.getOffset(), End)) {
+ if (isGreater(State, Pos.getOffset(), End)) {
return true;
}
}
@@ -2324,8 +2366,12 @@ bool isLess(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2) {
return compare(State, Sym1, Sym2, BO_LT);
}
-bool isGreaterOrEqual(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2) {
- return compare(State, Sym1, Sym2, BO_GE);
+bool isGreater(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2) {
+ return compare(State, Sym1, Sym2, BO_GT);
+}
+
+bool isEqual(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2) {
+ return compare(State, Sym1, Sym2, BO_EQ);
}
bool compare(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2,
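
The single out-of-range notion from before is split into three predicates: isPastTheEnd (exactly the end position, which only forbids dereference), isAheadOfRange (before the first element) and isBehindPastTheEnd (beyond the past-the-end position), and plain ++/-- now go through the same verification as the random-access operators. A hypothetical illustration of the three diagnostics, assuming the iterator range checks are enabled (not taken from the patch's tests):

    #include <vector>

    void bad_iterators(std::vector<int> &V) {
      auto B = V.begin();
      auto E = V.end();
      int x = *E;   // may warn: past-the-end iterator dereferenced
      --B;          // may warn: iterator decremented ahead of its valid range
      ++E;          // may warn: iterator incremented behind the past-the-end iterator
      (void)x;
    }
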
diff --git a/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp b/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
index db4fbca36d..18618d0459 100644
--- a/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
@@ -32,8 +32,7 @@ static bool IsLLVMStringRef(QualType T) {
if (!RT)
return false;
- return StringRef(QualType(RT, 0).getAsString()) ==
- "class StringRef";
+ return StringRef(QualType(RT, 0).getAsString()) == "class StringRef";
}
/// Check whether the declaration is semantically inside the top-level
diff --git a/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp b/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
index 0e51cf1184..103a33d39f 100644
--- a/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
@@ -1398,7 +1398,8 @@ void ento::registerNonLocalizedStringChecker(CheckerManager &mgr) {
NonLocalizedStringChecker *checker =
mgr.registerChecker<NonLocalizedStringChecker>();
checker->IsAggressive =
- mgr.getAnalyzerOptions().getBooleanOption("AggressiveReport", false);
+ mgr.getAnalyzerOptions().getCheckerBooleanOption("AggressiveReport",
+ false, checker);
}
void ento::registerEmptyLocalizationContextChecker(CheckerManager &mgr) {
diff --git a/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp b/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp
index 696cf39473..3f89c33cde 100644
--- a/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp
@@ -100,9 +100,6 @@ void MPIChecker::checkUnmatchedWaits(const CallEvent &PreCallEvent,
void MPIChecker::checkMissingWaits(SymbolReaper &SymReaper,
CheckerContext &Ctx) const {
- if (!SymReaper.hasDeadSymbols())
- return;
-
ProgramStateRef State = Ctx.getState();
const auto &Requests = State->get<RequestMap>();
if (Requests.isEmpty())
diff --git a/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp b/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
index cc29895e69..f5c7d52f4e 100644
--- a/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
@@ -16,6 +16,7 @@
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
@@ -29,6 +30,7 @@ namespace {
class MacOSKeychainAPIChecker : public Checker<check::PreStmt<CallExpr>,
check::PostStmt<CallExpr>,
check::DeadSymbols,
+ check::PointerEscape,
eval::Assume> {
mutable std::unique_ptr<BugType> BT;
@@ -58,6 +60,10 @@ public:
void checkPreStmt(const CallExpr *S, CheckerContext &C) const;
void checkPostStmt(const CallExpr *S, CheckerContext &C) const;
void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
+ ProgramStateRef checkPointerEscape(ProgramStateRef State,
+ const InvalidatedSymbols &Escaped,
+ const CallEvent *Call,
+ PointerEscapeKind Kind) const;
ProgramStateRef evalAssume(ProgramStateRef state, SVal Cond,
bool Assumption) const;
void printState(raw_ostream &Out, ProgramStateRef State,
@@ -570,6 +576,44 @@ void MacOSKeychainAPIChecker::checkDeadSymbols(SymbolReaper &SR,
C.addTransition(State, N);
}
+ProgramStateRef MacOSKeychainAPIChecker::checkPointerEscape(
+ ProgramStateRef State, const InvalidatedSymbols &Escaped,
+ const CallEvent *Call, PointerEscapeKind Kind) const {
+ // FIXME: This branch doesn't make any sense at all, but it is an overfitted
+ // replacement for previous overfitted code that made even less sense.
+ if (!Call || Call->getDecl())
+ return State;
+
+ for (auto I : State->get<AllocatedData>()) {
+ SymbolRef Sym = I.first;
+ if (Escaped.count(Sym))
+ State = State->remove<AllocatedData>(Sym);
+
+ // This checker is special. Most checkers in fact only track symbols of
+ // SymbolConjured type, e.g. symbols returned from functions such as
+ // malloc(). This checker tracks symbols returned as out-parameters.
+ //
+ // When a function is evaluated conservatively, the out-parameter's pointee
+ // base region gets invalidated with a SymbolConjured. If the base region is
+ // larger than the region we're interested in, the value we're interested in
+ // would be SymbolDerived based on that SymbolConjured. However, such
+ // SymbolDerived will never be listed in the Escaped set when the base
+ // region is invalidated because ExprEngine doesn't know which symbols
+ // were derived from a given symbol, while there can be infinitely many
+ // valid symbols derived from any given symbol.
+ //
+ // Hence the extra boilerplate: remove the derived symbol when its parent
+ // symbol escapes.
+ //
+ if (const auto *SD = dyn_cast<SymbolDerived>(Sym)) {
+ SymbolRef ParentSym = SD->getParentSymbol();
+ if (Escaped.count(ParentSym))
+ State = State->remove<AllocatedData>(Sym);
+ }
+ }
+ return State;
+}
+
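A hypothetical out-parameter pattern of the kind the comment above describes (type and function names are illustrative, not the real Security framework API):

struct Creds { void *Data; unsigned Len; };

void fillCreds(Creds *Out);   // evaluated conservatively: invalidates '*Out'
                              // with a single conjured symbol
void escape(Creds *C);

void demo() {
  Creds C;
  fillCreds(&C);              // C.Data now reads as a symbol derived from the
                              // conjured symbol of the invalidated base region
  escape(&C);                 // only the parent (conjured) symbol appears in
                              // the Escaped set; the derived symbol for C.Data
                              // has to be dropped by hand, as above
}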
std::shared_ptr<PathDiagnosticPiece>
MacOSKeychainAPIChecker::SecKeychainBugVisitor::VisitNode(
const ExplodedNode *N, BugReporterContext &BRC, BugReport &BR) {
diff --git a/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
index 8327b2ef96..8e88fadd37 100644
--- a/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
@@ -712,10 +712,8 @@ bool MallocChecker::isCMemFunction(const FunctionDecl *FD,
return false;
}
-// Tells if the callee is one of the following:
-// 1) A global non-placement new/delete operator function.
-// 2) A global placement operator function with the single placement argument
-// of type std::nothrow_t.
+// Tells if the callee is one of the builtin new/delete operators, including
+// placement operators and other standard overloads.
bool MallocChecker::isStandardNewDelete(const FunctionDecl *FD,
ASTContext &C) const {
if (!FD)
@@ -726,23 +724,11 @@ bool MallocChecker::isStandardNewDelete(const FunctionDecl *FD,
Kind != OO_Delete && Kind != OO_Array_Delete)
return false;
- // Skip all operator new/delete methods.
- if (isa<CXXMethodDecl>(FD))
- return false;
-
- // Return true if tested operator is a standard placement nothrow operator.
- if (FD->getNumParams() == 2) {
- QualType T = FD->getParamDecl(1)->getType();
- if (const IdentifierInfo *II = T.getBaseTypeIdentifier())
- return II->getName().equals("nothrow_t");
- }
-
- // Skip placement operators.
- if (FD->getNumParams() != 1 || FD->isVariadic())
- return false;
-
- // One of the standard new/new[]/delete/delete[] non-placement operators.
- return true;
+ // This is standard if and only if it's not defined in a user file.
+ SourceLocation L = FD->getLocation();
+ // If the header for operator delete is not included, the operator is still
+ // implicitly declared, but at an invalid source location. Check for that so
+ // we don't crash.
+ return !L.isValid() || C.getSourceManager().isInSystemHeader(L);
}
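To illustrate the new rule (purely an example, not code from the patch): only operator declarations that live in user files are treated as non-standard; anything coming from a system header such as <new>, or the implicit declarations with no valid location, still counts as standard.

#include <cstddef>
#include <new>

struct Pool {};

// Declared in a user file, so isStandardNewDelete() now returns false for it.
void *operator new(std::size_t Size, Pool &) { return ::operator new(Size); }

void demo(Pool &P) {
  int *A = new int;      // standard operator new from <new>: still modeled
  int *B = new (P) int;  // user overload: no longer treated as standard
  delete A;
  ::operator delete(B);  // B was ultimately obtained from the global allocator
}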
llvm::Optional<ProgramStateRef> MallocChecker::performKernelMalloc(
@@ -1087,12 +1073,6 @@ static bool treatUnusedNewEscaped(const CXXNewExpr *NE) {
void MallocChecker::processNewAllocation(const CXXNewExpr *NE,
CheckerContext &C,
SVal Target) const {
- if (NE->getNumPlacementArgs())
- for (CXXNewExpr::const_arg_iterator I = NE->placement_arg_begin(),
- E = NE->placement_arg_end(); I != E; ++I)
- if (SymbolRef Sym = C.getSVal(*I).getAsSymbol())
- checkUseAfterFree(Sym, C, *I);
-
if (!isStandardNewDelete(NE->getOperatorNew(), C.getASTContext()))
return;
@@ -1103,7 +1083,7 @@ void MallocChecker::processNewAllocation(const CXXNewExpr *NE,
ProgramStateRef State = C.getState();
// The return value from operator new is bound to a specified initialization
// value (if any) and we don't want to lose this value. So we call
- // MallocUpdateRefState() instead of MallocMemAux() which breakes the
+ // MallocUpdateRefState() instead of MallocMemAux() which breaks the
// existing binding.
State = MallocUpdateRefState(C, NE, State, NE->isArray() ? AF_CXXNewArray
: AF_CXXNew, Target);
@@ -1114,7 +1094,7 @@ void MallocChecker::processNewAllocation(const CXXNewExpr *NE,
void MallocChecker::checkPostStmt(const CXXNewExpr *NE,
CheckerContext &C) const {
- if (!C.getAnalysisManager().getAnalyzerOptions().mayInlineCXXAllocator())
+ if (!C.getAnalysisManager().getAnalyzerOptions().MayInlineCXXAllocator)
processNewAllocation(NE, C, C.getSVal(NE));
}
@@ -2365,13 +2345,11 @@ void MallocChecker::reportLeak(SymbolRef Sym, ExplodedNode *N,
void MallocChecker::checkDeadSymbols(SymbolReaper &SymReaper,
CheckerContext &C) const
{
- if (!SymReaper.hasDeadSymbols())
- return;
-
ProgramStateRef state = C.getState();
- RegionStateTy RS = state->get<RegionState>();
+ RegionStateTy OldRS = state->get<RegionState>();
RegionStateTy::Factory &F = state->get_context<RegionState>();
+ RegionStateTy RS = OldRS;
SmallVector<SymbolRef, 2> Errors;
for (RegionStateTy::iterator I = RS.begin(), E = RS.end(); I != E; ++I) {
if (SymReaper.isDead(I->first)) {
@@ -2379,10 +2357,18 @@ void MallocChecker::checkDeadSymbols(SymbolReaper &SymReaper,
Errors.push_back(I->first);
// Remove the dead symbol from the map.
RS = F.remove(RS, I->first);
-
}
}
+ if (RS == OldRS) {
+ // We shouldn't have touched other maps yet.
+ assert(state->get<ReallocPairs>() ==
+ C.getState()->get<ReallocPairs>());
+ assert(state->get<FreeReturnValue>() ==
+ C.getState()->get<FreeReturnValue>());
+ return;
+ }
+
// Cleanup the Realloc Pairs Map.
ReallocPairsTy RP = state->get<ReallocPairs>();
for (ReallocPairsTy::iterator I = RP.begin(), E = RP.end(); I != E; ++I) {
@@ -2438,10 +2424,6 @@ void MallocChecker::checkPreCall(const CallEvent &Call,
isCMemFunction(FD, Ctx, AF_IfNameIndex,
MemoryOperationKind::MOK_Free)))
return;
-
- if (ChecksEnabled[CK_NewDeleteChecker] &&
- isStandardNewDelete(FD, Ctx))
- return;
}
// Check if the callee of a method is deleted.
@@ -2539,8 +2521,7 @@ void MallocChecker::checkPostStmt(const BlockExpr *BE,
}
state =
- state->scanReachableSymbols<StopTrackingCallback>(Regions.data(),
- Regions.data() + Regions.size()).getState();
+ state->scanReachableSymbols<StopTrackingCallback>(Regions).getState();
C.addTransition(state);
}
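The shrinking call above relies on llvm::ArrayRef converting implicitly from a SmallVector; a minimal isolated sketch of the same simplification:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"

static size_t countRegions(llvm::ArrayRef<const void *> Regions) {
  return Regions.size();
}

void demo() {
  llvm::SmallVector<const void *, 4> Regions;
  // New form: implicit conversion from SmallVector.
  (void)countRegions(Regions);
  // Old form: an explicit (begin, end) pointer pair.
  (void)countRegions(llvm::ArrayRef<const void *>(Regions.data(),
                                                  Regions.data() + Regions.size()));
}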
@@ -3109,7 +3090,7 @@ markReleased(ProgramStateRef State, SymbolRef Sym, const Expr *Origin) {
void ento::registerNewDeleteLeaksChecker(CheckerManager &mgr) {
registerCStringCheckerBasic(mgr);
MallocChecker *checker = mgr.registerChecker<MallocChecker>();
- checker->IsOptimistic = mgr.getAnalyzerOptions().getBooleanOption(
+ checker->IsOptimistic = mgr.getAnalyzerOptions().getCheckerBooleanOption(
"Optimistic", false, checker);
checker->ChecksEnabled[MallocChecker::CK_NewDeleteLeaksChecker] = true;
checker->CheckNames[MallocChecker::CK_NewDeleteLeaksChecker] =
@@ -3130,7 +3111,7 @@ void ento::registerNewDeleteLeaksChecker(CheckerManager &mgr) {
void ento::registerInnerPointerCheckerAux(CheckerManager &mgr) {
registerCStringCheckerBasic(mgr);
MallocChecker *checker = mgr.registerChecker<MallocChecker>();
- checker->IsOptimistic = mgr.getAnalyzerOptions().getBooleanOption(
+ checker->IsOptimistic = mgr.getAnalyzerOptions().getCheckerBooleanOption(
"Optimistic", false, checker);
checker->ChecksEnabled[MallocChecker::CK_InnerPointerChecker] = true;
checker->CheckNames[MallocChecker::CK_InnerPointerChecker] =
@@ -3141,7 +3122,7 @@ void ento::registerInnerPointerCheckerAux(CheckerManager &mgr) {
void ento::register##name(CheckerManager &mgr) { \
registerCStringCheckerBasic(mgr); \
MallocChecker *checker = mgr.registerChecker<MallocChecker>(); \
- checker->IsOptimistic = mgr.getAnalyzerOptions().getBooleanOption( \
+ checker->IsOptimistic = mgr.getAnalyzerOptions().getCheckerBooleanOption( \
"Optimistic", false, checker); \
checker->ChecksEnabled[MallocChecker::CK_##name] = true; \
checker->CheckNames[MallocChecker::CK_##name] = mgr.getCurrentCheckName(); \
diff --git a/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp b/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp
index fc2ab1d6e3..4e45a37fd8 100644
--- a/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp
@@ -135,9 +135,9 @@ private:
bool isIntZeroExpr(const Expr *E) const {
if (!E->getType()->isIntegralOrEnumerationType())
return false;
- llvm::APSInt Result;
+ Expr::EvalResult Result;
if (E->EvaluateAsInt(Result, Context))
- return Result == 0;
+ return Result.Val.getInt() == 0;
return false;
}
@@ -191,8 +191,11 @@ private:
if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(rhse)) {
if (BOp->getOpcode() == BO_Div) {
const Expr *denom = BOp->getRHS()->IgnoreParenImpCasts();
- if (denom->EvaluateAsInt(denomVal, Context))
+ Expr::EvalResult Result;
+ if (denom->EvaluateAsInt(Result, Context)) {
+ denomVal = Result.Val.getInt();
denomKnown = true;
+ }
const Expr *numerator = BOp->getLHS()->IgnoreParenImpCasts();
if (numerator->isEvaluatable(Context))
numeratorKnown = true;
diff --git a/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp b/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp
index 5060b0e0a6..0d63cfe937 100644
--- a/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp
@@ -82,7 +82,9 @@ void ento::registerMmapWriteExecChecker(CheckerManager &mgr) {
MmapWriteExecChecker *Mwec =
mgr.registerChecker<MmapWriteExecChecker>();
Mwec->ProtExecOv =
- mgr.getAnalyzerOptions().getOptionAsInteger("MmapProtExec", 0x04, Mwec);
+ mgr.getAnalyzerOptions()
+ .getCheckerIntegerOption("MmapProtExec", 0x04, Mwec);
Mwec->ProtReadOv =
- mgr.getAnalyzerOptions().getOptionAsInteger("MmapProtRead", 0x01, Mwec);
+ mgr.getAnalyzerOptions()
+ .getCheckerIntegerOption("MmapProtRead", 0x01, Mwec);
}
diff --git a/lib/StaticAnalyzer/Checkers/MisusedMovedObjectChecker.cpp b/lib/StaticAnalyzer/Checkers/MoveChecker.cpp
index 83037f0444..dd83ce02e0 100644
--- a/lib/StaticAnalyzer/Checkers/MisusedMovedObjectChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MoveChecker.cpp
@@ -1,4 +1,4 @@
-// MisusedMovedObjectChecker.cpp - Check use of moved-from objects. - C++ -===//
+// MoveChecker.cpp - Check use of moved-from objects. - C++ ---------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -20,6 +20,7 @@
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "llvm/ADT/StringSet.h"
using namespace clang;
using namespace ento;
@@ -42,8 +43,8 @@ public:
void Profile(llvm::FoldingSetNodeID &ID) const { ID.AddInteger(K); }
};
-class MisusedMovedObjectChecker
- : public Checker<check::PreCall, check::PostCall, check::EndFunction,
+class MoveChecker
+ : public Checker<check::PreCall, check::PostCall,
check::DeadSymbols, check::RegionChanges> {
public:
void checkEndFunction(const ReturnStmt *RS, CheckerContext &C) const;
@@ -53,22 +54,66 @@ public:
ProgramStateRef
checkRegionChanges(ProgramStateRef State,
const InvalidatedSymbols *Invalidated,
- ArrayRef<const MemRegion *> ExplicitRegions,
- ArrayRef<const MemRegion *> Regions,
+ ArrayRef<const MemRegion *> RequestedRegions,
+ ArrayRef<const MemRegion *> InvalidatedRegions,
const LocationContext *LCtx, const CallEvent *Call) const;
void printState(raw_ostream &Out, ProgramStateRef State,
const char *NL, const char *Sep) const override;
private:
- enum MisuseKind {MK_FunCall, MK_Copy, MK_Move};
+ enum MisuseKind { MK_FunCall, MK_Copy, MK_Move };
+
+ struct ObjectKind {
+ bool Local : 1; // Is this a local variable or a local rvalue reference?
+ bool STL : 1; // Is this an object of a standard type?
+ };
+
+ // Not all of these are entirely move-safe, but they do provide *some*
+ // guarantees, and it means that somebody is using them after move
+ // in a valid manner.
+ // TODO: We can still try to identify *unsafe* use after move, such as
+ // dereference of a moved-from smart pointer (which is guaranteed to be null).
+ const llvm::StringSet<> StandardMoveSafeClasses = {
+ "basic_filebuf",
+ "basic_ios",
+ "future",
+ "optional",
+ "packaged_task",
+ "promise",
+ "shared_future",
+ "shared_lock",
+ "shared_ptr",
+ "thread",
+ "unique_ptr",
+ "unique_lock",
+ "weak_ptr",
+ };
+
+ // Obtains the ObjectKind of an object. Because the class declaration cannot
+ // always be easily obtained from the memory region, it is supplied separately.
+ ObjectKind classifyObject(const MemRegion *MR, const CXXRecordDecl *RD) const;
+
+ // Classifies the object and dumps a user-friendly description string to
+ // the stream. Return value is equivalent to classifyObject.
+ ObjectKind explainObject(llvm::raw_ostream &OS,
+ const MemRegion *MR, const CXXRecordDecl *RD) const;
+
+ bool isStandardMoveSafeClass(const CXXRecordDecl *RD) const;
+
class MovedBugVisitor : public BugReporterVisitor {
public:
- MovedBugVisitor(const MemRegion *R) : Region(R), Found(false) {}
+ MovedBugVisitor(const MoveChecker &Chk,
+ const MemRegion *R, const CXXRecordDecl *RD)
+ : Chk(Chk), Region(R), RD(RD), Found(false) {}
void Profile(llvm::FoldingSetNodeID &ID) const override {
static int X = 0;
ID.AddPointer(&X);
ID.AddPointer(Region);
+ // Don't add RD because it's, in theory, uniquely determined by
+ // the region. In practice though, it's not always possible to obtain
+ // the declaration directly from the region, which is why we store it
+ // in the first place.
}
std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
@@ -76,13 +121,22 @@ private:
BugReport &BR) override;
private:
+ const MoveChecker &Chk;
// The tracked region.
const MemRegion *Region;
+ // The class of the tracked object.
+ const CXXRecordDecl *RD;
bool Found;
};
+ bool IsAggressive = false;
+
+public:
+ void setAggressiveness(bool Aggressive) { IsAggressive = Aggressive; }
+
+private:
mutable std::unique_ptr<BugType> BT;
- ExplodedNode *reportBug(const MemRegion *Region, const CallEvent &Call,
+ ExplodedNode *reportBug(const MemRegion *Region, const CXXRecordDecl *RD,
CheckerContext &C, MisuseKind MK) const;
bool isInMoveSafeContext(const LocationContext *LC) const;
bool isStateResetMethod(const CXXMethodDecl *MethodDec) const;
@@ -116,10 +170,19 @@ static bool isAnyBaseRegionReported(ProgramStateRef State,
return false;
}
+static const MemRegion *unwrapRValueReferenceIndirection(const MemRegion *MR) {
+ if (const auto *SR = dyn_cast_or_null<SymbolicRegion>(MR)) {
+ SymbolRef Sym = SR->getSymbol();
+ if (Sym->getType()->isRValueReferenceType())
+ if (const MemRegion *OriginMR = Sym->getOriginRegion())
+ return OriginMR;
+ }
+ return MR;
+}
+
std::shared_ptr<PathDiagnosticPiece>
-MisusedMovedObjectChecker::MovedBugVisitor::VisitNode(const ExplodedNode *N,
- BugReporterContext &BRC,
- BugReport &) {
+MoveChecker::MovedBugVisitor::VisitNode(const ExplodedNode *N,
+ BugReporterContext &BRC, BugReport &BR) {
// We need only the last move of the reported object's region.
// The visitor walks the ExplodedGraph backwards.
if (Found)
@@ -140,25 +203,25 @@ MisusedMovedObjectChecker::MovedBugVisitor::VisitNode(const ExplodedNode *N,
return nullptr;
Found = true;
- std::string ObjectName;
- if (const auto DecReg = Region->getAs<DeclRegion>()) {
- const auto *RegionDecl = dyn_cast<NamedDecl>(DecReg->getDecl());
- ObjectName = RegionDecl->getNameAsString();
- }
- std::string InfoText;
- if (ObjectName != "")
- InfoText = "'" + ObjectName + "' became 'moved-from' here";
+ SmallString<128> Str;
+ llvm::raw_svector_ostream OS(Str);
+
+ OS << "Object";
+ ObjectKind OK = Chk.explainObject(OS, Region, RD);
+ if (OK.STL)
+ OS << " is left in a valid but unspecified state after move";
else
- InfoText = "Became 'moved-from' here";
+ OS << " is moved";
// Generate the extra diagnostic.
PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
N->getLocationContext());
- return std::make_shared<PathDiagnosticEventPiece>(Pos, InfoText, true);
-}
+ return std::make_shared<PathDiagnosticEventPiece>(Pos, OS.str(), true);
+ }
-const ExplodedNode *MisusedMovedObjectChecker::getMoveLocation(
- const ExplodedNode *N, const MemRegion *Region, CheckerContext &C) const {
+const ExplodedNode *MoveChecker::getMoveLocation(const ExplodedNode *N,
+ const MemRegion *Region,
+ CheckerContext &C) const {
// Walk the ExplodedGraph backwards and find the first node that referred to
// the tracked region.
const ExplodedNode *MoveNode = N;
@@ -173,13 +236,13 @@ const ExplodedNode *MisusedMovedObjectChecker::getMoveLocation(
return MoveNode;
}
-ExplodedNode *MisusedMovedObjectChecker::reportBug(const MemRegion *Region,
- const CallEvent &Call,
- CheckerContext &C,
- MisuseKind MK) const {
+ExplodedNode *MoveChecker::reportBug(const MemRegion *Region,
+ const CXXRecordDecl *RD,
+ CheckerContext &C,
+ MisuseKind MK) const {
if (ExplodedNode *N = C.generateNonFatalErrorNode()) {
if (!BT)
- BT.reset(new BugType(this, "Usage of a 'moved-from' object",
+ BT.reset(new BugType(this, "Use-after-move",
"C++ move semantics"));
// Uniqueing report to the same object.
@@ -191,71 +254,37 @@ ExplodedNode *MisusedMovedObjectChecker::reportBug(const MemRegion *Region,
MoveStmt, C.getSourceManager(), MoveNode->getLocationContext());
// Creating the error message.
- std::string ErrorMessage;
+ llvm::SmallString<128> Str;
+ llvm::raw_svector_ostream OS(Str);
switch(MK) {
case MK_FunCall:
- ErrorMessage = "Method call on a 'moved-from' object";
+ OS << "Method called on moved-from object";
+ explainObject(OS, Region, RD);
break;
case MK_Copy:
- ErrorMessage = "Copying a 'moved-from' object";
+ OS << "Moved-from object";
+ explainObject(OS, Region, RD);
+ OS << " is copied";
break;
case MK_Move:
- ErrorMessage = "Moving a 'moved-from' object";
+ OS << "Moved-from object";
+ explainObject(OS, Region, RD);
+ OS << " is moved";
break;
}
- if (const auto DecReg = Region->getAs<DeclRegion>()) {
- const auto *RegionDecl = dyn_cast<NamedDecl>(DecReg->getDecl());
- ErrorMessage += " '" + RegionDecl->getNameAsString() + "'";
- }
auto R =
- llvm::make_unique<BugReport>(*BT, ErrorMessage, N, LocUsedForUniqueing,
+ llvm::make_unique<BugReport>(*BT, OS.str(), N, LocUsedForUniqueing,
MoveNode->getLocationContext()->getDecl());
- R->addVisitor(llvm::make_unique<MovedBugVisitor>(Region));
+ R->addVisitor(llvm::make_unique<MovedBugVisitor>(*this, Region, RD));
C.emitReport(std::move(R));
return N;
}
return nullptr;
}
-// Removing the function parameters' MemRegion from the state. This is needed
-// for PODs where the trivial destructor does not even created nor executed.
-void MisusedMovedObjectChecker::checkEndFunction(const ReturnStmt *RS,
- CheckerContext &C) const {
- auto State = C.getState();
- TrackedRegionMapTy Objects = State->get<TrackedRegionMap>();
- if (Objects.isEmpty())
- return;
-
- auto LC = C.getLocationContext();
-
- const auto LD = dyn_cast_or_null<FunctionDecl>(LC->getDecl());
- if (!LD)
- return;
- llvm::SmallSet<const MemRegion *, 8> InvalidRegions;
-
- for (auto Param : LD->parameters()) {
- auto Type = Param->getType().getTypePtrOrNull();
- if (!Type)
- continue;
- if (!Type->isPointerType() && !Type->isReferenceType()) {
- InvalidRegions.insert(State->getLValue(Param, LC).getAsRegion());
- }
- }
-
- if (InvalidRegions.empty())
- return;
-
- for (const auto &E : State->get<TrackedRegionMap>()) {
- if (InvalidRegions.count(E.first->getBaseRegion()))
- State = State->remove<TrackedRegionMap>(E.first);
- }
-
- C.addTransition(State);
-}
-
-void MisusedMovedObjectChecker::checkPostCall(const CallEvent &Call,
- CheckerContext &C) const {
+void MoveChecker::checkPostCall(const CallEvent &Call,
+ CheckerContext &C) const {
const auto *AFC = dyn_cast<AnyFunctionCall>(&Call);
if (!AFC)
return;
@@ -281,6 +310,20 @@ void MisusedMovedObjectChecker::checkPostCall(const CallEvent &Call,
if (!ArgRegion)
return;
+ // In non-aggressive mode, only warn on use-after-move of local variables (or
+ // local rvalue references) and of STL objects. The former is possible because
+ // local variables (or local rvalue references) do not tempt their user to
+ // re-use the storage. The latter is possible because STL objects are known
+ // to end up in a valid but unspecified state after the move and their
+ // state-reset methods are also known, which allows us to predict
+ // precisely when use-after-move is invalid.
+ // In aggressive mode, warn on any use-after-move because the user
+ // has intentionally asked us to completely eliminate use-after-move
+ // in their code.
+ ObjectKind OK = classifyObject(ArgRegion, MethodDecl->getParent());
+ if (!IsAggressive && !OK.Local && !OK.STL)
+ return;
+
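Hypothetical user code showing how the classification above plays out (type names are illustrative):

#include <string>
#include <utility>

struct Widget { int X = 0; };   // illustrative non-STL type
Widget GlobalW;                 // not a local: OK.Local == false

void demo() {
  std::string S = "hi";
  std::string T = std::move(S); // S: OK.STL == true  -> checked by default

  Widget W;
  Widget W2 = std::move(W);     // W: OK.Local == true -> checked by default

  Widget G2 = std::move(GlobalW); // neither Local nor STL:
                                  // only checked when Aggressive=true
  (void)T; (void)W2; (void)G2;
}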
// Skip moving the object to itself.
if (CC && CC->getCXXThisVal().getAsRegion() == ArgRegion)
return;
@@ -302,8 +345,7 @@ void MisusedMovedObjectChecker::checkPostCall(const CallEvent &Call,
C.addTransition(State);
}
-bool MisusedMovedObjectChecker::isMoveSafeMethod(
- const CXXMethodDecl *MethodDec) const {
+bool MoveChecker::isMoveSafeMethod(const CXXMethodDecl *MethodDec) const {
// We abandon the cases where bool/void/void* conversion happens.
if (const auto *ConversionDec =
dyn_cast_or_null<CXXConversionDecl>(MethodDec)) {
@@ -314,20 +356,23 @@ bool MisusedMovedObjectChecker::isMoveSafeMethod(
return true;
}
// Function call `empty` can be skipped.
- if (MethodDec && MethodDec->getDeclName().isIdentifier() &&
+ return (MethodDec && MethodDec->getDeclName().isIdentifier() &&
(MethodDec->getName().lower() == "empty" ||
- MethodDec->getName().lower() == "isempty"))
- return true;
-
- return false;
+ MethodDec->getName().lower() == "isempty"));
}
-bool MisusedMovedObjectChecker::isStateResetMethod(
- const CXXMethodDecl *MethodDec) const {
- if (MethodDec && MethodDec->getDeclName().isIdentifier()) {
+bool MoveChecker::isStateResetMethod(const CXXMethodDecl *MethodDec) const {
+ if (!MethodDec)
+ return false;
+ if (MethodDec->hasAttr<ReinitializesAttr>())
+ return true;
+ if (MethodDec->getDeclName().isIdentifier()) {
std::string MethodName = MethodDec->getName().lower();
+ // TODO: Some of these methods (e.g., resize) do not always reset
+ // the state, so we should consider looking at the arguments.
if (MethodName == "reset" || MethodName == "clear" ||
- MethodName == "destroy")
+ MethodName == "destroy" || MethodName == "resize" ||
+ MethodName == "shrink")
return true;
}
return false;
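With the ReinitializesAttr check above, a user type can mark its own reset method; a small illustrative sketch, assuming the clang::reinitializes attribute spelling:

struct Buffer {
  void *Data = nullptr;
  unsigned Size = 0;

  // Tells the checker that calling this puts the object back into a
  // well-defined state, just like reset()/clear() would.
  [[clang::reinitializes]] void rebuild() {
    Data = nullptr;
    Size = 0;
  }
};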
@@ -335,8 +380,7 @@ bool MisusedMovedObjectChecker::isStateResetMethod(
// Don't report an error inside a move-related operation.
// We assume that the programmer knows what they are doing.
-bool MisusedMovedObjectChecker::isInMoveSafeContext(
- const LocationContext *LC) const {
+bool MoveChecker::isInMoveSafeContext(const LocationContext *LC) const {
do {
const auto *CtxDec = LC->getDecl();
auto *CtorDec = dyn_cast_or_null<CXXConstructorDecl>(CtxDec);
@@ -351,8 +395,45 @@ bool MisusedMovedObjectChecker::isInMoveSafeContext(
return false;
}
-void MisusedMovedObjectChecker::checkPreCall(const CallEvent &Call,
- CheckerContext &C) const {
+bool MoveChecker::isStandardMoveSafeClass(const CXXRecordDecl *RD) const {
+ const IdentifierInfo *II = RD->getIdentifier();
+ return II && StandardMoveSafeClasses.count(II->getName());
+}
+
+MoveChecker::ObjectKind
+MoveChecker::classifyObject(const MemRegion *MR,
+ const CXXRecordDecl *RD) const {
+ // Local variables and local rvalue references are classified as "Local".
+ // For the purposes of this checker, we classify move-safe STL types
+ // as not-"STL" types, because that's how the checker treats them.
+ MR = unwrapRValueReferenceIndirection(MR);
+ return {
+ /*Local=*/
+ MR && isa<VarRegion>(MR) && isa<StackSpaceRegion>(MR->getMemorySpace()),
+ /*STL=*/
+ RD && RD->getDeclContext()->isStdNamespace() &&
+ !isStandardMoveSafeClass(RD)
+ };
+}
+
+MoveChecker::ObjectKind
+MoveChecker::explainObject(llvm::raw_ostream &OS, const MemRegion *MR,
+ const CXXRecordDecl *RD) const {
+ // We may need a leading space every time we actually explain anything,
+ // and we never know if we are to explain anything until we try.
+ if (const auto DR =
+ dyn_cast_or_null<DeclRegion>(unwrapRValueReferenceIndirection(MR))) {
+ const auto *RegionDecl = cast<NamedDecl>(DR->getDecl());
+ OS << " '" << RegionDecl->getNameAsString() << "'";
+ }
+ ObjectKind OK = classifyObject(MR, RD);
+ if (OK.STL) {
+ OS << " of type '" << RD->getQualifiedNameAsString() << "'";
+ }
+ return OK;
+}
+
+void MoveChecker::checkPreCall(const CallEvent &Call, CheckerContext &C) const {
ProgramStateRef State = C.getState();
const LocationContext *LC = C.getLocationContext();
ExplodedNode *N = nullptr;
@@ -370,10 +451,11 @@ void MisusedMovedObjectChecker::checkPreCall(const CallEvent &Call,
const RegionState *ArgState = State->get<TrackedRegionMap>(ArgRegion);
if (ArgState && ArgState->isMoved()) {
if (!isInMoveSafeContext(LC)) {
+ const CXXRecordDecl *RD = CtorDec->getParent();
if(CtorDec->isMoveConstructor())
- N = reportBug(ArgRegion, Call, C, MK_Move);
+ N = reportBug(ArgRegion, RD, C, MK_Move);
else
- N = reportBug(ArgRegion, Call, C, MK_Copy);
+ N = reportBug(ArgRegion, RD, C, MK_Copy);
State = State->set<TrackedRegionMap>(ArgRegion,
RegionState::getReported());
}
@@ -386,20 +468,22 @@ void MisusedMovedObjectChecker::checkPreCall(const CallEvent &Call,
const auto IC = dyn_cast<CXXInstanceCall>(&Call);
if (!IC)
return;
- // In case of destructor call we do not track the object anymore.
- const MemRegion *ThisRegion = IC->getCXXThisVal().getAsRegion();
- if (!ThisRegion)
+
+ // Calling a destructor on a moved object is fine.
+ if (isa<CXXDestructorCall>(IC))
return;
- if (dyn_cast_or_null<CXXDestructorDecl>(Call.getDecl())) {
- State = removeFromState(State, ThisRegion);
- C.addTransition(State);
+ const MemRegion *ThisRegion = IC->getCXXThisVal().getAsRegion();
+ if (!ThisRegion)
return;
- }
const auto MethodDecl = dyn_cast_or_null<CXXMethodDecl>(IC->getDecl());
if (!MethodDecl)
return;
+
+ // Store class declaration as well, for bug reporting purposes.
+ const CXXRecordDecl *RD = MethodDecl->getParent();
+
// Checking assignment operators.
bool OperatorEq = MethodDecl->isOverloadedOperator() &&
MethodDecl->getOverloadedOperator() == OO_Equal;
@@ -414,9 +498,9 @@ void MisusedMovedObjectChecker::checkPreCall(const CallEvent &Call,
if (ArgState && ArgState->isMoved() && !isInMoveSafeContext(LC)) {
const MemRegion *ArgRegion = IC->getArgSVal(0).getAsRegion();
if(MethodDecl->isMoveAssignmentOperator())
- N = reportBug(ArgRegion, Call, C, MK_Move);
+ N = reportBug(ArgRegion, RD, C, MK_Move);
else
- N = reportBug(ArgRegion, Call, C, MK_Copy);
+ N = reportBug(ArgRegion, RD, C, MK_Copy);
State =
State->set<TrackedRegionMap>(ArgRegion, RegionState::getReported());
}
@@ -429,8 +513,7 @@ void MisusedMovedObjectChecker::checkPreCall(const CallEvent &Call,
// We want to investigate the whole object, not only sub-object of a parent
// class in which the encountered method defined.
- while (const CXXBaseObjectRegion *BR =
- dyn_cast<CXXBaseObjectRegion>(ThisRegion))
+ while (const auto *BR = dyn_cast<CXXBaseObjectRegion>(ThisRegion))
ThisRegion = BR->getSuperRegion();
if (isMoveSafeMethod(MethodDecl))
@@ -454,13 +537,13 @@ void MisusedMovedObjectChecker::checkPreCall(const CallEvent &Call,
if (isInMoveSafeContext(LC))
return;
- N = reportBug(ThisRegion, Call, C, MK_FunCall);
+ N = reportBug(ThisRegion, RD, C, MK_FunCall);
State = State->set<TrackedRegionMap>(ThisRegion, RegionState::getReported());
C.addTransition(State, N);
}
-void MisusedMovedObjectChecker::checkDeadSymbols(SymbolReaper &SymReaper,
- CheckerContext &C) const {
+void MoveChecker::checkDeadSymbols(SymbolReaper &SymReaper,
+ CheckerContext &C) const {
ProgramStateRef State = C.getState();
TrackedRegionMapTy TrackedRegions = State->get<TrackedRegionMap>();
for (TrackedRegionMapTy::value_type E : TrackedRegions) {
@@ -475,34 +558,44 @@ void MisusedMovedObjectChecker::checkDeadSymbols(SymbolReaper &SymReaper,
C.addTransition(State);
}
-ProgramStateRef MisusedMovedObjectChecker::checkRegionChanges(
+ProgramStateRef MoveChecker::checkRegionChanges(
ProgramStateRef State, const InvalidatedSymbols *Invalidated,
- ArrayRef<const MemRegion *> ExplicitRegions,
- ArrayRef<const MemRegion *> Regions, const LocationContext *LCtx,
- const CallEvent *Call) const {
- // In case of an InstanceCall don't remove the ThisRegion from the GDM since
- // it is handled in checkPreCall and checkPostCall.
- const MemRegion *ThisRegion = nullptr;
- if (const auto *IC = dyn_cast_or_null<CXXInstanceCall>(Call)) {
- ThisRegion = IC->getCXXThisVal().getAsRegion();
- }
-
- for (ArrayRef<const MemRegion *>::iterator I = ExplicitRegions.begin(),
- E = ExplicitRegions.end();
- I != E; ++I) {
- const auto *Region = *I;
- if (ThisRegion != Region) {
- State = removeFromState(State, Region);
+ ArrayRef<const MemRegion *> RequestedRegions,
+ ArrayRef<const MemRegion *> InvalidatedRegions,
+ const LocationContext *LCtx, const CallEvent *Call) const {
+ if (Call) {
+ // Relax invalidation upon function calls: only invalidate parameters
+ // that are passed directly via non-const pointers or non-const references
+ // or rvalue references.
+ // In case of an InstanceCall don't invalidate the this-region since
+ // it is fully handled in checkPreCall and checkPostCall.
+ const MemRegion *ThisRegion = nullptr;
+ if (const auto *IC = dyn_cast<CXXInstanceCall>(Call))
+ ThisRegion = IC->getCXXThisVal().getAsRegion();
+
+ // Requested ("explicit") regions are the regions passed into the call
+ // directly; not all of them end up being invalidated, but those that do
+ // also appear in the InvalidatedRegions array.
+ for (const auto *Region : RequestedRegions) {
+ if (ThisRegion != Region) {
+ if (llvm::find(InvalidatedRegions, Region) !=
+ std::end(InvalidatedRegions)) {
+ State = removeFromState(State, Region);
+ }
+ }
}
+ } else {
+ // For invalidations that aren't caused by calls, assume nothing. In
+ // particular, a direct write into an object's field invalidates its status.
+ for (const auto *Region : InvalidatedRegions)
+ State = removeFromState(State, Region->getBaseRegion());
}
return State;
}
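An illustrative consequence of the relaxed invalidation (a sketch, not from the patch): passing a moved-from object by const reference no longer makes the checker forget the move, while passing it by non-const reference still does.

#include <string>
#include <utility>

void observe(const std::string &S);  // cannot reset S: region not invalidated
void mutate(std::string &S);         // may reset S: region invalidated

void demo() {
  std::string A = "x";
  std::string B = std::move(A);
  observe(A);   // A stays tracked as moved-from
  mutate(A);    // the conservative call may have reassigned A: tracking dropped
  (void)B;
}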
-void MisusedMovedObjectChecker::printState(raw_ostream &Out,
- ProgramStateRef State,
- const char *NL,
- const char *Sep) const {
+void MoveChecker::printState(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, const char *Sep) const {
TrackedRegionMapTy RS = State->get<TrackedRegionMap>();
@@ -518,6 +611,8 @@ void MisusedMovedObjectChecker::printState(raw_ostream &Out,
}
}
}
-void ento::registerMisusedMovedObjectChecker(CheckerManager &mgr) {
- mgr.registerChecker<MisusedMovedObjectChecker>();
+void ento::registerMoveChecker(CheckerManager &mgr) {
+ MoveChecker *chk = mgr.registerChecker<MoveChecker>();
+ chk->setAggressiveness(mgr.getAnalyzerOptions().getCheckerBooleanOption(
+ "Aggressive", false, chk));
}
diff --git a/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp b/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
index 01d2c0491b..a97eab4e82 100644
--- a/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
@@ -192,7 +192,7 @@ NonNullParamChecker::genReportNullAttrNonNull(const ExplodedNode *ErrorNode,
*BTAttrNonNull,
"Null pointer passed as an argument to a 'nonnull' parameter", ErrorNode);
if (ArgE)
- bugreporter::trackNullOrUndefValue(ErrorNode, ArgE, *R);
+ bugreporter::trackExpressionValue(ErrorNode, ArgE, *R);
return R;
}
@@ -208,9 +208,7 @@ std::unique_ptr<BugReport> NonNullParamChecker::genReportReferenceToNullPointer(
const Expr *ArgEDeref = bugreporter::getDerefExpr(ArgE);
if (!ArgEDeref)
ArgEDeref = ArgE;
- bugreporter::trackNullOrUndefValue(ErrorNode,
- ArgEDeref,
- *R);
+ bugreporter::trackExpressionValue(ErrorNode, ArgEDeref, *R);
}
return R;
diff --git a/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp b/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
index 86ae9cb666..ce656d5201 100644
--- a/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
@@ -174,7 +174,8 @@ private:
if (Error == ErrorKind::NilAssignedToNonnull ||
Error == ErrorKind::NilPassedToNonnull ||
Error == ErrorKind::NilReturnedToNonnull)
- bugreporter::trackNullOrUndefValue(N, ValueExpr, *R);
+ if (const auto *Ex = dyn_cast<Expr>(ValueExpr))
+ bugreporter::trackExpressionValue(N, Ex, *R);
}
BR.emitReport(std::move(R));
}
@@ -184,7 +185,7 @@ private:
const SymbolicRegion *getTrackRegion(SVal Val,
bool CheckSuperRegion = false) const;
- /// Returns true if the call is diagnosable in the currrent analyzer
+ /// Returns true if the call is diagnosable in the current analyzer
/// configuration.
bool isDiagnosableCall(const CallEvent &Call) const {
if (NoDiagnoseCallsToSystemHeaders && Call.isInSystemHeader())
@@ -328,8 +329,8 @@ NullabilityChecker::NullabilityBugVisitor::VisitNode(const ExplodedNode *N,
nullptr);
}
-/// Returns true when the value stored at the given location is null
-/// and the passed in type is nonnnull.
+/// Returns true when the value stored at the given location has been
+/// constrained to null after being passed through an object of nonnull type.
static bool checkValueAtLValForInvariantViolation(ProgramStateRef State,
SVal LV, QualType T) {
if (getNullabilityAnnotation(T) != Nullability::Nonnull)
@@ -339,9 +340,14 @@ static bool checkValueAtLValForInvariantViolation(ProgramStateRef State,
if (!RegionVal)
return false;
- auto StoredVal =
- State->getSVal(RegionVal->getRegion()).getAs<DefinedOrUnknownSVal>();
- if (!StoredVal)
+ // If the value was constrained to null *after* it was passed through that
+ // location, it could not have been a concrete pointer *when* it was passed.
+ // In that case we would have handled the situation when the value was
+ // bound to that location, by emitting (or not emitting) a report.
+ // Therefore we are only interested in symbolic regions that can be either
+ // null or non-null depending on the value of their respective symbol.
+ auto StoredVal = State->getSVal(*RegionVal).getAs<loc::MemRegionVal>();
+ if (!StoredVal || !isa<SymbolicRegion>(StoredVal->getRegion()))
return false;
if (getNullConstraint(*StoredVal, State) == NullConstraint::IsNull)
@@ -445,9 +451,6 @@ void NullabilityChecker::reportBugIfInvariantHolds(StringRef Msg,
/// Cleaning up the program state.
void NullabilityChecker::checkDeadSymbols(SymbolReaper &SR,
CheckerContext &C) const {
- if (!SR.hasDeadSymbols())
- return;
-
ProgramStateRef State = C.getState();
NullabilityMapTy Nullabilities = State->get<NullabilityMap>();
for (NullabilityMapTy::iterator I = Nullabilities.begin(),
@@ -1172,10 +1175,15 @@ void NullabilityChecker::printState(raw_ostream &Out, ProgramStateRef State,
NullabilityMapTy B = State->get<NullabilityMap>();
+ if (State->get<InvariantViolated>())
+ Out << Sep << NL
+ << "Nullability invariant was violated, warnings suppressed." << NL;
+
if (B.isEmpty())
return;
- Out << Sep << NL;
+ if (!State->get<InvariantViolated>())
+ Out << Sep << NL;
for (NullabilityMapTy::iterator I = B.begin(), E = B.end(); I != E; ++I) {
Out << I->first << " : ";
@@ -1192,7 +1200,7 @@ void NullabilityChecker::printState(raw_ostream &Out, ProgramStateRef State,
checker->NeedTracking = checker->NeedTracking || trackingRequired; \
checker->NoDiagnoseCallsToSystemHeaders = \
checker->NoDiagnoseCallsToSystemHeaders || \
- mgr.getAnalyzerOptions().getBooleanOption( \
+ mgr.getAnalyzerOptions().getCheckerBooleanOption( \
"NoDiagnoseCallsToSystemHeaders", false, checker, true); \
}
diff --git a/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp b/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp
index 56b42239d7..f808739347 100644
--- a/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp
@@ -87,9 +87,10 @@ void Callback::run(const MatchFinder::MatchResult &Result) {
MacroIndicatesWeShouldSkipTheCheck = true;
}
if (!MacroIndicatesWeShouldSkipTheCheck) {
- llvm::APSInt Result;
+ Expr::EvalResult EVResult;
if (CheckIfNull->IgnoreParenCasts()->EvaluateAsInt(
- Result, ACtx, Expr::SE_AllowSideEffects)) {
+ EVResult, ACtx, Expr::SE_AllowSideEffects)) {
+ llvm::APSInt Result = EVResult.Val.getInt();
if (Result == 0) {
if (!C->Pedantic)
return;
@@ -346,5 +347,5 @@ void ento::registerNumberObjectConversionChecker(CheckerManager &Mgr) {
NumberObjectConversionChecker *Chk =
Mgr.registerChecker<NumberObjectConversionChecker>();
Chk->Pedantic =
- Mgr.getAnalyzerOptions().getBooleanOption("Pedantic", false, Chk);
+ Mgr.getAnalyzerOptions().getCheckerBooleanOption("Pedantic", false, Chk);
}
diff --git a/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
index b7339fe79f..f56a795636 100644
--- a/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
@@ -49,7 +49,7 @@ void ObjCAtSyncChecker::checkPreStmt(const ObjCAtSynchronizedStmt *S,
"for @synchronized"));
auto report =
llvm::make_unique<BugReport>(*BT_undef, BT_undef->getDescription(), N);
- bugreporter::trackNullOrUndefValue(N, Ex, *report);
+ bugreporter::trackExpressionValue(N, Ex, *report);
C.emitReport(std::move(report));
}
return;
@@ -73,7 +73,7 @@ void ObjCAtSyncChecker::checkPreStmt(const ObjCAtSynchronizedStmt *S,
"(no synchronization will occur)"));
auto report =
llvm::make_unique<BugReport>(*BT_null, BT_null->getDescription(), N);
- bugreporter::trackNullOrUndefValue(N, Ex, *report);
+ bugreporter::trackExpressionValue(N, Ex, *report);
C.emitReport(std::move(report));
return;
@@ -89,6 +89,6 @@ void ObjCAtSyncChecker::checkPreStmt(const ObjCAtSynchronizedStmt *S,
}
void ento::registerObjCAtSyncChecker(CheckerManager &mgr) {
- if (mgr.getLangOpts().ObjC2)
+ if (mgr.getLangOpts().ObjC)
mgr.registerChecker<ObjCAtSyncChecker>();
}
diff --git a/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp b/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
index b1255243be..dc361ad537 100644
--- a/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
@@ -41,7 +41,8 @@ public:
BugReporter &BRArg) const {
BR = &BRArg;
AllowedPad =
- MGR.getAnalyzerOptions().getOptionAsInteger("AllowedPad", 24, this);
+ MGR.getAnalyzerOptions()
+ .getCheckerIntegerOption("AllowedPad", 24, this);
assert(AllowedPad >= 0 && "AllowedPad option should be non-negative");
// The calls to checkAST* from AnalysisConsumer don't
@@ -75,6 +76,20 @@ public:
if (shouldSkipDecl(RD))
return;
+ // TODO: Figure out why we are going through declarations and not only
+ // definitions.
+ if (!(RD = RD->getDefinition()))
+ return;
+
+ // This is the simplest correct case: a class with no fields and one base
+ // class. Other cases are more complicated because of how the base classes
+ // & fields might interact, so we don't bother dealing with them.
+ // TODO: Support other combinations of base classes and fields.
+ if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ if (CXXRD->field_empty() && CXXRD->getNumBases() == 1)
+ return visitRecord(CXXRD->bases().begin()->getType()->getAsRecordDecl(),
+ PadMultiplier);
+
auto &ASTContext = RD->getASTContext();
const ASTRecordLayout &RL = ASTContext.getASTRecordLayout(RD);
assert(llvm::isPowerOf2_64(RL.getAlignment().getQuantity()));
@@ -112,12 +127,15 @@ public:
if (RT == nullptr)
return;
- // TODO: Recurse into the fields and base classes to see if any
- // of those have excess padding.
+ // TODO: Recurse into the fields to see if they have excess padding.
visitRecord(RT->getDecl(), Elts);
}
bool shouldSkipDecl(const RecordDecl *RD) const {
+ // TODO: Figure out why we are going through declarations and not only
+ // definitions.
+ if (!(RD = RD->getDefinition()))
+ return true;
auto Location = RD->getLocation();
// If the construct doesn't have a source file, then it's not something
// we want to diagnose.
@@ -132,13 +150,14 @@ public:
// Not going to attempt to optimize unions.
if (RD->isUnion())
return true;
- // How do you reorder fields if you haven't got any?
- if (RD->field_empty())
- return true;
if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
// Tail padding with base classes ends up being very complicated.
- // We will skip objects with base classes for now.
- if (CXXRD->getNumBases() != 0)
+ // We will skip objects with base classes for now, unless they do not
+ // have fields.
+ // TODO: Handle more base class scenarios.
+ if (!CXXRD->field_empty() && CXXRD->getNumBases() != 0)
+ return true;
+ if (CXXRD->field_empty() && CXXRD->getNumBases() != 1)
return true;
// Virtual bases are complicated, skipping those for now.
if (CXXRD->getNumVBases() != 0)
@@ -150,6 +169,10 @@ public:
if (CXXRD->getTypeForDecl()->isInstantiationDependentType())
return true;
}
+ // How do you reorder fields if you haven't got any?
+ else if (RD->field_empty())
+ return true;
+
auto IsTrickyField = [](const FieldDecl *FD) -> bool {
// Bitfield layout is hard.
if (FD->isBitField())
@@ -323,7 +346,7 @@ public:
BR->emitReport(std::move(Report));
}
};
-}
+} // namespace
void ento::registerPaddingChecker(CheckerManager &Mgr) {
Mgr.registerChecker<PaddingChecker>();
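An illustrative record layout the padding checker can now see through (sizes assume a typical 64-bit ABI): the derived class has no fields of its own and exactly one base, so the checker recurses into the base instead of skipping the whole hierarchy.

struct Base {
  char C1;      // 1 byte, then 7 bytes of padding before L
  long long L;  // 8 bytes
  char C2;      // 1 byte, then 7 bytes of tail padding
};              // sizeof(Base) == 24 on common 64-bit ABIs

struct Derived : Base {};  // field_empty() && getNumBases() == 1:
                           // visitRecord() is now called on Base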
diff --git a/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp b/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
index 63f82b275b..af242845f0 100644
--- a/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
@@ -112,7 +112,7 @@ PointerArithChecker::getPointedRegion(const MemRegion *Region,
}
/// Checks whether a region is the part of an array.
-/// In case there is a dericed to base cast above the array element, the
+/// In case there is a derived to base cast above the array element, the
/// Polymorphic output value is set to true. AKind output value is set to the
/// allocation kind of the inspected region.
const MemRegion *PointerArithChecker::getArrayRegion(const MemRegion *Region,
diff --git a/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp b/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp
index e5d27f577d..488cf6d3eb 100644
--- a/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp
@@ -39,13 +39,76 @@ ProgramStateRef removeRefBinding(ProgramStateRef State, SymbolRef Sym) {
return State->remove<RefBindings>(Sym);
}
+class UseAfterRelease : public CFRefBug {
+public:
+ UseAfterRelease(const CheckerBase *checker)
+ : CFRefBug(checker, "Use-after-release") {}
+
+ const char *getDescription() const override {
+ return "Reference-counted object is used after it is released";
+ }
+};
+
+class BadRelease : public CFRefBug {
+public:
+ BadRelease(const CheckerBase *checker) : CFRefBug(checker, "Bad release") {}
+
+ const char *getDescription() const override {
+ return "Incorrect decrement of the reference count of an object that is "
+ "not owned at this point by the caller";
+ }
+};
+
+class DeallocNotOwned : public CFRefBug {
+public:
+ DeallocNotOwned(const CheckerBase *checker)
+ : CFRefBug(checker, "-dealloc sent to non-exclusively owned object") {}
+
+ const char *getDescription() const override {
+ return "-dealloc sent to object that may be referenced elsewhere";
+ }
+};
+
+class OverAutorelease : public CFRefBug {
+public:
+ OverAutorelease(const CheckerBase *checker)
+ : CFRefBug(checker, "Object autoreleased too many times") {}
+
+ const char *getDescription() const override {
+ return "Object autoreleased too many times";
+ }
+};
+
+class ReturnedNotOwnedForOwned : public CFRefBug {
+public:
+ ReturnedNotOwnedForOwned(const CheckerBase *checker)
+ : CFRefBug(checker, "Method should return an owned object") {}
+
+ const char *getDescription() const override {
+ return "Object with a +0 retain count returned to caller where a +1 "
+ "(owning) retain count is expected";
+ }
+};
+
+class Leak : public CFRefBug {
+public:
+ Leak(const CheckerBase *checker, StringRef name) : CFRefBug(checker, name) {
+ // Leaks should not be reported if they are post-dominated by a sink.
+ setSuppressOnSink(true);
+ }
+
+ const char *getDescription() const override { return ""; }
+
+ bool isLeak() const override { return true; }
+};
+
} // end namespace retaincountchecker
} // end namespace ento
} // end namespace clang
void RefVal::print(raw_ostream &Out) const {
if (!T.isNull())
- Out << "Tracked " << T.getAsString() << '/';
+ Out << "Tracked " << T.getAsString() << " | ";
switch (getKind()) {
default: llvm_unreachable("Invalid RefVal kind");
@@ -175,9 +238,7 @@ void RetainCountChecker::checkPostStmt(const BlockExpr *BE,
Regions.push_back(VR);
}
- state =
- state->scanReachableSymbols<StopTrackingCallback>(Regions.data(),
- Regions.data() + Regions.size()).getState();
+ state = state->scanReachableSymbols<StopTrackingCallback>(Regions).getState();
C.addTransition(state);
}
@@ -352,6 +413,56 @@ void RetainCountChecker::checkPostCall(const CallEvent &Call,
checkSummary(*Summ, Call, C);
}
+void RetainCountChecker::checkEndAnalysis(ExplodedGraph &G, BugReporter &BR,
+ ExprEngine &Eng) const {
+ // FIXME: This is a hack to make sure the summary log gets cleared between
+ // analyses of different code bodies.
+ //
+ // Why is this necessary? Because a checker's lifetime is tied to a
+ // translation unit, but an ExplodedGraph's lifetime is just a code body.
+ // Once in a blue moon, a new ExplodedNode will have the same address as an
+ // old one with an associated summary, and the bug report visitor gets very
+ // confused. (To make things worse, the summary lifetime is currently also
+ // tied to a code body, so we get a crash instead of incorrect results.)
+ //
+ // Why is this a bad solution? Because if the lifetime of the ExplodedGraph
+ // changes, things will start going wrong again. Really the lifetime of this
+ // log needs to be tied to either the specific nodes in it or the entire
+ // ExplodedGraph, not to a specific part of the code being analyzed.
+ //
+ // (Also, having stateful local data means that the same checker can't be
+ // used from multiple threads, but a lot of checkers have incorrect
+ // assumptions about that anyway. So that wasn't a priority at the time of
+ // this fix.)
+ //
+ // This happens at the end of analysis, but bug reports are emitted /after/
+ // this point. So we can't just clear the summary log now. Instead, we mark
+ // that the next time we access the summary log, it should be cleared.
+
+ // If we never reset the summary log during /this/ code body analysis,
+ // there were no new summaries. There might still have been summaries from
+ // the /last/ analysis, so clear them out to make sure the bug report
+ // visitors don't get confused.
+ if (ShouldResetSummaryLog)
+ SummaryLog.clear();
+
+ ShouldResetSummaryLog = !SummaryLog.empty();
+}
+
+CFRefBug *
+RetainCountChecker::getLeakWithinFunctionBug(const LangOptions &LOpts) const {
+ if (!leakWithinFunction)
+ leakWithinFunction.reset(new Leak(this, "Leak"));
+ return leakWithinFunction.get();
+}
+
+CFRefBug *
+RetainCountChecker::getLeakAtReturnBug(const LangOptions &LOpts) const {
+ if (!leakAtReturn)
+ leakAtReturn.reset(new Leak(this, "Leak of returned object"));
+ return leakAtReturn.get();
+}
+
/// GetReturnType - Used to get the return type of a message expression or
/// function call with the intention of affixing that type to a tracked symbol.
/// While the return type can be queried directly from RetEx, when
@@ -422,13 +533,6 @@ void RetainCountChecker::processSummaryOfInlined(const RetainSummary &Summ,
RetEffect RE = Summ.getRetEffect();
if (SymbolRef Sym = CallOrMsg.getReturnValue().getAsSymbol()) {
- if (const auto *MCall = dyn_cast<CXXMemberCall>(&CallOrMsg)) {
- if (Optional<RefVal> updatedRefVal =
- refValFromRetEffect(RE, MCall->getResultType())) {
- state = setRefBinding(state, Sym, *updatedRefVal);
- }
- }
-
if (RE.getKind() == RetEffect::NoRetHard)
state = removeRefBinding(state, Sym);
}
@@ -470,6 +574,25 @@ static ProgramStateRef updateOutParameter(ProgramStateRef State,
return State;
}
+static bool isPointerToObject(QualType QT) {
+ QualType PT = QT->getPointeeType();
+ if (!PT.isNull())
+ if (PT->getAsCXXRecordDecl())
+ return true;
+ return false;
+}
+
+/// Whether the tracked value should be escaped on a given call.
+/// OSObjects are escaped when passed to void * / etc.
+static bool shouldEscapeArgumentOnCall(const CallEvent &CE, unsigned ArgIdx,
+ const RefVal *TrackedValue) {
+ if (TrackedValue->getObjKind() != RetEffect::OS)
+ return false;
+ if (ArgIdx >= CE.parameters().size())
+ return false;
+ return !isPointerToObject(CE.parameters()[ArgIdx]->getType());
+}
+
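Hypothetical code illustrating the escape rule above (type and function names are made up): once a tracked OSObject-kind value is passed through a parameter whose pointee is not a C++ record, e.g. void *, it stops being tracked.

struct OSStyleObject {};           // stand-in for an OSObject subclass

void forward(OSStyleObject *O);    // pointee is a record: keep tracking
void stash(void *Opaque);          // pointee is 'void': the value escapes

void demo(OSStyleObject *Obj) {
  forward(Obj);  // isPointerToObject(param type) == true  -> no escape
  stash(Obj);    // isPointerToObject(param type) == false -> StopTrackingHard
}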
void RetainCountChecker::checkSummary(const RetainSummary &Summ,
const CallEvent &CallOrMsg,
CheckerContext &C) const {
@@ -488,6 +611,10 @@ void RetainCountChecker::checkSummary(const RetainSummary &Summ,
state = updateOutParameter(state, V, Effect);
} else if (SymbolRef Sym = V.getAsLocSymbol()) {
if (const RefVal *T = getRefBinding(state, Sym)) {
+
+ if (shouldEscapeArgumentOnCall(CallOrMsg, idx, T))
+ Effect = StopTrackingHard;
+
state = updateSymbol(state, Sym, *T, Effect, hasErr, C);
if (hasErr) {
ErrorRange = CallOrMsg.getArgSourceRange(idx);
@@ -637,7 +764,7 @@ RetainCountChecker::updateSymbol(ProgramStateRef state, SymbolRef sym,
break;
}
- // Fall-through.
+ LLVM_FALLTHROUGH;
case DoNothing:
return state;
@@ -774,40 +901,48 @@ bool RetainCountChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
// annotate attribute. If it does, we will not inline it.
bool hasTrustedImplementationAnnotation = false;
+ const LocationContext *LCtx = C.getLocationContext();
+
+ using BehaviorSummary = RetainSummaryManager::BehaviorSummary;
+ Optional<BehaviorSummary> BSmr =
+ SmrMgr.canEval(CE, FD, hasTrustedImplementationAnnotation);
+
// See if it's one of the specific functions we know how to eval.
- if (!SmrMgr.canEval(CE, FD, hasTrustedImplementationAnnotation))
+ if (!BSmr)
return false;
// Bind the return value.
- const LocationContext *LCtx = C.getLocationContext();
- SVal RetVal = state->getSVal(CE->getArg(0), LCtx);
- if (RetVal.isUnknown() ||
- (hasTrustedImplementationAnnotation && !ResultTy.isNull())) {
+ if (BSmr == BehaviorSummary::Identity ||
+ BSmr == BehaviorSummary::IdentityOrZero) {
+ SVal RetVal = state->getSVal(CE->getArg(0), LCtx);
+
// If the receiver is unknown or the function has
// 'rc_ownership_trusted_implementation' annotate attribute, conjure a
// return value.
- SValBuilder &SVB = C.getSValBuilder();
- RetVal = SVB.conjureSymbolVal(nullptr, CE, LCtx, ResultTy, C.blockCount());
- }
- state = state->BindExpr(CE, LCtx, RetVal, false);
-
- // FIXME: This should not be necessary, but otherwise the argument seems to be
- // considered alive during the next statement.
- if (const MemRegion *ArgRegion = RetVal.getAsRegion()) {
- // Save the refcount status of the argument.
- SymbolRef Sym = RetVal.getAsLocSymbol();
- const RefVal *Binding = nullptr;
- if (Sym)
- Binding = getRefBinding(state, Sym);
-
- // Invalidate the argument region.
- state = state->invalidateRegions(
- ArgRegion, CE, C.blockCount(), LCtx,
- /*CausesPointerEscape*/ hasTrustedImplementationAnnotation);
-
- // Restore the refcount status of the argument.
- if (Binding)
- state = setRefBinding(state, Sym, *Binding);
+ if (RetVal.isUnknown() ||
+ (hasTrustedImplementationAnnotation && !ResultTy.isNull())) {
+ SValBuilder &SVB = C.getSValBuilder();
+ RetVal =
+ SVB.conjureSymbolVal(nullptr, CE, LCtx, ResultTy, C.blockCount());
+ }
+ state = state->BindExpr(CE, LCtx, RetVal, /*Invalidate=*/false);
+
+ if (BSmr == BehaviorSummary::IdentityOrZero) {
+ // Add a branch where the output is zero.
+ ProgramStateRef NullOutputState = C.getState();
+
+ // Assume that output is zero on the other branch.
+ NullOutputState = NullOutputState->BindExpr(
+ CE, LCtx, C.getSValBuilder().makeNull(), /*Invalidate=*/false);
+
+ C.addTransition(NullOutputState);
+
+ // And on the original branch assume that both input and
+ // output are non-zero.
+ if (auto L = RetVal.getAs<DefinedOrUnknownSVal>())
+ state = state->assume(*L, /*Assumption=*/true);
+
+ }
}
C.addTransition(state);
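A hypothetical caller of an "identity or zero" function (a stand-in for something like OSDynamicCast, declared here only for illustration) showing the two branches the engine now adds:

struct Obj {};

// Returns either its argument unchanged or null.
Obj *identityOrNull(Obj *O);

void demo(Obj *O) {
  if (Obj *P = identityOrNull(O)) {
    // Non-null branch: P aliases O, and both are assumed non-null here.
    (void)P;
  } else {
    // Null branch: the result was bound to zero; no retain-count adjustment.
  }
}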
@@ -947,8 +1082,7 @@ ExplodedNode * RetainCountChecker::checkReturnWithRetEffect(const ReturnStmt *S,
if (N) {
const LangOptions &LOpts = C.getASTContext().getLangOpts();
auto R = llvm::make_unique<CFRefLeakReport>(
- *getLeakAtReturnBug(LOpts), LOpts, SummaryLog, N, Sym, C,
- IncludeAllocationLine);
+ *getLeakAtReturnBug(LOpts), LOpts, SummaryLog, N, Sym, C);
C.emitReport(std::move(R));
}
return N;
@@ -1097,9 +1231,8 @@ RetainCountChecker::checkRegionChanges(ProgramStateRef state,
WhitelistedSymbols.insert(SR->getSymbol());
}
- for (InvalidatedSymbols::const_iterator I=invalidated->begin(),
- E = invalidated->end(); I!=E; ++I) {
- SymbolRef sym = *I;
+ for (SymbolRef sym :
+ llvm::make_range(invalidated->begin(), invalidated->end())) {
if (WhitelistedSymbols.count(sym))
continue;
// Remove any existing reference-count binding.
@@ -1235,7 +1368,7 @@ RetainCountChecker::processLeaks(ProgramStateRef state,
assert(BT && "BugType not initialized.");
Ctx.emitReport(llvm::make_unique<CFRefLeakReport>(
- *BT, LOpts, SummaryLog, N, *I, Ctx, IncludeAllocationLine));
+ *BT, LOpts, SummaryLog, N, *I, Ctx));
}
}
@@ -1322,19 +1455,6 @@ void RetainCountChecker::checkEndFunction(const ReturnStmt *RS,
processLeaks(state, Leaked, Ctx, Pred);
}
-const ProgramPointTag *
-RetainCountChecker::getDeadSymbolTag(SymbolRef sym) const {
- const CheckerProgramPointTag *&tag = DeadSymbolTags[sym];
- if (!tag) {
- SmallString<64> buf;
- llvm::raw_svector_ostream out(buf);
- out << "Dead Symbol : ";
- sym->dumpToStream(out);
- tag = new CheckerProgramPointTag(this, out.str());
- }
- return tag;
-}
-
void RetainCountChecker::checkDeadSymbols(SymbolReaper &SymReaper,
CheckerContext &C) const {
ExplodedNode *Pred = C.getPredecessor();
@@ -1344,20 +1464,18 @@ void RetainCountChecker::checkDeadSymbols(SymbolReaper &SymReaper,
SmallVector<SymbolRef, 10> Leaked;
// Update counts from autorelease pools
- for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
- E = SymReaper.dead_end(); I != E; ++I) {
- SymbolRef Sym = *I;
- if (const RefVal *T = B.lookup(Sym)){
- // Use the symbol as the tag.
- // FIXME: This might not be as unique as we would like.
- const ProgramPointTag *Tag = getDeadSymbolTag(Sym);
- state = handleAutoreleaseCounts(state, Pred, Tag, C, Sym, *T);
+ for (const auto &I: state->get<RefBindings>()) {
+ SymbolRef Sym = I.first;
+ if (SymReaper.isDead(Sym)) {
+ static CheckerProgramPointTag Tag(this, "DeadSymbolAutorelease");
+ const RefVal &V = I.second;
+ state = handleAutoreleaseCounts(state, Pred, &Tag, C, Sym, V);
if (!state)
return;
// Fetch the new reference count from the state, and use it to handle
// this symbol.
- state = handleSymbolDeath(state, *I, *getRefBinding(state, Sym), Leaked);
+ state = handleSymbolDeath(state, Sym, *getRefBinding(state, Sym), Leaked);
}
}
@@ -1408,5 +1526,23 @@ void RetainCountChecker::printState(raw_ostream &Out, ProgramStateRef State,
//===----------------------------------------------------------------------===//
void ento::registerRetainCountChecker(CheckerManager &Mgr) {
- Mgr.registerChecker<RetainCountChecker>(Mgr.getAnalyzerOptions());
+ auto *Chk = Mgr.registerChecker<RetainCountChecker>();
+ Chk->TrackObjCAndCFObjects = true;
+}
+
+// FIXME: remove this, hack for backwards compatibility:
+// it should be possible to enable the NS/CF retain count checker as
+// osx.cocoa.RetainCount, and it should be possible to disable
+// osx.OSObjectRetainCount using osx.cocoa.RetainCount:CheckOSObject=false.
+static bool hasPrevCheckOSObjectOptionDisabled(AnalyzerOptions &Options) {
+ auto I = Options.Config.find("osx.cocoa.RetainCount:CheckOSObject");
+ if (I != Options.Config.end())
+ return I->getValue() == "false";
+ return false;
+}
+
+void ento::registerOSObjectRetainCountChecker(CheckerManager &Mgr) {
+ auto *Chk = Mgr.registerChecker<RetainCountChecker>();
+ if (!hasPrevCheckOSObjectOptionDisabled(Mgr.getAnalyzerOptions()))
+ Chk->TrackOSObjects = true;
}
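
A sketch of how the two entry points compose, under the assumption that CheckerManager::registerChecker returns the already-registered instance when the same checker class is registered a second time, so enabling both packages configures one RetainCountChecker object:

// Sketch only; Mgr is the CheckerManager built by the analyzer frontend.
void enableBothPackages(clang::ento::CheckerManager &Mgr) {
  clang::ento::registerRetainCountChecker(Mgr);         // TrackObjCAndCFObjects = true
  clang::ento::registerOSObjectRetainCountChecker(Mgr); // TrackOSObjects = true, unless
                                                        // CheckOSObject=false was given
}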
diff --git a/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h b/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h
index e8d9136ffd..0f43e8f5dd 100644
--- a/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h
+++ b/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h
@@ -16,7 +16,6 @@
#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_RETAINCOUNTCHECKER_H
#include "../ClangSACheckers.h"
-#include "../AllocationDiagnostics.h"
#include "RetainCountDiagnostics.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
@@ -45,8 +44,6 @@
#include <cstdarg>
#include <utility>
-using llvm::StrInStrNoCase;
-
namespace clang {
namespace ento {
namespace retaincountchecker {
@@ -95,7 +92,7 @@ private:
/// See the RefVal::Kind enum for possible values.
unsigned RawKind : 5;
- /// The kind of object being tracked (CF or ObjC), if known.
+ /// The kind of object being tracked (CF or ObjC or OSObject), if known.
///
/// See the RetEffect::ObjKind enum for possible values.
unsigned RawObjectKind : 3;
@@ -268,72 +265,26 @@ class RetainCountChecker
mutable std::unique_ptr<RetainSummaryManager> Summaries;
mutable SummaryLogTy SummaryLog;
- AnalyzerOptions &Options;
mutable bool ShouldResetSummaryLog;
- /// Optional setting to indicate if leak reports should include
- /// the allocation line.
- mutable bool IncludeAllocationLine;
-
public:
- RetainCountChecker(AnalyzerOptions &Options)
- : Options(Options), ShouldResetSummaryLog(false),
- IncludeAllocationLine(
- shouldIncludeAllocationSiteInLeakDiagnostics(Options)) {}
- ~RetainCountChecker() override { DeleteContainerSeconds(DeadSymbolTags); }
+ /// Track Objective-C and CoreFoundation objects.
+ bool TrackObjCAndCFObjects = false;
- bool shouldCheckOSObjectRetainCount() const {
- return Options.getBooleanOption("CheckOSObject", false, this);
- }
+ /// Track subclasses of OSObject.
+ bool TrackOSObjects = false;
+
+ RetainCountChecker() : ShouldResetSummaryLog(false) {}
+
+ ~RetainCountChecker() override { DeleteContainerSeconds(DeadSymbolTags); }
void checkEndAnalysis(ExplodedGraph &G, BugReporter &BR,
- ExprEngine &Eng) const {
- // FIXME: This is a hack to make sure the summary log gets cleared between
- // analyses of different code bodies.
- //
- // Why is this necessary? Because a checker's lifetime is tied to a
- // translation unit, but an ExplodedGraph's lifetime is just a code body.
- // Once in a blue moon, a new ExplodedNode will have the same address as an
- // old one with an associated summary, and the bug report visitor gets very
- // confused. (To make things worse, the summary lifetime is currently also
- // tied to a code body, so we get a crash instead of incorrect results.)
- //
- // Why is this a bad solution? Because if the lifetime of the ExplodedGraph
- // changes, things will start going wrong again. Really the lifetime of this
- // log needs to be tied to either the specific nodes in it or the entire
- // ExplodedGraph, not to a specific part of the code being analyzed.
- //
- // (Also, having stateful local data means that the same checker can't be
- // used from multiple threads, but a lot of checkers have incorrect
- // assumptions about that anyway. So that wasn't a priority at the time of
- // this fix.)
- //
- // This happens at the end of analysis, but bug reports are emitted /after/
- // this point. So we can't just clear the summary log now. Instead, we mark
- // that the next time we access the summary log, it should be cleared.
-
- // If we never reset the summary log during /this/ code body analysis,
- // there were no new summaries. There might still have been summaries from
- // the /last/ analysis, so clear them out to make sure the bug report
- // visitors don't get confused.
- if (ShouldResetSummaryLog)
- SummaryLog.clear();
-
- ShouldResetSummaryLog = !SummaryLog.empty();
- }
+ ExprEngine &Eng) const;
- CFRefBug *getLeakWithinFunctionBug(const LangOptions &LOpts) const {
- if (!leakWithinFunction)
- leakWithinFunction.reset(new Leak(this, "Leak"));
- return leakWithinFunction.get();
- }
+ CFRefBug *getLeakWithinFunctionBug(const LangOptions &LOpts) const;
- CFRefBug *getLeakAtReturnBug(const LangOptions &LOpts) const {
- if (!leakAtReturn)
- leakAtReturn.reset(new Leak(this, "Leak of returned object"));
- return leakAtReturn.get();
- }
+ CFRefBug *getLeakAtReturnBug(const LangOptions &LOpts) const;
RetainSummaryManager &getSummaryManager(ASTContext &Ctx) const {
// FIXME: We don't support ARC being turned on and off during one analysis.
@@ -341,7 +292,7 @@ public:
bool ARCEnabled = (bool)Ctx.getLangOpts().ObjCAutoRefCount;
if (!Summaries) {
Summaries.reset(new RetainSummaryManager(
- Ctx, ARCEnabled, shouldCheckOSObjectRetainCount()));
+ Ctx, ARCEnabled, TrackObjCAndCFObjects, TrackOSObjects));
} else {
assert(Summaries->isARCEnabled() == ARCEnabled);
}
diff --git a/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp b/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
index 0be37ff65c..9dff0be138 100644
--- a/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
+++ b/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
@@ -28,16 +28,302 @@ static bool isNumericLiteralExpression(const Expr *E) {
isa<CXXBoolLiteralExpr>(E);
}
+/// If the type is a non-typedef pointer to a CXXRecordDecl,
+/// return the record's name.
+/// Otherwise, return the serialization of the type.
+static std::string getPrettyTypeName(QualType QT) {
+ QualType PT = QT->getPointeeType();
+ if (!PT.isNull() && !QT->getAs<TypedefType>())
+ if (const auto *RD = PT->getAsCXXRecordDecl())
+ return RD->getName();
+ return QT.getAsString();
+}
+
+/// Write information about the type state change to {@code os},
+/// return whether the note should be generated.
+static bool shouldGenerateNote(llvm::raw_string_ostream &os,
+ const RefVal *PrevT, const RefVal &CurrV,
+ SmallVector<ArgEffect, 2> &AEffects) {
+ // Get the previous type state.
+ RefVal PrevV = *PrevT;
+
+ // Specially handle -dealloc.
+ if (std::find(AEffects.begin(), AEffects.end(), Dealloc) != AEffects.end()) {
+ // Determine if the object's reference count was pushed to zero.
+ assert(!PrevV.hasSameState(CurrV) && "The state should have changed.");
+ // We may not have transitioned to 'release' if we hit an error.
+ // This case is handled elsewhere.
+ if (CurrV.getKind() == RefVal::Released) {
+ assert(CurrV.getCombinedCounts() == 0);
+ os << "Object released by directly sending the '-dealloc' message";
+ return true;
+ }
+ }
+
+ // Determine if the typestate has changed.
+ if (!PrevV.hasSameState(CurrV))
+ switch (CurrV.getKind()) {
+ case RefVal::Owned:
+ case RefVal::NotOwned:
+ if (PrevV.getCount() == CurrV.getCount()) {
+ // Did an autorelease message get sent?
+ if (PrevV.getAutoreleaseCount() == CurrV.getAutoreleaseCount())
+ return false;
+
+ assert(PrevV.getAutoreleaseCount() < CurrV.getAutoreleaseCount());
+ os << "Object autoreleased";
+ return true;
+ }
+
+ if (PrevV.getCount() > CurrV.getCount())
+ os << "Reference count decremented.";
+ else
+ os << "Reference count incremented.";
+
+ if (unsigned Count = CurrV.getCount())
+ os << " The object now has a +" << Count << " retain count.";
+
+ return true;
+
+ case RefVal::Released:
+ if (CurrV.getIvarAccessHistory() ==
+ RefVal::IvarAccessHistory::ReleasedAfterDirectAccess &&
+ CurrV.getIvarAccessHistory() != PrevV.getIvarAccessHistory()) {
+ os << "Strong instance variable relinquished. ";
+ }
+ os << "Object released.";
+ return true;
+
+ case RefVal::ReturnedOwned:
+ // Autoreleases can be applied after marking a node ReturnedOwned.
+ if (CurrV.getAutoreleaseCount())
+ return false;
+
+ os << "Object returned to caller as an owning reference (single "
+ "retain count transferred to caller)";
+ return true;
+
+ case RefVal::ReturnedNotOwned:
+ os << "Object returned to caller with a +0 retain count";
+ return true;
+
+ default:
+ return false;
+ }
+ return true;
+}
+
+static void generateDiagnosticsForCallLike(ProgramStateRef CurrSt,
+ const LocationContext *LCtx,
+ const RefVal &CurrV, SymbolRef &Sym,
+ const Stmt *S,
+ llvm::raw_string_ostream &os) {
+ if (const CallExpr *CE = dyn_cast<CallExpr>(S)) {
+ // Get the name of the callee (if it is available)
+ // from the tracked SVal.
+ SVal X = CurrSt->getSValAsScalarOrLoc(CE->getCallee(), LCtx);
+ const FunctionDecl *FD = X.getAsFunctionDecl();
+
+ // If failed, try to get it from AST.
+ if (!FD)
+ FD = dyn_cast<FunctionDecl>(CE->getCalleeDecl());
+
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(CE->getCalleeDecl())) {
+ os << "Call to method '" << MD->getQualifiedNameAsString() << '\'';
+ } else if (FD) {
+ os << "Call to function '" << FD->getQualifiedNameAsString() << '\'';
+ } else {
+ os << "function call";
+ }
+ } else if (isa<CXXNewExpr>(S)) {
+ os << "Operator 'new'";
+ } else {
+ assert(isa<ObjCMessageExpr>(S));
+ CallEventManager &Mgr = CurrSt->getStateManager().getCallEventManager();
+ CallEventRef<ObjCMethodCall> Call =
+ Mgr.getObjCMethodCall(cast<ObjCMessageExpr>(S), CurrSt, LCtx);
+
+ switch (Call->getMessageKind()) {
+ case OCM_Message:
+ os << "Method";
+ break;
+ case OCM_PropertyAccess:
+ os << "Property";
+ break;
+ case OCM_Subscript:
+ os << "Subscript";
+ break;
+ }
+ }
+
+ if (CurrV.getObjKind() == RetEffect::CF) {
+ os << " returns a Core Foundation object of type "
+ << Sym->getType().getAsString() << " with a ";
+ } else if (CurrV.getObjKind() == RetEffect::OS) {
+ os << " returns an OSObject of type " << getPrettyTypeName(Sym->getType())
+ << " with a ";
+ } else if (CurrV.getObjKind() == RetEffect::Generalized) {
+ os << " returns an object of type " << Sym->getType().getAsString()
+ << " with a ";
+ } else {
+ assert(CurrV.getObjKind() == RetEffect::ObjC);
+ QualType T = Sym->getType();
+ if (!isa<ObjCObjectPointerType>(T)) {
+ os << " returns an Objective-C object with a ";
+ } else {
+ const ObjCObjectPointerType *PT = cast<ObjCObjectPointerType>(T);
+ os << " returns an instance of " << PT->getPointeeType().getAsString()
+ << " with a ";
+ }
+ }
+
+ if (CurrV.isOwned()) {
+ os << "+1 retain count";
+ } else {
+ assert(CurrV.isNotOwned());
+ os << "+0 retain count";
+ }
+}
+
+namespace clang {
+namespace ento {
+namespace retaincountchecker {
+
+class CFRefReportVisitor : public BugReporterVisitor {
+protected:
+ SymbolRef Sym;
+ const SummaryLogTy &SummaryLog;
+
+public:
+ CFRefReportVisitor(SymbolRef sym, const SummaryLogTy &log)
+ : Sym(sym), SummaryLog(log) {}
+
+ void Profile(llvm::FoldingSetNodeID &ID) const override {
+ static int x = 0;
+ ID.AddPointer(&x);
+ ID.AddPointer(Sym);
+ }
+
+ std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
+ BugReporterContext &BRC,
+ BugReport &BR) override;
+
+ std::shared_ptr<PathDiagnosticPiece> getEndPath(BugReporterContext &BRC,
+ const ExplodedNode *N,
+ BugReport &BR) override;
+};
+
+class CFRefLeakReportVisitor : public CFRefReportVisitor {
+public:
+ CFRefLeakReportVisitor(SymbolRef sym,
+ const SummaryLogTy &log)
+ : CFRefReportVisitor(sym, log) {}
+
+ std::shared_ptr<PathDiagnosticPiece> getEndPath(BugReporterContext &BRC,
+ const ExplodedNode *N,
+ BugReport &BR) override;
+};
+
+} // end namespace retaincountchecker
+} // end namespace ento
+} // end namespace clang
+
+
+/// Find the first node with the parent stack frame.
+static const ExplodedNode *getCalleeNode(const ExplodedNode *Pred) {
+ const StackFrameContext *SC = Pred->getStackFrame();
+ if (SC->inTopFrame())
+ return nullptr;
+ const StackFrameContext *PC = SC->getParent()->getStackFrame();
+ if (!PC)
+ return nullptr;
+
+ const ExplodedNode *N = Pred;
+ while (N && N->getStackFrame() != PC) {
+ N = N->getFirstPred();
+ }
+ return N;
+}
+
+
+/// Insert a diagnostic piece at function exit
+/// if a function parameter is annotated as "os_consumed",
+/// but it does not actually consume the reference.
+static std::shared_ptr<PathDiagnosticEventPiece>
+annotateConsumedSummaryMismatch(const ExplodedNode *N,
+ CallExitBegin &CallExitLoc,
+ const SourceManager &SM,
+ CallEventManager &CEMgr) {
+
+ const ExplodedNode *CN = getCalleeNode(N);
+ if (!CN)
+ return nullptr;
+
+ CallEventRef<> Call = CEMgr.getCaller(N->getStackFrame(), N->getState());
+
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+ ArrayRef<const ParmVarDecl *> Parameters = Call->parameters();
+ for (unsigned I=0; I < Call->getNumArgs() && I < Parameters.size(); ++I) {
+ const ParmVarDecl *PVD = Parameters[I];
+
+ if (!PVD->hasAttr<OSConsumedAttr>())
+ return nullptr;
+
+ if (SymbolRef SR = Call->getArgSVal(I).getAsLocSymbol()) {
+ const RefVal *CountBeforeCall = getRefBinding(CN->getState(), SR);
+ const RefVal *CountAtExit = getRefBinding(N->getState(), SR);
+
+ if (!CountBeforeCall || !CountAtExit)
+ continue;
+
+ unsigned CountBefore = CountBeforeCall->getCount();
+ unsigned CountAfter = CountAtExit->getCount();
+
+ bool AsExpected = CountBefore > 0 && CountAfter == CountBefore - 1;
+ if (!AsExpected) {
+ os << "Parameter '";
+ PVD->getNameForDiagnostic(os, PVD->getASTContext().getPrintingPolicy(),
+ /*Qualified=*/false);
+ os << "' is marked as consuming, but the function does not consume "
+ << "the reference\n";
+ }
+ }
+ }
+
+ if (os.str().empty())
+ return nullptr;
+
+ // FIXME: remove the code duplication with NoStoreFuncVisitor.
+ PathDiagnosticLocation L;
+ if (const ReturnStmt *RS = CallExitLoc.getReturnStmt()) {
+ L = PathDiagnosticLocation::createBegin(RS, SM, N->getLocationContext());
+ } else {
+ L = PathDiagnosticLocation(
+ Call->getRuntimeDefinition().getDecl()->getSourceRange().getEnd(), SM);
+ }
+
+ return std::make_shared<PathDiagnosticEventPiece>(L, os.str());
+}
+
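
A minimal illustration of the mismatch this note targets; the OSObject stand-in and the helper are hypothetical, and the attribute spelling __attribute__((os_consumed)) is the one that maps to OSConsumedAttr:

class OSObject;            // stand-in for the tracked base class
void release(OSObject *);  // stand-in release function

// The parameter is marked as consumed, but no path releases it, so its retain
// count at function exit equals the count before the call and the path note
// "Parameter 'obj' is marked as consuming, but the function does not consume
// the reference" is attached at the call exit.
void store(OSObject *obj __attribute__((os_consumed))) {
  // release(obj) is missing on every path
}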
std::shared_ptr<PathDiagnosticPiece>
CFRefReportVisitor::VisitNode(const ExplodedNode *N,
BugReporterContext &BRC, BugReport &BR) {
+ const SourceManager &SM = BRC.getSourceManager();
+ CallEventManager &CEMgr = BRC.getStateManager().getCallEventManager();
+ if (auto CE = N->getLocationAs<CallExitBegin>()) {
+ if (auto PD = annotateConsumedSummaryMismatch(N, *CE, SM, CEMgr))
+ return PD;
+ }
+
// FIXME: We will eventually need to handle non-statement-based events
// (__attribute__((cleanup))).
if (!N->getLocation().getAs<StmtPoint>())
return nullptr;
// Check if the type state has changed.
- ProgramStateRef PrevSt = N->getFirstPred()->getState();
+ const ExplodedNode *PrevNode = N->getFirstPred();
+ ProgramStateRef PrevSt = PrevNode->getState();
ProgramStateRef CurrSt = N->getState();
const LocationContext *LCtx = N->getLocationContext();
@@ -64,11 +350,9 @@ CFRefReportVisitor::VisitNode(const ExplodedNode *N,
if (isa<ObjCArrayLiteral>(S)) {
os << "NSArray literal is an object with a +0 retain count";
- }
- else if (isa<ObjCDictionaryLiteral>(S)) {
+ } else if (isa<ObjCDictionaryLiteral>(S)) {
os << "NSDictionary literal is an object with a +0 retain count";
- }
- else if (const ObjCBoxedExpr *BL = dyn_cast<ObjCBoxedExpr>(S)) {
+ } else if (const ObjCBoxedExpr *BL = dyn_cast<ObjCBoxedExpr>(S)) {
if (isNumericLiteralExpression(BL->getSubExpr()))
os << "NSNumber literal is an object with a +0 retain count";
else {
@@ -78,83 +362,27 @@ CFRefReportVisitor::VisitNode(const ExplodedNode *N,
// We should always be able to find the boxing class interface,
// but consider this future-proofing.
- if (BoxClass)
+ if (BoxClass) {
os << *BoxClass << " b";
- else
+ } else {
os << "B";
+ }
os << "oxed expression produces an object with a +0 retain count";
}
- }
- else if (isa<ObjCIvarRefExpr>(S)) {
+ } else if (isa<ObjCIvarRefExpr>(S)) {
os << "Object loaded from instance variable";
- }
- else {
- if (const CallExpr *CE = dyn_cast<CallExpr>(S)) {
- // Get the name of the callee (if it is available).
- SVal X = CurrSt->getSValAsScalarOrLoc(CE->getCallee(), LCtx);
- if (const FunctionDecl *FD = X.getAsFunctionDecl())
- os << "Call to function '" << *FD << '\'';
- else
- os << "function call";
- }
- else {
- assert(isa<ObjCMessageExpr>(S));
- CallEventManager &Mgr = CurrSt->getStateManager().getCallEventManager();
- CallEventRef<ObjCMethodCall> Call
- = Mgr.getObjCMethodCall(cast<ObjCMessageExpr>(S), CurrSt, LCtx);
-
- switch (Call->getMessageKind()) {
- case OCM_Message:
- os << "Method";
- break;
- case OCM_PropertyAccess:
- os << "Property";
- break;
- case OCM_Subscript:
- os << "Subscript";
- break;
- }
- }
-
- if (CurrV.getObjKind() == RetEffect::CF) {
- os << " returns a Core Foundation object of type "
- << Sym->getType().getAsString() << " with a ";
- } else if (CurrV.getObjKind() == RetEffect::OS) {
- os << " returns an OSObject of type "
- << Sym->getType().getAsString() << " with a ";
- } else if (CurrV.getObjKind() == RetEffect::Generalized) {
- os << " returns an object of type " << Sym->getType().getAsString()
- << " with a ";
- } else {
- assert (CurrV.getObjKind() == RetEffect::ObjC);
- QualType T = Sym->getType();
- if (!isa<ObjCObjectPointerType>(T)) {
- os << " returns an Objective-C object with a ";
- } else {
- const ObjCObjectPointerType *PT = cast<ObjCObjectPointerType>(T);
- os << " returns an instance of "
- << PT->getPointeeType().getAsString() << " with a ";
- }
- }
-
- if (CurrV.isOwned()) {
- os << "+1 retain count";
- } else {
- assert (CurrV.isNotOwned());
- os << "+0 retain count";
- }
+ } else {
+ generateDiagnosticsForCallLike(CurrSt, LCtx, CurrV, Sym, S, os);
}
- PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
- N->getLocationContext());
+ PathDiagnosticLocation Pos(S, SM, N->getLocationContext());
return std::make_shared<PathDiagnosticEventPiece>(Pos, os.str());
}
// Gather up the effects that were performed on the object at this
// program point
SmallVector<ArgEffect, 2> AEffects;
-
const ExplodedNode *OrigNode = BRC.getNodeResolver().getOriginalNode(N);
if (const RetainSummary *Summ = SummaryLog.lookup(OrigNode)) {
// We only have summaries attached to nodes after evaluating CallExpr and
@@ -166,8 +394,7 @@ CFRefReportVisitor::VisitNode(const ExplodedNode *N,
// was ever passed as an argument.
unsigned i = 0;
- for (CallExpr::const_arg_iterator AI=CE->arg_begin(), AE=CE->arg_end();
- AI!=AE; ++AI, ++i) {
+ for (auto AI=CE->arg_begin(), AE=CE->arg_end(); AI!=AE; ++AI, ++i) {
// Retrieve the value of the argument. Is it the symbol
// we are interested in?
@@ -188,75 +415,8 @@ CFRefReportVisitor::VisitNode(const ExplodedNode *N,
}
}
- do {
- // Get the previous type state.
- RefVal PrevV = *PrevT;
-
- // Specially handle -dealloc.
- if (std::find(AEffects.begin(), AEffects.end(), Dealloc) !=
- AEffects.end()) {
- // Determine if the object's reference count was pushed to zero.
- assert(!PrevV.hasSameState(CurrV) && "The state should have changed.");
- // We may not have transitioned to 'release' if we hit an error.
- // This case is handled elsewhere.
- if (CurrV.getKind() == RefVal::Released) {
- assert(CurrV.getCombinedCounts() == 0);
- os << "Object released by directly sending the '-dealloc' message";
- break;
- }
- }
-
- // Determine if the typestate has changed.
- if (!PrevV.hasSameState(CurrV))
- switch (CurrV.getKind()) {
- case RefVal::Owned:
- case RefVal::NotOwned:
- if (PrevV.getCount() == CurrV.getCount()) {
- // Did an autorelease message get sent?
- if (PrevV.getAutoreleaseCount() == CurrV.getAutoreleaseCount())
- return nullptr;
-
- assert(PrevV.getAutoreleaseCount() < CurrV.getAutoreleaseCount());
- os << "Object autoreleased";
- break;
- }
-
- if (PrevV.getCount() > CurrV.getCount())
- os << "Reference count decremented.";
- else
- os << "Reference count incremented.";
-
- if (unsigned Count = CurrV.getCount())
- os << " The object now has a +" << Count << " retain count.";
-
- break;
-
- case RefVal::Released:
- if (CurrV.getIvarAccessHistory() ==
- RefVal::IvarAccessHistory::ReleasedAfterDirectAccess &&
- CurrV.getIvarAccessHistory() != PrevV.getIvarAccessHistory()) {
- os << "Strong instance variable relinquished. ";
- }
- os << "Object released.";
- break;
-
- case RefVal::ReturnedOwned:
- // Autoreleases can be applied after marking a node ReturnedOwned.
- if (CurrV.getAutoreleaseCount())
- return nullptr;
-
- os << "Object returned to caller as an owning reference (single "
- "retain count transferred to caller)";
- break;
-
- case RefVal::ReturnedNotOwned:
- os << "Object returned to caller with a +0 retain count";
- break;
-
- default:
- return nullptr;
- }
- } while (0);
+ if (!shouldGenerateNote(os, PrevT, CurrV, AEffects))
+ return nullptr;
if (os.str().empty())
return nullptr; // We have nothing to say!
@@ -303,9 +463,8 @@ struct AllocationInfo {
};
} // end anonymous namespace
-static AllocationInfo
-GetAllocationSite(ProgramStateManager& StateMgr, const ExplodedNode *N,
- SymbolRef Sym) {
+static AllocationInfo GetAllocationSite(ProgramStateManager &StateMgr,
+ const ExplodedNode *N, SymbolRef Sym) {
const ExplodedNode *AllocationNode = N;
const ExplodedNode *AllocationNodeInCurrentOrParentContext = N;
const MemRegion *FirstBinding = nullptr;
@@ -327,11 +486,11 @@ GetAllocationSite(ProgramStateManager& StateMgr, const ExplodedNode *N,
if (FB) {
const MemRegion *R = FB.getRegion();
- const VarRegion *VR = R->getBaseRegion()->getAs<VarRegion>();
// Do not show local variables belonging to a function other than
// where the error is reported.
- if (!VR || VR->getStackFrame() == LeakContext->getStackFrame())
- FirstBinding = R;
+ if (auto MR = dyn_cast<StackSpaceRegion>(R->getMemorySpace()))
+ if (MR->getStackFrame() == LeakContext->getStackFrame())
+ FirstBinding = R;
}
// AllocationNode is the last node in which the symbol was tracked.
@@ -340,7 +499,7 @@ GetAllocationSite(ProgramStateManager& StateMgr, const ExplodedNode *N,
// AllocationNodeInCurrentContext, is the last node in the current or
// parent context in which the symbol was tracked.
//
- // Note that the allocation site might be in the parent conext. For example,
+ // Note that the allocation site might be in the parent context. For example,
// the case where an allocation happens in a block that captures a reference
// to it and that reference is overwritten/dropped by another call to
// the block.
@@ -350,9 +509,9 @@ GetAllocationSite(ProgramStateManager& StateMgr, const ExplodedNode *N,
// Find the last init that was called on the given symbol and store the
// init method's location context.
if (!InitMethodContext)
- if (Optional<CallEnter> CEP = N->getLocation().getAs<CallEnter>()) {
+ if (auto CEP = N->getLocation().getAs<CallEnter>()) {
const Stmt *CE = CEP->getCallExpr();
- if (const ObjCMessageExpr *ME = dyn_cast_or_null<ObjCMessageExpr>(CE)) {
+ if (const auto *ME = dyn_cast_or_null<ObjCMessageExpr>(CE)) {
const Stmt *RecExpr = ME->getInstanceReceiver();
if (RecExpr) {
SVal RecV = St->getSVal(RecExpr, NContext);
@@ -362,7 +521,7 @@ GetAllocationSite(ProgramStateManager& StateMgr, const ExplodedNode *N,
}
}
- N = N->pred_empty() ? nullptr : *(N->pred_begin());
+ N = N->getFirstPred();
}
// If we are reporting a leak of the object that was allocated with alloc,
@@ -379,9 +538,11 @@ GetAllocationSite(ProgramStateManager& StateMgr, const ExplodedNode *N,
// If allocation happened in a function different from the leak node context,
// do not report the binding.
assert(N && "Could not find allocation node");
- if (N->getLocationContext() != LeakContext) {
+
+ if (AllocationNodeInCurrentOrParentContext &&
+ AllocationNodeInCurrentOrParentContext->getLocationContext() !=
+ LeakContext)
FirstBinding = nullptr;
- }
return AllocationInfo(AllocationNodeInCurrentOrParentContext,
FirstBinding,
@@ -406,8 +567,7 @@ CFRefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
// We are reporting a leak. Walk up the graph to get to the first node where
// the symbol appeared, and also get the first VarDecl that tracked object
// is stored to.
- AllocationInfo AllocI =
- GetAllocationSite(BRC.getStateManager(), EndN, Sym);
+ AllocationInfo AllocI = GetAllocationSite(BRC.getStateManager(), EndN, Sym);
const MemRegion* FirstBinding = AllocI.R;
BR.markInteresting(AllocI.InterestingMethodContext);
@@ -428,9 +588,9 @@ CFRefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
Optional<std::string> RegionDescription = describeRegion(FirstBinding);
if (RegionDescription) {
os << "object allocated and stored into '" << *RegionDescription << '\'';
+ } else {
+ os << "allocated object of type " << getPrettyTypeName(Sym->getType());
}
- else
- os << "allocated object";
// Get the retain count.
const RefVal* RV = getRefBinding(EndN->getState(), Sym);
@@ -445,11 +605,13 @@ CFRefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
os << (isa<ObjCMethodDecl>(D) ? " is returned from a method "
: " is returned from a function ");
- if (D->hasAttr<CFReturnsNotRetainedAttr>())
+ if (D->hasAttr<CFReturnsNotRetainedAttr>()) {
os << "that is annotated as CF_RETURNS_NOT_RETAINED";
- else if (D->hasAttr<NSReturnsNotRetainedAttr>())
+ } else if (D->hasAttr<NSReturnsNotRetainedAttr>()) {
os << "that is annotated as NS_RETURNS_NOT_RETAINED";
- else {
+ } else if (D->hasAttr<OSReturnsNotRetainedAttr>()) {
+ os << "that is annotated as OS_RETURNS_NOT_RETAINED";
+ } else {
if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
if (BRC.getASTContext().getLangOpts().ObjCAutoRefCount) {
os << "managed by Automatic Reference Counting";
@@ -468,14 +630,30 @@ CFRefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
" Foundation";
}
}
- }
- else
+ } else {
os << " is not referenced later in this execution path and has a retain "
"count of +" << RV->getCount();
+ }
return std::make_shared<PathDiagnosticEventPiece>(L, os.str());
}
+CFRefReport::CFRefReport(CFRefBug &D, const LangOptions &LOpts,
+ const SummaryLogTy &Log, ExplodedNode *n,
+ SymbolRef sym, bool registerVisitor)
+ : BugReport(D, D.getDescription(), n), Sym(sym) {
+ if (registerVisitor)
+ addVisitor(llvm::make_unique<CFRefReportVisitor>(sym, Log));
+}
+
+CFRefReport::CFRefReport(CFRefBug &D, const LangOptions &LOpts,
+ const SummaryLogTy &Log, ExplodedNode *n,
+ SymbolRef sym, StringRef endText)
+ : BugReport(D, D.getDescription(), endText, n) {
+
+ addVisitor(llvm::make_unique<CFRefReportVisitor>(sym, Log));
+}
+
void CFRefLeakReport::deriveParamLocation(CheckerContext &Ctx, SymbolRef sym) {
const SourceManager& SMgr = Ctx.getSourceManager();
@@ -494,7 +672,8 @@ void CFRefLeakReport::deriveParamLocation(CheckerContext &Ctx, SymbolRef sym) {
}
}
-void CFRefLeakReport::deriveAllocLocation(CheckerContext &Ctx,SymbolRef sym) {
+void CFRefLeakReport::deriveAllocLocation(CheckerContext &Ctx,
+ SymbolRef sym) {
// Most bug reports are cached at the location where they occurred.
// With leaks, we want to unique them by the location where they were
// allocated, and only report a single path. To do this, we need to find
@@ -508,7 +687,7 @@ void CFRefLeakReport::deriveAllocLocation(CheckerContext &Ctx,SymbolRef sym) {
const SourceManager& SMgr = Ctx.getSourceManager();
AllocationInfo AllocI =
- GetAllocationSite(Ctx.getStateManager(), getErrorNode(), sym);
+ GetAllocationSite(Ctx.getStateManager(), getErrorNode(), sym);
AllocNode = AllocI.N;
AllocBinding = AllocI.R;
@@ -536,8 +715,7 @@ void CFRefLeakReport::deriveAllocLocation(CheckerContext &Ctx,SymbolRef sym) {
UniqueingDecl = AllocNode->getLocationContext()->getDecl();
}
-void CFRefLeakReport::createDescription(CheckerContext &Ctx,
- bool IncludeAllocationLine) {
+void CFRefLeakReport::createDescription(CheckerContext &Ctx) {
assert(Location.isValid() && UniqueingDecl && UniqueingLocation.isValid());
Description.clear();
llvm::raw_string_ostream os(Description);
@@ -546,25 +724,24 @@ void CFRefLeakReport::createDescription(CheckerContext &Ctx,
Optional<std::string> RegionDescription = describeRegion(AllocBinding);
if (RegionDescription) {
os << " stored into '" << *RegionDescription << '\'';
- if (IncludeAllocationLine) {
- FullSourceLoc SL(AllocStmt->getBeginLoc(), Ctx.getSourceManager());
- os << " (allocated on line " << SL.getSpellingLineNumber() << ")";
- }
+ } else {
+
+ // If we can't figure out the name, just supply the type information.
+ os << " of type " << getPrettyTypeName(Sym->getType());
}
}
CFRefLeakReport::CFRefLeakReport(CFRefBug &D, const LangOptions &LOpts,
const SummaryLogTy &Log,
ExplodedNode *n, SymbolRef sym,
- CheckerContext &Ctx,
- bool IncludeAllocationLine)
+ CheckerContext &Ctx)
: CFRefReport(D, LOpts, Log, n, sym, false) {
deriveAllocLocation(Ctx, sym);
if (!AllocBinding)
deriveParamLocation(Ctx, sym);
- createDescription(Ctx, IncludeAllocationLine);
+ createDescription(Ctx);
addVisitor(llvm::make_unique<CFRefLeakReportVisitor>(sym, Log));
}
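
To make the new wording concrete, a hedged sketch of what getPrettyTypeName (defined earlier in this file) contributes to the leak text; the type names below are illustrative:

class OSArray {};
typedef OSArray *OSArrayPtr;
// getPrettyTypeName on 'OSArray *'  -> "OSArray"    (pointee record name)
// getPrettyTypeName on 'OSArrayPtr' -> "OSArrayPtr" (typedefs are kept as written)
// getPrettyTypeName on 'int'        -> "int"        (no pointee; plain serialization)
// so a leak with no named binding is now described as, e.g.,
// "allocated object of type OSArray" instead of just "allocated object".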
diff --git a/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h b/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h
index 58abd67039..a30f62ac34 100644
--- a/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h
+++ b/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h
@@ -37,126 +37,21 @@ public:
virtual bool isLeak() const { return false; }
};
-class UseAfterRelease : public CFRefBug {
-public:
- UseAfterRelease(const CheckerBase *checker)
- : CFRefBug(checker, "Use-after-release") {}
-
- const char *getDescription() const override {
- return "Reference-counted object is used after it is released";
- }
-};
-
-class BadRelease : public CFRefBug {
-public:
- BadRelease(const CheckerBase *checker) : CFRefBug(checker, "Bad release") {}
-
- const char *getDescription() const override {
- return "Incorrect decrement of the reference count of an object that is "
- "not owned at this point by the caller";
- }
-};
-
-class DeallocNotOwned : public CFRefBug {
-public:
- DeallocNotOwned(const CheckerBase *checker)
- : CFRefBug(checker, "-dealloc sent to non-exclusively owned object") {}
-
- const char *getDescription() const override {
- return "-dealloc sent to object that may be referenced elsewhere";
- }
-};
-
-class OverAutorelease : public CFRefBug {
-public:
- OverAutorelease(const CheckerBase *checker)
- : CFRefBug(checker, "Object autoreleased too many times") {}
-
- const char *getDescription() const override {
- return "Object autoreleased too many times";
- }
-};
-
-class ReturnedNotOwnedForOwned : public CFRefBug {
-public:
- ReturnedNotOwnedForOwned(const CheckerBase *checker)
- : CFRefBug(checker, "Method should return an owned object") {}
-
- const char *getDescription() const override {
- return "Object with a +0 retain count returned to caller where a +1 "
- "(owning) retain count is expected";
- }
-};
-
-class Leak : public CFRefBug {
-public:
- Leak(const CheckerBase *checker, StringRef name) : CFRefBug(checker, name) {
- // Leaks should not be reported if they are post-dominated by a sink.
- setSuppressOnSink(true);
- }
-
- const char *getDescription() const override { return ""; }
-
- bool isLeak() const override { return true; }
-};
-
typedef ::llvm::DenseMap<const ExplodedNode *, const RetainSummary *>
SummaryLogTy;
-/// Visitors.
-
-class CFRefReportVisitor : public BugReporterVisitor {
+class CFRefReport : public BugReport {
protected:
SymbolRef Sym;
- const SummaryLogTy &SummaryLog;
-
-public:
- CFRefReportVisitor(SymbolRef sym, const SummaryLogTy &log)
- : Sym(sym), SummaryLog(log) {}
-
- void Profile(llvm::FoldingSetNodeID &ID) const override {
- static int x = 0;
- ID.AddPointer(&x);
- ID.AddPointer(Sym);
- }
-
- std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
- BugReporterContext &BRC,
- BugReport &BR) override;
-
- std::shared_ptr<PathDiagnosticPiece> getEndPath(BugReporterContext &BRC,
- const ExplodedNode *N,
- BugReport &BR) override;
-};
-
-class CFRefLeakReportVisitor : public CFRefReportVisitor {
-public:
- CFRefLeakReportVisitor(SymbolRef sym,
- const SummaryLogTy &log)
- : CFRefReportVisitor(sym, log) {}
-
- std::shared_ptr<PathDiagnosticPiece> getEndPath(BugReporterContext &BRC,
- const ExplodedNode *N,
- BugReport &BR) override;
-};
-
-class CFRefReport : public BugReport {
public:
CFRefReport(CFRefBug &D, const LangOptions &LOpts,
const SummaryLogTy &Log, ExplodedNode *n, SymbolRef sym,
- bool registerVisitor = true)
- : BugReport(D, D.getDescription(), n) {
- if (registerVisitor)
- addVisitor(llvm::make_unique<CFRefReportVisitor>(sym, Log));
- }
+ bool registerVisitor = true);
CFRefReport(CFRefBug &D, const LangOptions &LOpts,
const SummaryLogTy &Log, ExplodedNode *n, SymbolRef sym,
- StringRef endText)
- : BugReport(D, D.getDescription(), endText, n) {
- addVisitor(llvm::make_unique<CFRefReportVisitor>(sym, Log));
- }
+ StringRef endText);
llvm::iterator_range<ranges_iterator> getRanges() override {
const CFRefBug& BugTy = static_cast<CFRefBug&>(getBugType());
@@ -176,13 +71,12 @@ class CFRefLeakReport : public CFRefReport {
// Finds the location where a leak warning for 'sym' should be raised.
void deriveAllocLocation(CheckerContext &Ctx, SymbolRef sym);
// Produces description of a leak warning which is printed on the console.
- void createDescription(CheckerContext &Ctx, bool IncludeAllocationLine);
+ void createDescription(CheckerContext &Ctx);
public:
CFRefLeakReport(CFRefBug &D, const LangOptions &LOpts,
const SummaryLogTy &Log, ExplodedNode *n, SymbolRef sym,
- CheckerContext &Ctx,
- bool IncludeAllocationLine);
+ CheckerContext &Ctx);
PathDiagnosticLocation getLocation(const SourceManager &SM) const override {
assert(Location.isValid());
diff --git a/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp b/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
index c5e826a84b..e866f06ebb 100644
--- a/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
@@ -87,7 +87,7 @@ static void emitBug(CheckerContext &C, BuiltinBug &BT, const Expr *RetE,
auto Report = llvm::make_unique<BugReport>(BT, BT.getDescription(), N);
Report->addRange(RetE->getSourceRange());
- bugreporter::trackNullOrUndefValue(N, TrackingE ? TrackingE : RetE, *Report);
+ bugreporter::trackExpressionValue(N, TrackingE ? TrackingE : RetE, *Report);
C.emitReport(std::move(Report));
}
diff --git a/lib/StaticAnalyzer/Checkers/RunLoopAutoreleaseLeakChecker.cpp b/lib/StaticAnalyzer/Checkers/RunLoopAutoreleaseLeakChecker.cpp
index 55516a34d1..3f3477b928 100644
--- a/lib/StaticAnalyzer/Checkers/RunLoopAutoreleaseLeakChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/RunLoopAutoreleaseLeakChecker.cpp
@@ -58,13 +58,12 @@ public:
} // end anonymous namespace
-
-using TriBoolTy = Optional<bool>;
-using MemoizationMapTy = llvm::DenseMap<const Stmt *, Optional<TriBoolTy>>;
-
-static TriBoolTy
-seenBeforeRec(const Stmt *Parent, const Stmt *A, const Stmt *B,
- MemoizationMapTy &Memoization) {
+/// \return Whether {@code A} occurs before {@code B} in traversal of
+/// {@code Parent}.
+/// Conceptually a very incomplete/unsound approximation of happens-before
+/// relationship (A is likely to be evaluated before B),
+/// but useful enough in this case.
+static bool seenBefore(const Stmt *Parent, const Stmt *A, const Stmt *B) {
for (const Stmt *C : Parent->children()) {
if (!C) continue;
@@ -74,26 +73,9 @@ seenBeforeRec(const Stmt *Parent, const Stmt *A, const Stmt *B,
if (C == B)
return false;
- Optional<TriBoolTy> &Cached = Memoization[C];
- if (!Cached)
- Cached = seenBeforeRec(C, A, B, Memoization);
-
- if (Cached->hasValue())
- return Cached->getValue();
+ return seenBefore(C, A, B);
}
-
- return None;
-}
-
-/// \return Whether {@code A} occurs before {@code B} in traversal of
-/// {@code Parent}.
-/// Conceptually a very incomplete/unsound approximation of happens-before
-/// relationship (A is likely to be evaluated before B),
-/// but useful enough in this case.
-static bool seenBefore(const Stmt *Parent, const Stmt *A, const Stmt *B) {
- MemoizationMapTy Memoization;
- TriBoolTy Val = seenBeforeRec(Parent, A, B, Memoization);
- return Val.getValue();
+ return false;
}
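
A tiny worked example of the simplified ordering check (sketch; A and B name the two statements of interest):

void body() {
  int a = 0; // A
  int b = a; // B
}
// Both statements are direct children of the function body, so
// seenBefore(Body, A, B) == true and seenBefore(Body, B, A) == false.
// When neither child matches, the walk recurses into the first non-null child
// and returns that result, which is the deliberately incomplete approximation
// described in the comment above.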
static void emitDiagnostics(BoundNodes &Match,
diff --git a/lib/StaticAnalyzer/Checkers/StreamChecker.cpp b/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
index d77975559e..b383411068 100644
--- a/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
@@ -383,26 +383,26 @@ ProgramStateRef StreamChecker::CheckDoubleClose(const CallExpr *CE,
void StreamChecker::checkDeadSymbols(SymbolReaper &SymReaper,
CheckerContext &C) const {
+ ProgramStateRef state = C.getState();
+
// TODO: Clean up the state.
- for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
- E = SymReaper.dead_end(); I != E; ++I) {
- SymbolRef Sym = *I;
- ProgramStateRef state = C.getState();
- const StreamState *SS = state->get<StreamMap>(Sym);
- if (!SS)
+ const StreamMapTy &Map = state->get<StreamMap>();
+ for (const auto &I: Map) {
+ SymbolRef Sym = I.first;
+ const StreamState &SS = I.second;
+ if (!SymReaper.isDead(Sym) || !SS.isOpened())
continue;
- if (SS->isOpened()) {
- ExplodedNode *N = C.generateErrorNode();
- if (N) {
- if (!BT_ResourceLeak)
- BT_ResourceLeak.reset(new BuiltinBug(
- this, "Resource Leak",
- "Opened File never closed. Potential Resource leak."));
- C.emitReport(llvm::make_unique<BugReport>(
- *BT_ResourceLeak, BT_ResourceLeak->getDescription(), N));
- }
- }
+ ExplodedNode *N = C.generateErrorNode();
+ if (!N)
+ return;
+
+ if (!BT_ResourceLeak)
+ BT_ResourceLeak.reset(
+ new BuiltinBug(this, "Resource Leak",
+ "Opened File never closed. Potential Resource leak."));
+ C.emitReport(llvm::make_unique<BugReport>(
+ *BT_ResourceLeak, BT_ResourceLeak->getDescription(), N));
}
}
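
For reference, a minimal sketch of the pattern this cleanup still reports (the behavior is unchanged; only the iteration over StreamMap is new):

#include <stdio.h>

void read_one(void) {
  FILE *fp = fopen("/tmp/data", "r");
  if (!fp)
    return;
  fgetc(fp);
  // fclose(fp) is missing, so when the stream symbol dies the checker emits
  // "Opened File never closed. Potential Resource leak."
}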
diff --git a/lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp b/lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp
index eed1efd10e..515c98cd11 100644
--- a/lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp
@@ -212,20 +212,26 @@ private:
/// the negation of \p Antecedent.
/// Checks NonNullImplicationMap and assumes \p Antecedent otherwise.
ProgramStateRef addImplication(SymbolRef Antecedent,
- ProgramStateRef State,
+ ProgramStateRef InputState,
bool Negated) const {
- SValBuilder &SVB = State->getStateManager().getSValBuilder();
+ if (!InputState)
+ return nullptr;
+ SValBuilder &SVB = InputState->getStateManager().getSValBuilder();
const SymbolRef *Consequent =
- Negated ? State->get<NonNullImplicationMap>(Antecedent)
- : State->get<NullImplicationMap>(Antecedent);
+ Negated ? InputState->get<NonNullImplicationMap>(Antecedent)
+ : InputState->get<NullImplicationMap>(Antecedent);
if (!Consequent)
- return State;
+ return InputState;
SVal AntecedentV = SVB.makeSymbolVal(Antecedent);
- if ((Negated && State->isNonNull(AntecedentV).isConstrainedTrue())
- || (!Negated && State->isNull(AntecedentV).isConstrainedTrue())) {
+ ProgramStateRef State = InputState;
+
+ if ((Negated && InputState->isNonNull(AntecedentV).isConstrainedTrue())
+ || (!Negated && InputState->isNull(AntecedentV).isConstrainedTrue())) {
SVal ConsequentS = SVB.makeSymbolVal(*Consequent);
- State = State->assume(ConsequentS.castAs<DefinedSVal>(), Negated);
+ State = InputState->assume(ConsequentS.castAs<DefinedSVal>(), Negated);
+ if (!State)
+ return nullptr;
// Drop implications from the map.
if (Negated) {
diff --git a/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp b/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
index 934ee63318..9e75bba5eb 100644
--- a/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
@@ -98,7 +98,7 @@ void UndefBranchChecker::checkBranchCondition(const Stmt *Condition,
// Emit the bug report.
auto R = llvm::make_unique<BugReport>(*BT, BT->getDescription(), N);
- bugreporter::trackNullOrUndefValue(N, Ex, *R);
+ bugreporter::trackExpressionValue(N, Ex, *R);
R->addRange(Ex->getSourceRange());
Ctx.emitReport(std::move(R));
diff --git a/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp b/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
index 47faf699f9..f30f32e959 100644
--- a/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
@@ -174,10 +174,10 @@ void UndefResultChecker::checkPostStmt(const BinaryOperator *B,
auto report = llvm::make_unique<BugReport>(*BT, OS.str(), N);
if (Ex) {
report->addRange(Ex->getSourceRange());
- bugreporter::trackNullOrUndefValue(N, Ex, *report);
+ bugreporter::trackExpressionValue(N, Ex, *report);
}
else
- bugreporter::trackNullOrUndefValue(N, B, *report);
+ bugreporter::trackExpressionValue(N, B, *report);
C.emitReport(std::move(report));
}
diff --git a/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp b/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
index fe07eafd28..5a704eb41c 100644
--- a/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
@@ -55,7 +55,7 @@ UndefinedArraySubscriptChecker::checkPreStmt(const ArraySubscriptExpr *A,
// Generate a report for this bug.
auto R = llvm::make_unique<BugReport>(*BT, BT->getName(), N);
R->addRange(A->getIdx()->getSourceRange());
- bugreporter::trackNullOrUndefValue(N, A->getIdx(), *R);
+ bugreporter::trackExpressionValue(N, A->getIdx(), *R);
C.emitReport(std::move(R));
}
diff --git a/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp b/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
index 2ef6855ba6..a0bc857c49 100644
--- a/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
@@ -112,7 +112,7 @@ void UndefinedAssignmentChecker::checkBind(SVal location, SVal val,
auto R = llvm::make_unique<BugReport>(*BT, OS.str(), N);
if (ex) {
R->addRange(ex->getSourceRange());
- bugreporter::trackNullOrUndefValue(N, ex, *R);
+ bugreporter::trackExpressionValue(N, ex, *R);
}
C.emitReport(std::move(R));
}
diff --git a/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObject.h b/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObject.h
index d10b862ea0..c3291a21c1 100644
--- a/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObject.h
+++ b/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObject.h
@@ -21,7 +21,7 @@
// `-analyzer-config alpha.cplusplus.UninitializedObject:Pedantic=true`.
//
// - "NotesAsWarnings" (boolean). If set to true, the checker will emit a
-// warning for each uninitalized field, as opposed to emitting one warning
+// warning for each uninitialized field, as opposed to emitting one warning
// per constructor call, and listing the uninitialized fields that belong
// to it in notes. Defaults to false.
//
@@ -215,7 +215,11 @@ public:
const TypedValueRegion *const R,
const UninitObjCheckerOptions &Opts);
- const UninitFieldMap &getUninitFields() { return UninitFields; }
+ /// Returns the modified state and a map of (uninitialized region,
+ /// note message) pairs.
+ std::pair<ProgramStateRef, const UninitFieldMap &> getResults() {
+ return {State, UninitFields};
+ }
/// Returns whether the analyzed region contains at least one initialized
/// field. Note that this includes subfields as well, not just direct ones,
@@ -230,7 +234,7 @@ private:
// * every node is an object that is
// - a union
// - a non-union record
- // - dereferencable (see isDereferencableType())
+ // - dereferenceable (see isDereferencableType())
// - an array
// - of a primitive type (see isPrimitiveType())
// * the parent of each node is the object that contains it
@@ -271,7 +275,7 @@ private:
// this->iptr (pointee uninit)
// this->bptr (pointer uninit)
//
- // We'll traverse each node of the above graph with the appropiate one of
+ // We'll traverse each node of the above graph with the appropriate one of
// these methods:
/// Checks the region of a union object, and returns true if no field is
@@ -296,14 +300,16 @@ private:
// TODO: Add a support for nonloc::LocAsInteger.
/// Processes LocalChain and attempts to insert it into UninitFields. Returns
- /// true on success.
+ /// true on success. Also adds the head of the list and \p PointeeR (if
+ /// supplied) to the GDM as already analyzed objects.
///
/// Since this class analyzes regions with recursion, we'll only store
/// references to temporary FieldNode objects created on the stack. This means
/// that after analyzing a leaf of the directed tree described above, the
/// elements LocalChain references will be destructed, so we can't store it
/// directly.
- bool addFieldToUninits(FieldChainInfo LocalChain);
+ bool addFieldToUninits(FieldChainInfo LocalChain,
+ const MemRegion *PointeeR = nullptr);
};
/// Returns true if T is a primitive type. An object of a primitive type only
diff --git a/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp b/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp
index 50ab7c0a0e..94f664ab93 100644
--- a/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp
@@ -28,9 +28,14 @@
using namespace clang;
using namespace clang::ento;
+/// We'll mark fields (and pointees of fields) that are confirmed to be
+/// uninitialized as already analyzed.
+REGISTER_SET_WITH_PROGRAMSTATE(AnalyzedRegions, const MemRegion *)
+
namespace {
-class UninitializedObjectChecker : public Checker<check::EndFunction> {
+class UninitializedObjectChecker
+ : public Checker<check::EndFunction, check::DeadSymbols> {
std::unique_ptr<BuiltinBug> BT_uninitField;
public:
@@ -39,7 +44,9 @@ public:
UninitializedObjectChecker()
: BT_uninitField(new BuiltinBug(this, "Uninitialized fields")) {}
+
void checkEndFunction(const ReturnStmt *RS, CheckerContext &C) const;
+ void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
};
/// A basic field type, that is not a pointer or a reference, it's dynamic and
@@ -96,12 +103,11 @@ public:
// Utility function declarations.
-/// Returns the object that was constructed by CtorDecl, or None if that isn't
-/// possible.
-// TODO: Refactor this function so that it returns the constructed object's
-// region.
-static Optional<nonloc::LazyCompoundVal>
-getObjectVal(const CXXConstructorDecl *CtorDecl, CheckerContext &Context);
+/// Returns the region that was constructed by CtorDecl, or nullptr if that
+/// isn't possible.
+static const TypedValueRegion *
+getConstructedRegion(const CXXConstructorDecl *CtorDecl,
+ CheckerContext &Context);
/// Checks whether the object constructed by \p Ctor will be analyzed later
/// (e.g. if the object is a field of another object, in which case we'd check
@@ -135,20 +141,26 @@ void UninitializedObjectChecker::checkEndFunction(
if (willObjectBeAnalyzedLater(CtorDecl, Context))
return;
- Optional<nonloc::LazyCompoundVal> Object = getObjectVal(CtorDecl, Context);
- if (!Object)
+ const TypedValueRegion *R = getConstructedRegion(CtorDecl, Context);
+ if (!R)
return;
- FindUninitializedFields F(Context.getState(), Object->getRegion(), Opts);
+ FindUninitializedFields F(Context.getState(), R, Opts);
+
+ std::pair<ProgramStateRef, const UninitFieldMap &> UninitInfo =
+ F.getResults();
- const UninitFieldMap &UninitFields = F.getUninitFields();
+ ProgramStateRef UpdatedState = UninitInfo.first;
+ const UninitFieldMap &UninitFields = UninitInfo.second;
- if (UninitFields.empty())
+ if (UninitFields.empty()) {
+ Context.addTransition(UpdatedState);
return;
+ }
// There are uninitialized fields in the record.
- ExplodedNode *Node = Context.generateNonFatalErrorNode(Context.getState());
+ ExplodedNode *Node = Context.generateNonFatalErrorNode(UpdatedState);
if (!Node)
return;
@@ -189,6 +201,15 @@ void UninitializedObjectChecker::checkEndFunction(
Context.emitReport(std::move(Report));
}
+void UninitializedObjectChecker::checkDeadSymbols(SymbolReaper &SR,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ for (const MemRegion *R : State->get<AnalyzedRegions>()) {
+ if (!SR.isLiveRegion(R))
+ State = State->remove<AnalyzedRegions>(R);
+ }
+}
+
//===----------------------------------------------------------------------===//
// Methods for FindUninitializedFields.
//===----------------------------------------------------------------------===//
@@ -206,17 +227,34 @@ FindUninitializedFields::FindUninitializedFields(
UninitFields.clear();
}
-bool FindUninitializedFields::addFieldToUninits(FieldChainInfo Chain) {
+bool FindUninitializedFields::addFieldToUninits(FieldChainInfo Chain,
+ const MemRegion *PointeeR) {
+ const FieldRegion *FR = Chain.getUninitRegion();
+
+ assert((PointeeR || !isDereferencableType(FR->getDecl()->getType())) &&
+ "One must also pass the pointee region as a parameter for "
+ "dereferenceable fields!");
+
+ if (State->contains<AnalyzedRegions>(FR))
+ return false;
+
+ if (PointeeR) {
+ if (State->contains<AnalyzedRegions>(PointeeR)) {
+ return false;
+ }
+ State = State->add<AnalyzedRegions>(PointeeR);
+ }
+
+ State = State->add<AnalyzedRegions>(FR);
+
if (State->getStateManager().getContext().getSourceManager().isInSystemHeader(
- Chain.getUninitRegion()->getDecl()->getLocation()))
+ FR->getDecl()->getLocation()))
return false;
UninitFieldMap::mapped_type NoteMsgBuf;
llvm::raw_svector_ostream OS(NoteMsgBuf);
Chain.printNoteMsg(OS);
- return UninitFields
- .insert(std::make_pair(Chain.getUninitRegion(), std::move(NoteMsgBuf)))
- .second;
+ return UninitFields.insert({FR, std::move(NoteMsgBuf)}).second;
}
bool FindUninitializedFields::isNonUnionUninit(const TypedValueRegion *R,
@@ -400,25 +438,27 @@ static void printTail(llvm::raw_ostream &Out,
// Utility functions.
//===----------------------------------------------------------------------===//
-static Optional<nonloc::LazyCompoundVal>
-getObjectVal(const CXXConstructorDecl *CtorDecl, CheckerContext &Context) {
+static const TypedValueRegion *
+getConstructedRegion(const CXXConstructorDecl *CtorDecl,
+ CheckerContext &Context) {
- Loc ThisLoc = Context.getSValBuilder().getCXXThis(CtorDecl->getParent(),
+ Loc ThisLoc = Context.getSValBuilder().getCXXThis(CtorDecl,
Context.getStackFrame());
- // Getting the value for 'this'.
- SVal This = Context.getState()->getSVal(ThisLoc);
- // Getting the value for '*this'.
- SVal Object = Context.getState()->getSVal(This.castAs<Loc>());
+ SVal ObjectV = Context.getState()->getSVal(ThisLoc);
+
+ auto *R = ObjectV.getAsRegion()->getAs<TypedValueRegion>();
+ if (R && !R->getValueType()->getAsCXXRecordDecl())
+ return nullptr;
- return Object.getAs<nonloc::LazyCompoundVal>();
+ return R;
}
static bool willObjectBeAnalyzedLater(const CXXConstructorDecl *Ctor,
CheckerContext &Context) {
- Optional<nonloc::LazyCompoundVal> CurrentObject = getObjectVal(Ctor, Context);
- if (!CurrentObject)
+ const TypedValueRegion *CurrRegion = getConstructedRegion(Ctor, Context);
+ if (!CurrRegion)
return false;
const LocationContext *LC = Context.getLocationContext();
@@ -429,14 +469,14 @@ static bool willObjectBeAnalyzedLater(const CXXConstructorDecl *Ctor,
if (!OtherCtor)
continue;
- Optional<nonloc::LazyCompoundVal> OtherObject =
- getObjectVal(OtherCtor, Context);
- if (!OtherObject)
+ const TypedValueRegion *OtherRegion =
+ getConstructedRegion(OtherCtor, Context);
+ if (!OtherRegion)
continue;
- // If the CurrentObject is a subregion of OtherObject, it will be analyzed
- // during the analysis of OtherObject.
- if (CurrentObject->getRegion()->isSubRegionOf(OtherObject->getRegion()))
+ // If the CurrRegion is a subregion of OtherRegion, it will be analyzed
+ // during the analysis of OtherRegion.
+ if (CurrRegion->isSubRegionOf(OtherRegion))
return true;
}
@@ -487,12 +527,12 @@ void ento::registerUninitializedObjectChecker(CheckerManager &Mgr) {
UninitObjCheckerOptions &ChOpts = Chk->Opts;
ChOpts.IsPedantic =
- AnOpts.getBooleanOption("Pedantic", /*DefaultVal*/ false, Chk);
+ AnOpts.getCheckerBooleanOption("Pedantic", /*DefaultVal*/ false, Chk);
ChOpts.ShouldConvertNotesToWarnings =
- AnOpts.getBooleanOption("NotesAsWarnings", /*DefaultVal*/ false, Chk);
- ChOpts.CheckPointeeInitialization = AnOpts.getBooleanOption(
+ AnOpts.getCheckerBooleanOption("NotesAsWarnings", /*DefaultVal*/ false, Chk);
+ ChOpts.CheckPointeeInitialization = AnOpts.getCheckerBooleanOption(
"CheckPointeeInitialization", /*DefaultVal*/ false, Chk);
ChOpts.IgnoredRecordsWithFieldPattern =
- AnOpts.getOptionAsString("IgnoreRecordsWithField",
+ AnOpts.getCheckerStringOption("IgnoreRecordsWithField",
/*DefaultVal*/ "", Chk);
}
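
A minimal sketch of what the checker reports at the end of a constructor call (the field names are illustrative):

struct Pair {
  int a;
  int b;
  Pair(int x) : a(x) {} // 'b' is never written
};

void use() {
  Pair p(1); // one warning for the constructor call, with a note naming 'b'
  (void)p;
}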
diff --git a/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedPointee.cpp b/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedPointee.cpp
index 623ba6b3ff..ae53f00b0b 100644
--- a/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedPointee.cpp
+++ b/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedPointee.cpp
@@ -89,15 +89,39 @@ public:
}
};
+/// Represents a Loc field that points to itself.
+class CyclicLocField final : public FieldNode {
+
+public:
+ CyclicLocField(const FieldRegion *FR) : FieldNode(FR) {}
+
+ virtual void printNoteMsg(llvm::raw_ostream &Out) const override {
+ Out << "object references itself ";
+ }
+
+ virtual void printPrefix(llvm::raw_ostream &Out) const override {}
+
+ virtual void printNode(llvm::raw_ostream &Out) const override {
+ Out << getVariableName(getDecl());
+ }
+
+ virtual void printSeparator(llvm::raw_ostream &Out) const override {
+ llvm_unreachable("CyclicLocField objects must be the last node of the "
+ "fieldchain!");
+ }
+};
+
} // end of anonymous namespace
// Utility function declarations.
-/// Returns whether \p T can be (transitively) dereferenced to a void pointer
-/// type (void*, void**, ...).
-static bool isVoidPointer(QualType T);
-
-using DereferenceInfo = std::pair<const TypedValueRegion *, bool>;
+struct DereferenceInfo {
+ const TypedValueRegion *R;
+ const bool NeedsCastBack;
+ const bool IsCyclic;
+ DereferenceInfo(const TypedValueRegion *R, bool NCB, bool IC)
+ : R(R), NeedsCastBack(NCB), IsCyclic(IC) {}
+};
/// Dereferences \p FR and returns the pointee's region, and whether it
/// needs to be cast back to its location type. If for whatever reason
@@ -105,6 +129,10 @@ using DereferenceInfo = std::pair<const TypedValueRegion *, bool>;
static llvm::Optional<DereferenceInfo> dereference(ProgramStateRef State,
const FieldRegion *FR);
+/// Returns whether \p T can be (transitively) dereferenced to a void pointer
+/// type (void*, void**, ...).
+static bool isVoidPointer(QualType T);
+
//===----------------------------------------------------------------------===//
// Methods for FindUninitializedFields.
//===----------------------------------------------------------------------===//
@@ -116,7 +144,7 @@ bool FindUninitializedFields::isDereferencableUninit(
assert((isDereferencableType(FR->getDecl()->getType()) ||
V.getAs<nonloc::LocAsInteger>()) &&
- "This method only checks dereferencable objects!");
+ "This method only checks dereferenceable objects!");
if (V.isUnknown() || V.getAs<loc::ConcreteInt>()) {
IsAnyFieldInitialized = true;
@@ -125,7 +153,7 @@ bool FindUninitializedFields::isDereferencableUninit(
if (V.isUndef()) {
return addFieldToUninits(
- LocalChain.add(LocField(FR, /*IsDereferenced*/ false)));
+ LocalChain.add(LocField(FR, /*IsDereferenced*/ false)), FR);
}
if (!Opts.CheckPointeeInitialization) {
@@ -141,8 +169,11 @@ bool FindUninitializedFields::isDereferencableUninit(
return false;
}
- const TypedValueRegion *R = DerefInfo->first;
- const bool NeedsCastBack = DerefInfo->second;
+ if (DerefInfo->IsCyclic)
+ return addFieldToUninits(LocalChain.add(CyclicLocField(FR)), FR);
+
+ const TypedValueRegion *R = DerefInfo->R;
+ const bool NeedsCastBack = DerefInfo->NeedsCastBack;
QualType DynT = R->getLocationType();
QualType PointeeT = DynT->getPointeeType();
@@ -156,8 +187,9 @@ bool FindUninitializedFields::isDereferencableUninit(
if (PointeeT->isUnionType()) {
if (isUnionUninit(R)) {
if (NeedsCastBack)
- return addFieldToUninits(LocalChain.add(NeedsCastLocField(FR, DynT)));
- return addFieldToUninits(LocalChain.add(LocField(FR)));
+ return addFieldToUninits(LocalChain.add(NeedsCastLocField(FR, DynT)),
+ R);
+ return addFieldToUninits(LocalChain.add(LocField(FR)), R);
} else {
IsAnyFieldInitialized = true;
return false;
@@ -177,8 +209,8 @@ bool FindUninitializedFields::isDereferencableUninit(
if (isPrimitiveUninit(PointeeV)) {
if (NeedsCastBack)
- return addFieldToUninits(LocalChain.add(NeedsCastLocField(FR, DynT)));
- return addFieldToUninits(LocalChain.add(LocField(FR)));
+ return addFieldToUninits(LocalChain.add(NeedsCastLocField(FR, DynT)), R);
+ return addFieldToUninits(LocalChain.add(LocField(FR)), R);
}
IsAnyFieldInitialized = true;
@@ -189,15 +221,6 @@ bool FindUninitializedFields::isDereferencableUninit(
// Utility functions.
//===----------------------------------------------------------------------===//
-static bool isVoidPointer(QualType T) {
- while (!T.isNull()) {
- if (T->isVoidPointerType())
- return true;
- T = T->getPointeeType();
- }
- return false;
-}
-
static llvm::Optional<DereferenceInfo> dereference(ProgramStateRef State,
const FieldRegion *FR) {
@@ -229,9 +252,8 @@ static llvm::Optional<DereferenceInfo> dereference(ProgramStateRef State,
return None;
// We found a cyclic pointer, like int *ptr = (int *)&ptr.
- // TODO: Should we report these fields too?
if (!VisitedRegions.insert(R).second)
- return None;
+ return DereferenceInfo{R, NeedsCastBack, /*IsCyclic*/ true};
DynT = R->getLocationType();
// In order to ensure that this loop terminates, we're also checking the
@@ -248,5 +270,14 @@ static llvm::Optional<DereferenceInfo> dereference(ProgramStateRef State,
R = R->getSuperRegion()->getAs<TypedValueRegion>();
}
- return std::make_pair(R, NeedsCastBack);
+ return DereferenceInfo{R, NeedsCastBack, /*IsCyclic*/ false};
+}
+
+static bool isVoidPointer(QualType T) {
+ while (!T.isNull()) {
+ if (T->isVoidPointerType())
+ return true;
+ T = T->getPointeeType();
+ }
+ return false;
}
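
The new CyclicLocField node covers the self-referencing case that dereference() previously dropped with a TODO. A minimal sketch of the kind of field it now describes (the struct is made up for illustration; the report text comes from printNoteMsg above):

    struct S {
      int *Ptr;
      // Ptr points to its own storage, the `int *ptr = (int *)&ptr` case
      // mentioned in the comment above; such fields are now reported as
      // "object references itself" instead of being silently skipped.
      S() : Ptr(reinterpret_cast<int *>(&Ptr)) {}
    };
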
diff --git a/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp b/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
index a6b50dc377..baf9aa0b57 100644
--- a/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
@@ -314,7 +314,7 @@ bool UnixAPIChecker::ReportZeroByteAllocation(CheckerContext &C,
auto report = llvm::make_unique<BugReport>(*BT_mallocZero, os.str(), N);
report->addRange(arg->getSourceRange());
- bugreporter::trackNullOrUndefValue(N, arg, *report);
+ bugreporter::trackExpressionValue(N, arg, *report);
C.emitReport(std::move(report));
return true;
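
trackNullOrUndefValue is renamed to trackExpressionValue here and in the remaining call sites below; the call shape is unchanged. A sketch of the updated pattern, where BT, Msg and ArgE stand in for a checker's own bug type, message and argument expression:

    auto Report = llvm::make_unique<BugReport>(*BT, Msg, N);
    Report->addRange(ArgE->getSourceRange());
    // Walks the ExplodedGraph backwards to explain where ArgE's value came from.
    bugreporter::trackExpressionValue(N, ArgE, *Report);
    C.emitReport(std::move(Report));
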
diff --git a/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp b/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
index 6c67aa4e31..f879891703 100644
--- a/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
@@ -232,7 +232,7 @@ bool UnreachableCodeChecker::isInvalidPath(const CFGBlock *CB,
if (!pred)
return false;
- // Get the predecessor block's terminator conditon
+ // Get the predecessor block's terminator condition
const Stmt *cond = pred->getTerminatorCondition();
//assert(cond && "CFGBlock's predecessor has a terminator condition");
diff --git a/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp b/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
index 2584f20118..58ed463476 100644
--- a/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
@@ -74,7 +74,7 @@ void VLASizeChecker::reportBug(
auto report = llvm::make_unique<BugReport>(*BT, os.str(), N);
report->addVisitor(std::move(Visitor));
report->addRange(SizeE->getSourceRange());
- bugreporter::trackNullOrUndefValue(N, SizeE, *report);
+ bugreporter::trackExpressionValue(N, SizeE, *report);
C.emitReport(std::move(report));
}
diff --git a/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp b/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
index cf673de6d4..902b325dec 100644
--- a/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
@@ -280,5 +280,6 @@ void ento::registerVirtualCallChecker(CheckerManager &mgr) {
VirtualCallChecker *checker = mgr.registerChecker<VirtualCallChecker>();
checker->IsPureOnly =
- mgr.getAnalyzerOptions().getBooleanOption("PureOnly", false, checker);
+ mgr.getAnalyzerOptions().getCheckerBooleanOption("PureOnly", false,
+ checker);
}
diff --git a/lib/StaticAnalyzer/Core/AnalysisManager.cpp b/lib/StaticAnalyzer/Core/AnalysisManager.cpp
index 5f45d26d1e..7fb1c09ca0 100644
--- a/lib/StaticAnalyzer/Core/AnalysisManager.cpp
+++ b/lib/StaticAnalyzer/Core/AnalysisManager.cpp
@@ -22,16 +22,21 @@ AnalysisManager::AnalysisManager(ASTContext &ASTCtx, DiagnosticsEngine &diags,
AnalyzerOptions &Options,
CodeInjector *injector)
: AnaCtxMgr(
- ASTCtx, Options.UnoptimizedCFG, Options.includeImplicitDtorsInCFG(),
- /*AddInitializers=*/true, Options.includeTemporaryDtorsInCFG(),
- Options.includeLifetimeInCFG(),
+ ASTCtx, Options.UnoptimizedCFG,
+ Options.ShouldIncludeImplicitDtorsInCFG,
+ /*AddInitializers=*/true,
+ Options.ShouldIncludeTemporaryDtorsInCFG,
+ Options.ShouldIncludeLifetimeInCFG,
// Adding LoopExit elements to the CFG is a requirement for loop
// unrolling.
- Options.includeLoopExitInCFG() || Options.shouldUnrollLoops(),
- Options.includeScopesInCFG(), Options.shouldSynthesizeBodies(),
- Options.shouldConditionalizeStaticInitializers(),
- /*addCXXNewAllocator=*/true, Options.includeRichConstructorsInCFG(),
- Options.shouldElideConstructors(), injector),
+ Options.ShouldIncludeLoopExitInCFG ||
+ Options.ShouldUnrollLoops,
+ Options.ShouldIncludeScopesInCFG,
+ Options.ShouldSynthesizeBodies,
+ Options.ShouldConditionalizeStaticInitializers,
+ /*addCXXNewAllocator=*/true,
+ Options.ShouldIncludeRichConstructorsInCFG,
+ Options.ShouldElideConstructors, injector),
Ctx(ASTCtx), Diags(diags), LangOpts(ASTCtx.getLangOpts()),
PathConsumers(PDC), CreateStoreMgr(storemgr),
CreateConstraintMgr(constraintmgr), CheckerMgr(checkerMgr),
diff --git a/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp b/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
index c910d31d4b..d9b63c209d 100644
--- a/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
+++ b/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
@@ -34,7 +34,7 @@ std::vector<StringRef>
AnalyzerOptions::getRegisteredCheckers(bool IncludeExperimental /* = false */) {
static const StringRef StaticAnalyzerChecks[] = {
#define GET_CHECKERS
-#define CHECKER(FULLNAME, CLASS, DESCFILE, HELPTEXT, GROUPINDEX, HIDDEN) \
+#define CHECKER(FULLNAME, CLASS, HELPTEXT) \
FULLNAME,
#include "clang/StaticAnalyzer/Checkers/Checkers.inc"
#undef CHECKER
@@ -49,114 +49,71 @@ AnalyzerOptions::getRegisteredCheckers(bool IncludeExperimental /* = false */) {
return Result;
}
-AnalyzerOptions::UserModeKind AnalyzerOptions::getUserMode() {
- if (UserMode == UMK_NotSet) {
- StringRef ModeStr =
- Config.insert(std::make_pair("mode", "deep")).first->second;
- UserMode = llvm::StringSwitch<UserModeKind>(ModeStr)
- .Case("shallow", UMK_Shallow)
- .Case("deep", UMK_Deep)
- .Default(UMK_NotSet);
- assert(UserMode != UMK_NotSet && "User mode is invalid.");
- }
- return UserMode;
-}
-
-AnalyzerOptions::ExplorationStrategyKind
-AnalyzerOptions::getExplorationStrategy() {
- if (ExplorationStrategy == ExplorationStrategyKind::NotSet) {
- StringRef StratStr =
- Config
- .insert(std::make_pair("exploration_strategy", "unexplored_first_queue"))
- .first->second;
- ExplorationStrategy =
- llvm::StringSwitch<ExplorationStrategyKind>(StratStr)
- .Case("dfs", ExplorationStrategyKind::DFS)
- .Case("bfs", ExplorationStrategyKind::BFS)
- .Case("unexplored_first",
- ExplorationStrategyKind::UnexploredFirst)
- .Case("unexplored_first_queue",
- ExplorationStrategyKind::UnexploredFirstQueue)
- .Case("bfs_block_dfs_contents",
- ExplorationStrategyKind::BFSBlockDFSContents)
- .Default(ExplorationStrategyKind::NotSet);
- assert(ExplorationStrategy != ExplorationStrategyKind::NotSet &&
- "User mode is invalid.");
- }
- return ExplorationStrategy;
-}
-
-IPAKind AnalyzerOptions::getIPAMode() {
- if (IPAMode == IPAK_NotSet) {
- // Use the User Mode to set the default IPA value.
- // Note, we have to add the string to the Config map for the ConfigDumper
- // checker to function properly.
- const char *DefaultIPA = nullptr;
- UserModeKind HighLevelMode = getUserMode();
- if (HighLevelMode == UMK_Shallow)
- DefaultIPA = "inlining";
- else if (HighLevelMode == UMK_Deep)
- DefaultIPA = "dynamic-bifurcate";
- assert(DefaultIPA);
-
- // Lookup the ipa configuration option, use the default from User Mode.
- StringRef ModeStr =
- Config.insert(std::make_pair("ipa", DefaultIPA)).first->second;
- IPAKind IPAConfig = llvm::StringSwitch<IPAKind>(ModeStr)
- .Case("none", IPAK_None)
- .Case("basic-inlining", IPAK_BasicInlining)
- .Case("inlining", IPAK_Inlining)
- .Case("dynamic", IPAK_DynamicDispatch)
- .Case("dynamic-bifurcate", IPAK_DynamicDispatchBifurcate)
- .Default(IPAK_NotSet);
- assert(IPAConfig != IPAK_NotSet && "IPA Mode is invalid.");
-
- // Set the member variable.
- IPAMode = IPAConfig;
- }
-
- return IPAMode;
+ExplorationStrategyKind
+AnalyzerOptions::getExplorationStrategy() const {
+ auto K =
+ llvm::StringSwitch<llvm::Optional<ExplorationStrategyKind>>(
+ ExplorationStrategy)
+ .Case("dfs", ExplorationStrategyKind::DFS)
+ .Case("bfs", ExplorationStrategyKind::BFS)
+ .Case("unexplored_first",
+ ExplorationStrategyKind::UnexploredFirst)
+ .Case("unexplored_first_queue",
+ ExplorationStrategyKind::UnexploredFirstQueue)
+ .Case("unexplored_first_location_queue",
+ ExplorationStrategyKind::UnexploredFirstLocationQueue)
+ .Case("bfs_block_dfs_contents",
+ ExplorationStrategyKind::BFSBlockDFSContents)
+ .Default(None);
+ assert(K.hasValue() && "Invalid exploration strategy.");
+ return K.getValue();
+}
+
+IPAKind AnalyzerOptions::getIPAMode() const {
+ auto K = llvm::StringSwitch<llvm::Optional<IPAKind>>(IPAMode)
+ .Case("none", IPAK_None)
+ .Case("basic-inlining", IPAK_BasicInlining)
+ .Case("inlining", IPAK_Inlining)
+ .Case("dynamic", IPAK_DynamicDispatch)
+ .Case("dynamic-bifurcate", IPAK_DynamicDispatchBifurcate)
+ .Default(None);
+ assert(K.hasValue() && "IPA Mode is invalid.");
+
+ return K.getValue();
}
bool
-AnalyzerOptions::mayInlineCXXMemberFunction(CXXInlineableMemberKind K) {
+AnalyzerOptions::mayInlineCXXMemberFunction(
+ CXXInlineableMemberKind Param) const {
if (getIPAMode() < IPAK_Inlining)
return false;
- if (!CXXMemberInliningMode) {
- static const char *ModeKey = "c++-inlining";
-
- StringRef ModeStr =
- Config.insert(std::make_pair(ModeKey, "destructors")).first->second;
+ auto K =
+ llvm::StringSwitch<llvm::Optional<CXXInlineableMemberKind>>(
+ CXXMemberInliningMode)
+ .Case("constructors", CIMK_Constructors)
+ .Case("destructors", CIMK_Destructors)
+ .Case("methods", CIMK_MemberFunctions)
+ .Case("none", CIMK_None)
+ .Default(None);
- CXXInlineableMemberKind &MutableMode =
- const_cast<CXXInlineableMemberKind &>(CXXMemberInliningMode);
-
- MutableMode = llvm::StringSwitch<CXXInlineableMemberKind>(ModeStr)
- .Case("constructors", CIMK_Constructors)
- .Case("destructors", CIMK_Destructors)
- .Case("none", CIMK_None)
- .Case("methods", CIMK_MemberFunctions)
- .Default(CXXInlineableMemberKind());
-
- if (!MutableMode) {
- // FIXME: We should emit a warning here about an unknown inlining kind,
- // but the AnalyzerOptions doesn't have access to a diagnostic engine.
- MutableMode = CIMK_None;
- }
- }
+ assert(K.hasValue() && "Invalid c++ member function inlining mode.");
- return CXXMemberInliningMode >= K;
+ return *K >= Param;
}
-static StringRef toString(bool b) { return b ? "true" : "false"; }
-
-StringRef AnalyzerOptions::getCheckerOption(StringRef CheckerName,
- StringRef OptionName,
- StringRef Default,
- bool SearchInParents) {
+StringRef AnalyzerOptions::getCheckerStringOption(StringRef OptionName,
+ StringRef DefaultVal,
+ const CheckerBase *C,
+ bool SearchInParents) const {
+ assert(C);
// Search for a package option if the option for the checker is not specified
// and search in parents is enabled.
+ StringRef CheckerName = C->getTagDescription();
+
+ assert(!CheckerName.empty() &&
+ "Empty checker name! Make sure the checker object (including it's "
+ "bases!) if fully initialized before calling this function!");
ConfigTable::const_iterator E = Config.end();
do {
ConfigTable::const_iterator I =
@@ -165,338 +122,35 @@ StringRef AnalyzerOptions::getCheckerOption(StringRef CheckerName,
return StringRef(I->getValue());
size_t Pos = CheckerName.rfind('.');
if (Pos == StringRef::npos)
- return Default;
+ return DefaultVal;
CheckerName = CheckerName.substr(0, Pos);
} while (!CheckerName.empty() && SearchInParents);
- return Default;
+ return DefaultVal;
}
-bool AnalyzerOptions::getBooleanOption(StringRef Name, bool DefaultVal,
- const CheckerBase *C,
- bool SearchInParents) {
+bool AnalyzerOptions::getCheckerBooleanOption(StringRef Name, bool DefaultVal,
+ const CheckerBase *C,
+ bool SearchInParents) const {
// FIXME: We should emit a warning here if the value is something other than
// "true", "false", or the empty string (meaning the default value),
// but the AnalyzerOptions doesn't have access to a diagnostic engine.
- StringRef Default = toString(DefaultVal);
- StringRef V =
- C ? getCheckerOption(C->getTagDescription(), Name, Default,
- SearchInParents)
- : StringRef(Config.insert(std::make_pair(Name, Default)).first->second);
- return llvm::StringSwitch<bool>(V)
+ assert(C);
+ return llvm::StringSwitch<bool>(
+ getCheckerStringOption(Name, DefaultVal ? "true" : "false", C,
+ SearchInParents))
.Case("true", true)
.Case("false", false)
.Default(DefaultVal);
}
-bool AnalyzerOptions::getBooleanOption(Optional<bool> &V, StringRef Name,
- bool DefaultVal, const CheckerBase *C,
- bool SearchInParents) {
- if (!V.hasValue())
- V = getBooleanOption(Name, DefaultVal, C, SearchInParents);
- return V.getValue();
-}
-
-bool AnalyzerOptions::includeTemporaryDtorsInCFG() {
- return getBooleanOption(IncludeTemporaryDtorsInCFG,
- "cfg-temporary-dtors",
- /* Default = */ true);
-}
-
-bool AnalyzerOptions::includeImplicitDtorsInCFG() {
- return getBooleanOption(IncludeImplicitDtorsInCFG,
- "cfg-implicit-dtors",
- /* Default = */ true);
-}
-
-bool AnalyzerOptions::includeLifetimeInCFG() {
- return getBooleanOption(IncludeLifetimeInCFG, "cfg-lifetime",
- /* Default = */ false);
-}
-
-bool AnalyzerOptions::includeLoopExitInCFG() {
- return getBooleanOption(IncludeLoopExitInCFG, "cfg-loopexit",
- /* Default = */ false);
-}
-
-bool AnalyzerOptions::includeRichConstructorsInCFG() {
- return getBooleanOption(IncludeRichConstructorsInCFG,
- "cfg-rich-constructors",
- /* Default = */ true);
-}
-
-bool AnalyzerOptions::includeScopesInCFG() {
- return getBooleanOption(IncludeScopesInCFG,
- "cfg-scopes",
- /* Default = */ false);
-}
-
-bool AnalyzerOptions::mayInlineCXXStandardLibrary() {
- return getBooleanOption(InlineCXXStandardLibrary,
- "c++-stdlib-inlining",
- /*Default=*/true);
-}
-
-bool AnalyzerOptions::mayInlineTemplateFunctions() {
- return getBooleanOption(InlineTemplateFunctions,
- "c++-template-inlining",
- /*Default=*/true);
-}
-
-bool AnalyzerOptions::mayInlineCXXAllocator() {
- return getBooleanOption(InlineCXXAllocator,
- "c++-allocator-inlining",
- /*Default=*/true);
-}
-
-bool AnalyzerOptions::mayInlineCXXContainerMethods() {
- return getBooleanOption(InlineCXXContainerMethods,
- "c++-container-inlining",
- /*Default=*/false);
-}
-
-bool AnalyzerOptions::mayInlineCXXSharedPtrDtor() {
- return getBooleanOption(InlineCXXSharedPtrDtor,
- "c++-shared_ptr-inlining",
- /*Default=*/false);
-}
-
-bool AnalyzerOptions::mayInlineCXXTemporaryDtors() {
- return getBooleanOption(InlineCXXTemporaryDtors,
- "c++-temp-dtor-inlining",
- /*Default=*/true);
-}
-
-bool AnalyzerOptions::mayInlineObjCMethod() {
- return getBooleanOption(ObjCInliningMode,
- "objc-inlining",
- /* Default = */ true);
-}
-
-bool AnalyzerOptions::shouldSuppressNullReturnPaths() {
- return getBooleanOption(SuppressNullReturnPaths,
- "suppress-null-return-paths",
- /* Default = */ true);
-}
-
-bool AnalyzerOptions::shouldAvoidSuppressingNullArgumentPaths() {
- return getBooleanOption(AvoidSuppressingNullArgumentPaths,
- "avoid-suppressing-null-argument-paths",
- /* Default = */ false);
-}
-
-bool AnalyzerOptions::shouldSuppressInlinedDefensiveChecks() {
- return getBooleanOption(SuppressInlinedDefensiveChecks,
- "suppress-inlined-defensive-checks",
- /* Default = */ true);
-}
-
-bool AnalyzerOptions::shouldSuppressFromCXXStandardLibrary() {
- return getBooleanOption(SuppressFromCXXStandardLibrary,
- "suppress-c++-stdlib",
- /* Default = */ true);
-}
-
-bool AnalyzerOptions::shouldCrosscheckWithZ3() {
- return getBooleanOption(CrosscheckWithZ3,
- "crosscheck-with-z3",
- /* Default = */ false);
-}
-
-bool AnalyzerOptions::shouldReportIssuesInMainSourceFile() {
- return getBooleanOption(ReportIssuesInMainSourceFile,
- "report-in-main-source-file",
- /* Default = */ false);
-}
-
-
-bool AnalyzerOptions::shouldWriteStableReportFilename() {
- return getBooleanOption(StableReportFilename,
- "stable-report-filename",
- /* Default = */ false);
-}
-
-bool AnalyzerOptions::shouldSerializeStats() {
- return getBooleanOption(SerializeStats,
- "serialize-stats",
- /* Default = */ false);
-}
-
-bool AnalyzerOptions::shouldElideConstructors() {
- return getBooleanOption(ElideConstructors,
- "elide-constructors",
- /* Default = */ true);
-}
-
-int AnalyzerOptions::getOptionAsInteger(StringRef Name, int DefaultVal,
+int AnalyzerOptions::getCheckerIntegerOption(StringRef Name, int DefaultVal,
const CheckerBase *C,
- bool SearchInParents) {
- SmallString<10> StrBuf;
- llvm::raw_svector_ostream OS(StrBuf);
- OS << DefaultVal;
-
- StringRef V = C ? getCheckerOption(C->getTagDescription(), Name, OS.str(),
- SearchInParents)
- : StringRef(Config.insert(std::make_pair(Name, OS.str()))
- .first->second);
-
- int Res = DefaultVal;
- bool b = V.getAsInteger(10, Res);
- assert(!b && "analyzer-config option should be numeric");
- (void)b;
- return Res;
-}
-
-StringRef AnalyzerOptions::getOptionAsString(StringRef Name,
- StringRef DefaultVal,
- const CheckerBase *C,
- bool SearchInParents) {
- return C ? getCheckerOption(C->getTagDescription(), Name, DefaultVal,
- SearchInParents)
- : StringRef(
- Config.insert(std::make_pair(Name, DefaultVal)).first->second);
-}
-
-unsigned AnalyzerOptions::getAlwaysInlineSize() {
- if (!AlwaysInlineSize.hasValue())
- AlwaysInlineSize = getOptionAsInteger("ipa-always-inline-size", 3);
- return AlwaysInlineSize.getValue();
-}
-
-unsigned AnalyzerOptions::getMaxInlinableSize() {
- if (!MaxInlinableSize.hasValue()) {
- int DefaultValue = 0;
- UserModeKind HighLevelMode = getUserMode();
- switch (HighLevelMode) {
- default:
- llvm_unreachable("Invalid mode.");
- case UMK_Shallow:
- DefaultValue = 4;
- break;
- case UMK_Deep:
- DefaultValue = 100;
- break;
- }
-
- MaxInlinableSize = getOptionAsInteger("max-inlinable-size", DefaultValue);
- }
- return MaxInlinableSize.getValue();
-}
-
-unsigned AnalyzerOptions::getGraphTrimInterval() {
- if (!GraphTrimInterval.hasValue())
- GraphTrimInterval = getOptionAsInteger("graph-trim-interval", 1000);
- return GraphTrimInterval.getValue();
-}
-
-unsigned AnalyzerOptions::getMaxSymbolComplexity() {
- if (!MaxSymbolComplexity.hasValue())
- MaxSymbolComplexity = getOptionAsInteger("max-symbol-complexity", 35);
- return MaxSymbolComplexity.getValue();
-}
-
-unsigned AnalyzerOptions::getMaxTimesInlineLarge() {
- if (!MaxTimesInlineLarge.hasValue())
- MaxTimesInlineLarge = getOptionAsInteger("max-times-inline-large", 32);
- return MaxTimesInlineLarge.getValue();
-}
-
-unsigned AnalyzerOptions::getMinCFGSizeTreatFunctionsAsLarge() {
- if (!MinCFGSizeTreatFunctionsAsLarge.hasValue())
- MinCFGSizeTreatFunctionsAsLarge = getOptionAsInteger(
- "min-cfg-size-treat-functions-as-large", 14);
- return MinCFGSizeTreatFunctionsAsLarge.getValue();
-}
-
-unsigned AnalyzerOptions::getMaxNodesPerTopLevelFunction() {
- if (!MaxNodesPerTopLevelFunction.hasValue()) {
- int DefaultValue = 0;
- UserModeKind HighLevelMode = getUserMode();
- switch (HighLevelMode) {
- default:
- llvm_unreachable("Invalid mode.");
- case UMK_Shallow:
- DefaultValue = 75000;
- break;
- case UMK_Deep:
- DefaultValue = 225000;
- break;
- }
- MaxNodesPerTopLevelFunction = getOptionAsInteger("max-nodes", DefaultValue);
- }
- return MaxNodesPerTopLevelFunction.getValue();
-}
-
-bool AnalyzerOptions::shouldSynthesizeBodies() {
- return getBooleanOption("faux-bodies", true);
-}
-
-bool AnalyzerOptions::shouldPrunePaths() {
- return getBooleanOption("prune-paths", true);
-}
-
-bool AnalyzerOptions::shouldConditionalizeStaticInitializers() {
- return getBooleanOption("cfg-conditional-static-initializers", true);
-}
-
-bool AnalyzerOptions::shouldInlineLambdas() {
- if (!InlineLambdas.hasValue())
- InlineLambdas = getBooleanOption("inline-lambdas", /*Default=*/true);
- return InlineLambdas.getValue();
-}
-
-bool AnalyzerOptions::shouldWidenLoops() {
- if (!WidenLoops.hasValue())
- WidenLoops = getBooleanOption("widen-loops", /*Default=*/false);
- return WidenLoops.getValue();
-}
-
-bool AnalyzerOptions::shouldUnrollLoops() {
- if (!UnrollLoops.hasValue())
- UnrollLoops = getBooleanOption("unroll-loops", /*Default=*/false);
- return UnrollLoops.getValue();
-}
-
-bool AnalyzerOptions::shouldDisplayNotesAsEvents() {
- if (!DisplayNotesAsEvents.hasValue())
- DisplayNotesAsEvents =
- getBooleanOption("notes-as-events", /*Default=*/false);
- return DisplayNotesAsEvents.getValue();
-}
-
-bool AnalyzerOptions::shouldAggressivelySimplifyBinaryOperation() {
- if (!AggressiveBinaryOperationSimplification.hasValue())
- AggressiveBinaryOperationSimplification =
- getBooleanOption("aggressive-binary-operation-simplification",
- /*Default=*/false);
- return AggressiveBinaryOperationSimplification.getValue();
-}
-
-bool AnalyzerOptions::shouldEagerlyAssume() {
- if (!EagerlyAssumeBinOpBifurcation.hasValue())
- EagerlyAssumeBinOpBifurcation =
- getBooleanOption("eagerly-assume", true);
- return EagerlyAssumeBinOpBifurcation.getValue();
-}
-
-StringRef AnalyzerOptions::getCTUDir() {
- if (!CTUDir.hasValue()) {
- CTUDir = getOptionAsString("ctu-dir", "");
- if (!llvm::sys::fs::is_directory(*CTUDir))
- CTUDir = "";
- }
- return CTUDir.getValue();
-}
-
-bool AnalyzerOptions::naiveCTUEnabled() {
- if (!NaiveCTU.hasValue()) {
- NaiveCTU = getBooleanOption("experimental-enable-naive-ctu-analysis",
- /*Default=*/false);
- }
- return NaiveCTU.getValue();
-}
-
-StringRef AnalyzerOptions::getCTUIndexName() {
- if (!CTUIndexName.hasValue())
- CTUIndexName = getOptionAsString("ctu-index-name", "externalFnMap.txt");
- return CTUIndexName.getValue();
+ bool SearchInParents) const {
+ int Ret = DefaultVal;
+ bool HasFailed = getCheckerStringOption(Name, std::to_string(DefaultVal), C,
+ SearchInParents)
+ .getAsInteger(10, Ret);
+ assert(!HasFailed && "analyzer-config option should be numeric");
+ (void)HasFailed;
+ return Ret;
}
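
Most of the lazily computed accessors removed above have no replacement method: callers now read plain data members on AnalyzerOptions (as the AnalysisManager, ExprEngine and BugReporter hunks in this patch show), and only the string-to-enum translations remain as const getters. A minimal sketch, assuming an AnalyzerOptions instance named Opts and a hypothetical helper configureLoops():

    if (Opts.ShouldUnrollLoops || Opts.ShouldWidenLoops)
      configureLoops();                       // hypothetical helper
    unsigned Trim = Opts.GraphTrimInterval;   // plain field, no getter
    IPAKind IPA = Opts.getIPAMode();          // still parses the "ipa" string
    bool Ok = Opts.mayInlineCXXMemberFunction(CIMK_Destructors);
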
diff --git a/lib/StaticAnalyzer/Core/BasicValueFactory.cpp b/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
index db4c1432cc..d8ed6942de 100644
--- a/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
+++ b/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
@@ -207,7 +207,7 @@ BasicValueFactory::evalAPSInt(BinaryOperator::Opcode Op,
const llvm::APSInt& V1, const llvm::APSInt& V2) {
switch (Op) {
default:
- assert(false && "Invalid Opcode.");
+ llvm_unreachable("Invalid Opcode.");
case BO_Mul:
return &getValue( V1 * V2 );
diff --git a/lib/StaticAnalyzer/Core/BugReporter.cpp b/lib/StaticAnalyzer/Core/BugReporter.cpp
index 8899fa67a3..fd7f532104 100644
--- a/lib/StaticAnalyzer/Core/BugReporter.cpp
+++ b/lib/StaticAnalyzer/Core/BugReporter.cpp
@@ -546,7 +546,8 @@ static void updateStackPiecesWithMessage(PathDiagnosticPiece &P,
}
}
-static void CompactPathDiagnostic(PathPieces &path, const SourceManager& SM);
+static void CompactMacroExpandedPieces(PathPieces &path,
+ const SourceManager& SM);
std::shared_ptr<PathDiagnosticControlFlowPiece> generateDiagForSwitchOP(
@@ -819,7 +820,7 @@ void generateMinimalDiagForBlockEdge(const ExplodedNode *N, BlockEdge BE,
// and values by tracing interesting calculations backwards through evaluated
// expressions along a path. This is probably overly complicated, but the idea
// is that if an expression computed an "interesting" value, the child
-// expressions are are also likely to be "interesting" as well (which then
+// expressions are also likely to be "interesting" as well (which then
// propagates to the values they in turn compute). This reverse propagation
// is needed to track interesting correlations across function call boundaries,
// where formal arguments bind to actual arguments, etc. This is also needed
@@ -841,7 +842,7 @@ static void reversePropagateIntererstingSymbols(BugReport &R,
default:
if (!isa<CastExpr>(Ex))
break;
- // Fall through.
+ LLVM_FALLTHROUGH;
case Stmt::BinaryOperatorClass:
case Stmt::UnaryOperatorClass: {
for (const Stmt *SubStmt : Ex->children()) {
@@ -1265,7 +1266,7 @@ static const Stmt *getStmtParent(const Stmt *S, const ParentMap &PM) {
if (!S)
break;
- if (isa<ExprWithCleanups>(S) ||
+ if (isa<FullExpr>(S) ||
isa<CXXBindTemporaryExpr>(S) ||
isa<SubstNonTypeTemplateParmExpr>(S))
continue;
@@ -1972,12 +1973,10 @@ static std::unique_ptr<PathDiagnostic> generatePathDiagnosticForConsumer(
PathDiagnosticLocation::createBegin(D, SM));
}
- if (!AddPathEdges && GenerateDiagnostics)
- CompactPathDiagnostic(PD->getMutablePieces(), SM);
// Finally, prune the diagnostic path of uninteresting stuff.
if (!PD->path.empty()) {
- if (R->shouldPrunePath() && Opts.shouldPrunePaths()) {
+ if (R->shouldPrunePath() && Opts.ShouldPrunePaths) {
bool stillHasNotes =
removeUnneededCalls(PD->getMutablePieces(), R, LCM);
assert(stillHasNotes);
@@ -2007,6 +2006,10 @@ static std::unique_ptr<PathDiagnostic> generatePathDiagnosticForConsumer(
removeRedundantMsgs(PD->getMutablePieces());
removeEdgesToDefaultInitializers(PD->getMutablePieces());
}
+
+ if (GenerateDiagnostics && Opts.ShouldDisplayMacroExpansions)
+ CompactMacroExpandedPieces(PD->getMutablePieces(), SM);
+
return PD;
}
@@ -2436,9 +2439,10 @@ bool TrimmedGraph::popNextReportGraph(ReportGraph &GraphWrapper) {
return true;
}
-/// CompactPathDiagnostic - This function postprocesses a PathDiagnostic object
-/// and collapses PathDiagosticPieces that are expanded by macros.
-static void CompactPathDiagnostic(PathPieces &path, const SourceManager& SM) {
+/// CompactMacroExpandedPieces - This function postprocesses a PathDiagnostic
+/// object and collapses PathDiagnosticPieces that are expanded by macros.
+static void CompactMacroExpandedPieces(PathPieces &path,
+ const SourceManager& SM) {
using MacroStackTy =
std::vector<
std::pair<std::shared_ptr<PathDiagnosticMacroPiece>, SourceLocation>>;
@@ -2454,7 +2458,7 @@ static void CompactPathDiagnostic(PathPieces &path, const SourceManager& SM) {
// Recursively compact calls.
if (auto *call = dyn_cast<PathDiagnosticCallPiece>(&*piece)) {
- CompactPathDiagnostic(call->path, SM);
+ CompactMacroExpandedPieces(call->path, SM);
}
// Get the location of the PathDiagnosticPiece.
@@ -2617,7 +2621,7 @@ std::pair<BugReport*, std::unique_ptr<VisitorsDiagnosticsTy>> findValidReport(
generateVisitorsDiagnostics(R, ErrorNode, BRC);
if (R->isValid()) {
- if (Opts.shouldCrosscheckWithZ3()) {
+ if (Opts.ShouldCrosscheckWithZ3) {
// If crosscheck is enabled, remove all visitors, add the refutation
// visitor and check again
R->clearVisitors();
@@ -2959,7 +2963,7 @@ void BugReporter::FlushReport(BugReportEquivClass& EQ) {
}
PathPieces &Pieces = PD->getMutablePieces();
- if (getAnalyzerOptions().shouldDisplayNotesAsEvents()) {
+ if (getAnalyzerOptions().ShouldDisplayNotesAsEvents) {
// For path diagnostic consumers that don't support extra notes,
// we may optionally convert those to path notes.
for (auto I = report->getNotes().rbegin(),
@@ -3096,7 +3100,7 @@ BugReporter::generateDiagnosticForConsumerMap(
// report location to the last piece in the main source file.
AnalyzerOptions &Opts = getAnalyzerOptions();
for (auto const &P : *Out)
- if (Opts.shouldReportIssuesInMainSourceFile() && !Opts.AnalyzeAll)
+ if (Opts.ShouldReportIssuesInMainSourceFile && !Opts.AnalyzeAll)
P.second->resetDiagnosticLocationToMainFile();
return Out;
diff --git a/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp b/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
index 1e2939b79d..da94b6eb21 100644
--- a/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
+++ b/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
@@ -71,12 +71,6 @@ using namespace ento;
// Utility functions.
//===----------------------------------------------------------------------===//
-bool bugreporter::isDeclRefExprToReference(const Expr *E) {
- if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
- return DRE->getDecl()->getType()->isReferenceType();
- return false;
-}
-
static const Expr *peelOffPointerArithmetic(const BinaryOperator *B) {
if (B->isAdditiveOp() && B->getType()->isPointerType()) {
if (B->getLHS()->getType()->isPointerType()) {
@@ -142,8 +136,8 @@ const Expr *bugreporter::getDerefExpr(const Stmt *S) {
E = AE->getBase();
} else if (const auto *PE = dyn_cast<ParenExpr>(E)) {
E = PE->getSubExpr();
- } else if (const auto *EWC = dyn_cast<ExprWithCleanups>(E)) {
- E = EWC->getSubExpr();
+ } else if (const auto *FE = dyn_cast<FullExpr>(E)) {
+ E = FE->getSubExpr();
} else {
// Other arbitrary stuff.
break;
@@ -160,20 +154,6 @@ const Expr *bugreporter::getDerefExpr(const Stmt *S) {
return E;
}
-const Stmt *bugreporter::GetDenomExpr(const ExplodedNode *N) {
- const Stmt *S = N->getLocationAs<PreStmt>()->getStmt();
- if (const auto *BE = dyn_cast<BinaryOperator>(S))
- return BE->getRHS();
- return nullptr;
-}
-
-const Stmt *bugreporter::GetRetValExpr(const ExplodedNode *N) {
- const Stmt *S = N->getLocationAs<PostStmt>()->getStmt();
- if (const auto *RS = dyn_cast<ReturnStmt>(S))
- return RS->getRetValue();
- return nullptr;
-}
-
//===----------------------------------------------------------------------===//
// Definitions for bug reporter visitors.
//===----------------------------------------------------------------------===//
@@ -696,8 +676,8 @@ public:
bool EnableNullFPSuppression, BugReport &BR,
const SVal V) {
AnalyzerOptions &Options = N->getState()->getAnalysisManager().options;
- if (EnableNullFPSuppression && Options.shouldSuppressNullReturnPaths()
- && V.getAs<Loc>())
+ if (EnableNullFPSuppression &&
+ Options.ShouldSuppressNullReturnPaths && V.getAs<Loc>())
BR.addVisitor(llvm::make_unique<MacroNullReturnSuppressionVisitor>(
R->getAs<SubRegion>(), V));
}
@@ -828,7 +808,8 @@ public:
AnalyzerOptions &Options = State->getAnalysisManager().options;
bool EnableNullFPSuppression = false;
- if (InEnableNullFPSuppression && Options.shouldSuppressNullReturnPaths())
+ if (InEnableNullFPSuppression &&
+ Options.ShouldSuppressNullReturnPaths)
if (Optional<Loc> RetLoc = RetVal.getAs<Loc>())
EnableNullFPSuppression = State->isNull(*RetLoc).isConstrainedTrue();
@@ -884,7 +865,7 @@ public:
RetE = RetE->IgnoreParenCasts();
// If we're returning 0, we should track where that 0 came from.
- bugreporter::trackNullOrUndefValue(N, RetE, BR, EnableNullFPSuppression);
+ bugreporter::trackExpressionValue(N, RetE, BR, EnableNullFPSuppression);
// Build an appropriate message based on the return value.
SmallString<64> Msg;
@@ -897,7 +878,7 @@ public:
// future nodes. We want to emit a path note as well, in case
// the report is resurrected as valid later on.
if (EnableNullFPSuppression &&
- Options.shouldAvoidSuppressingNullArgumentPaths())
+ Options.ShouldAvoidSuppressingNullArgumentPaths)
Mode = MaybeUnsuppress;
if (RetE->getType()->isObjCObjectPointerType()) {
@@ -945,7 +926,7 @@ public:
visitNodeMaybeUnsuppress(const ExplodedNode *N,
BugReporterContext &BRC, BugReport &BR) {
#ifndef NDEBUG
- assert(Options.shouldAvoidSuppressingNullArgumentPaths());
+ assert(Options.ShouldAvoidSuppressingNullArgumentPaths);
#endif
// Are we at the entry node for this call?
@@ -979,8 +960,7 @@ public:
if (!State->isNull(*ArgV).isConstrainedTrue())
continue;
- if (bugreporter::trackNullOrUndefValue(N, ArgE, BR,
- EnableNullFPSuppression))
+ if (bugreporter::trackExpressionValue(N, ArgE, BR, EnableNullFPSuppression))
ShouldInvalidate = false;
// If we /can't/ track the null pointer, we should err on the side of
@@ -1258,8 +1238,8 @@ FindLastStoreBRVisitor::VisitNode(const ExplodedNode *Succ,
V.getAs<loc::ConcreteInt>() || V.getAs<nonloc::ConcreteInt>()) {
if (!IsParam)
InitE = InitE->IgnoreParenCasts();
- bugreporter::trackNullOrUndefValue(StoreSite, InitE, BR,
- EnableNullFPSuppression);
+ bugreporter::trackExpressionValue(StoreSite, InitE, BR,
+ EnableNullFPSuppression);
}
ReturnVisitor::addVisitorIfNecessary(StoreSite, InitE->IgnoreParenCasts(),
BR, EnableNullFPSuppression);
@@ -1399,7 +1379,7 @@ SuppressInlineDefensiveChecksVisitor(DefinedSVal Value, const ExplodedNode *N)
: V(Value) {
// Check if the visitor is disabled.
AnalyzerOptions &Options = N->getState()->getAnalysisManager().options;
- if (!Options.shouldSuppressInlinedDefensiveChecks())
+ if (!Options.ShouldSuppressInlinedDefensiveChecks)
IsSatisfied = true;
assert(N->getState()->isNull(V).isConstrainedTrue() &&
@@ -1515,8 +1495,8 @@ static const MemRegion *getLocationRegionIfReference(const Expr *E,
static const Expr *peelOffOuterExpr(const Expr *Ex,
const ExplodedNode *N) {
Ex = Ex->IgnoreParenCasts();
- if (const auto *EWC = dyn_cast<ExprWithCleanups>(Ex))
- return peelOffOuterExpr(EWC->getSubExpr(), N);
+ if (const auto *FE = dyn_cast<FullExpr>(Ex))
+ return peelOffOuterExpr(FE->getSubExpr(), N);
if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Ex))
return peelOffOuterExpr(OVE->getSourceExpr(), N);
if (const auto *POE = dyn_cast<PseudoObjectExpr>(Ex)) {
@@ -1588,14 +1568,13 @@ static const ExplodedNode* findNodeForExpression(const ExplodedNode *N,
return N;
}
-bool bugreporter::trackNullOrUndefValue(const ExplodedNode *InputNode,
- const Stmt *InputS,
- BugReport &report,
- bool EnableNullFPSuppression) {
- if (!InputS || !InputNode || !isa<Expr>(InputS))
+bool bugreporter::trackExpressionValue(const ExplodedNode *InputNode,
+ const Expr *E, BugReport &report,
+ bool EnableNullFPSuppression) {
+ if (!E || !InputNode)
return false;
- const Expr *Inner = peelOffOuterExpr(cast<Expr>(InputS), InputNode);
+ const Expr *Inner = peelOffOuterExpr(E, InputNode);
const ExplodedNode *LVNode = findNodeForExpression(InputNode, Inner);
if (!LVNode)
return false;
@@ -1606,11 +1585,11 @@ bool bugreporter::trackNullOrUndefValue(const ExplodedNode *InputNode,
// At this point in the path, the receiver should be live since we are at the
// message send expr. If it is nil, start tracking it.
if (const Expr *Receiver = NilReceiverBRVisitor::getNilReceiver(Inner, LVNode))
- trackNullOrUndefValue(LVNode, Receiver, report, EnableNullFPSuppression);
+ trackExpressionValue(LVNode, Receiver, report, EnableNullFPSuppression);
// See if the expression we're interested refers to a variable.
// If so, we can track both its contents and constraints on its value.
- if (Inner && ExplodedGraph::isInterestingLValueExpr(Inner)) {
+ if (ExplodedGraph::isInterestingLValueExpr(Inner)) {
SVal LVal = LVNode->getSVal(Inner);
const MemRegion *RR = getLocationRegionIfReference(Inner, LVNode);
@@ -1703,7 +1682,7 @@ bool bugreporter::trackNullOrUndefValue(const ExplodedNode *InputNode,
if (RegionRVal && isa<SymbolicRegion>(RegionRVal)) {
report.markInteresting(RegionRVal);
report.addVisitor(llvm::make_unique<TrackConstraintBRVisitor>(
- loc::MemRegionVal(RegionRVal), false));
+ loc::MemRegionVal(RegionRVal), /*assumption=*/false));
}
}
return true;
@@ -1751,8 +1730,8 @@ NilReceiverBRVisitor::VisitNode(const ExplodedNode *N,
// The receiver was nil, and hence the method was skipped.
// Register a BugReporterVisitor to issue a message telling us how
// the receiver was null.
- bugreporter::trackNullOrUndefValue(N, Receiver, BR,
- /*EnableNullFPSuppression*/ false);
+ bugreporter::trackExpressionValue(N, Receiver, BR,
+ /*EnableNullFPSuppression*/ false);
// Issue a message saying that the method was skipped.
PathDiagnosticLocation L(Receiver, BRC.getSourceManager(),
N->getLocationContext());
@@ -2241,7 +2220,7 @@ void LikelyFalsePositiveSuppressionBRVisitor::finalizeVisitor(
// the user's fault, we currently don't report them very well, and
// Note that this will not help for any other data structure libraries, like
// TR1, Boost, or llvm/ADT.
- if (Options.shouldSuppressFromCXXStandardLibrary()) {
+ if (Options.ShouldSuppressFromCXXStandardLibrary) {
BR.markInvalid(getTag(), nullptr);
return;
} else {
@@ -2448,15 +2427,19 @@ void FalsePositiveRefutationBRVisitor::finalizeVisitor(
// Add constraints to the solver
for (const auto &I : Constraints) {
- SymbolRef Sym = I.first;
+ const SymbolRef Sym = I.first;
+ auto RangeIt = I.second.begin();
- SMTExprRef Constraints = RefutationSolver->fromBoolean(false);
- for (const auto &Range : I.second) {
+ SMTExprRef Constraints = SMTConv::getRangeExpr(
+ RefutationSolver, Ctx, Sym, RangeIt->From(), RangeIt->To(),
+ /*InRange=*/true);
+ while ((++RangeIt) != I.second.end()) {
Constraints = RefutationSolver->mkOr(
Constraints, SMTConv::getRangeExpr(RefutationSolver, Ctx, Sym,
- Range.From(), Range.To(),
+ RangeIt->From(), RangeIt->To(),
/*InRange=*/true));
}
+
RefutationSolver->addConstraint(Constraints);
}
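
Both peeling helpers above now strip FullExpr rather than ExprWithCleanups specifically; FullExpr is the common base that also covers the ConstantExpr wrapper handled in Environment.cpp further down. A minimal sketch of the unwrapping pattern (the helper name is made up):

    static const Expr *unwrapFullExprs(const Expr *E) {
      // One dyn_cast now strips either ExprWithCleanups or ConstantExpr.
      while (const auto *FE = llvm::dyn_cast<FullExpr>(E))
        E = FE->getSubExpr();
      return E;
    }
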
diff --git a/lib/StaticAnalyzer/Core/CMakeLists.txt b/lib/StaticAnalyzer/Core/CMakeLists.txt
index db06e4efd5..17334d841e 100644
--- a/lib/StaticAnalyzer/Core/CMakeLists.txt
+++ b/lib/StaticAnalyzer/Core/CMakeLists.txt
@@ -45,13 +45,15 @@ add_clang_library(clangStaticAnalyzerCore
RangedConstraintManager.cpp
RegionStore.cpp
RetainSummaryManager.cpp
- SValBuilder.cpp
- SVals.cpp
+ SarifDiagnostics.cpp
SimpleConstraintManager.cpp
SimpleSValBuilder.cpp
Store.cpp
SubEngine.cpp
+ SValBuilder.cpp
+ SVals.cpp
SymbolManager.cpp
+ TaintManager.cpp
WorkList.cpp
Z3ConstraintManager.cpp
diff --git a/lib/StaticAnalyzer/Core/CallEvent.cpp b/lib/StaticAnalyzer/Core/CallEvent.cpp
index 7b6a8d4d25..767116630f 100644
--- a/lib/StaticAnalyzer/Core/CallEvent.cpp
+++ b/lib/StaticAnalyzer/Core/CallEvent.cpp
@@ -503,10 +503,14 @@ static void addParameterValuesToBindings(const StackFrameContext *CalleeCtx,
const ParmVarDecl *ParamDecl = *I;
assert(ParamDecl && "Formal parameter has no decl?");
+ // TODO: Support allocator calls.
if (Call.getKind() != CE_CXXAllocator)
if (Call.isArgumentConstructedDirectly(Idx))
continue;
+ // TODO: Allocators should receive the correct size and possibly alignment,
+ // determined at compile time but not represented as arg-expressions,
+ // which makes getArgSVal() fail and return UnknownVal.
SVal ArgVal = Call.getArgSVal(Idx);
if (!ArgVal.isUnknown()) {
Loc ParamLoc = SVB.makeLoc(MRMgr.getVarRegion(ParamDecl, CalleeCtx));
@@ -550,13 +554,14 @@ RuntimeDefinition AnyFunctionCall::getRuntimeDefinition() const {
AnalyzerOptions &Opts = Engine->getAnalysisManager().options;
// Try to get CTU definition only if CTUDir is provided.
- if (!Opts.naiveCTUEnabled())
+ if (!Opts.IsNaiveCTUEnabled)
return {};
cross_tu::CrossTranslationUnitContext &CTUCtx =
*Engine->getCrossTranslationUnitContext();
llvm::Expected<const FunctionDecl *> CTUDeclOrError =
- CTUCtx.getCrossTUDefinition(FD, Opts.getCTUDir(), Opts.getCTUIndexName());
+ CTUCtx.getCrossTUDefinition(FD, Opts.CTUDir, Opts.CTUIndexName,
+ Opts.DisplayCTUProgress);
if (!CTUDeclOrError) {
handleAllErrors(CTUDeclOrError.takeError(),
diff --git a/lib/StaticAnalyzer/Core/Checker.cpp b/lib/StaticAnalyzer/Core/Checker.cpp
index b422a88719..72bfd84b40 100644
--- a/lib/StaticAnalyzer/Core/Checker.cpp
+++ b/lib/StaticAnalyzer/Core/Checker.cpp
@@ -17,6 +17,8 @@
using namespace clang;
using namespace ento;
+int ImplicitNullDerefEvent::Tag;
+
StringRef CheckerBase::getTagDescription() const {
return getCheckName().getName();
}
diff --git a/lib/StaticAnalyzer/Core/CheckerManager.cpp b/lib/StaticAnalyzer/Core/CheckerManager.cpp
index 3740e4bf4d..688c47e984 100644
--- a/lib/StaticAnalyzer/Core/CheckerManager.cpp
+++ b/lib/StaticAnalyzer/Core/CheckerManager.cpp
@@ -441,8 +441,8 @@ void CheckerManager::runCheckersForEndFunction(NodeBuilderContext &BC,
ExplodedNode *Pred,
ExprEngine &Eng,
const ReturnStmt *RS) {
- // We define the builder outside of the loop bacause if at least one checkers
- // creates a sucsessor for Pred, we do not need to generate an
+ // We define the builder outside of the loop because if at least one checker
+ // creates a successor for Pred, we do not need to generate an
// autotransition for it.
NodeBuilder Bldr(Pred, Dst, BC);
for (const auto checkFn : EndFunctionCheckers) {
diff --git a/lib/StaticAnalyzer/Core/CheckerRegistry.cpp b/lib/StaticAnalyzer/Core/CheckerRegistry.cpp
index 89adea2cff..00475c04fd 100644
--- a/lib/StaticAnalyzer/Core/CheckerRegistry.cpp
+++ b/lib/StaticAnalyzer/Core/CheckerRegistry.cpp
@@ -12,7 +12,6 @@
#include "clang/Basic/LLVM.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
-#include "clang/StaticAnalyzer/Core/CheckerOptInfo.h"
#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
@@ -30,6 +29,41 @@ static const char PackageSeparator = '.';
using CheckerInfoSet = llvm::SetVector<const CheckerRegistry::CheckerInfo *>;
+namespace {
+/// Represents a request to include or exclude a checker or package from a
+/// specific analysis run.
+///
+/// \sa CheckerRegistry::initializeManager
+class CheckerOptInfo {
+ StringRef Name;
+ bool Enable;
+ bool Claimed;
+
+public:
+ CheckerOptInfo(StringRef name, bool enable)
+ : Name(name), Enable(enable), Claimed(false) { }
+
+ StringRef getName() const { return Name; }
+ bool isEnabled() const { return Enable; }
+ bool isDisabled() const { return !isEnabled(); }
+
+ bool isClaimed() const { return Claimed; }
+ bool isUnclaimed() const { return !isClaimed(); }
+ void claim() { Claimed = true; }
+};
+
+} // end of anonymous namespace
+
+static SmallVector<CheckerOptInfo, 8>
+getCheckerOptList(const AnalyzerOptions &opts) {
+ SmallVector<CheckerOptInfo, 8> checkerOpts;
+ for (unsigned i = 0, e = opts.CheckersControlList.size(); i != e; ++i) {
+ const std::pair<std::string, bool> &opt = opts.CheckersControlList[i];
+ checkerOpts.push_back(CheckerOptInfo(opt.first, opt.second));
+ }
+ return checkerOpts;
+}
+
static bool checkerNameLT(const CheckerRegistry::CheckerInfo &a,
const CheckerRegistry::CheckerInfo &b) {
return a.FullName < b.FullName;
@@ -52,6 +86,7 @@ static bool isInPackage(const CheckerRegistry::CheckerInfo &checker,
return false;
}
+/// Collects the checkers for the supplied \p opt option into \p collected.
static void collectCheckers(const CheckerRegistry::CheckerInfoList &checkers,
const llvm::StringMap<size_t> &packageSizes,
CheckerOptInfo &opt, CheckerInfoSet &collected) {
@@ -101,13 +136,15 @@ void CheckerRegistry::addChecker(InitializationFunction fn, StringRef name,
}
void CheckerRegistry::initializeManager(CheckerManager &checkerMgr,
- SmallVectorImpl<CheckerOptInfo> &opts) const {
+ const AnalyzerOptions &Opts,
+ DiagnosticsEngine &diags) const {
// Sort checkers for efficient collection.
llvm::sort(Checkers, checkerNameLT);
+ llvm::SmallVector<CheckerOptInfo, 8> checkerOpts = getCheckerOptList(Opts);
// Collect checkers enabled by the options.
CheckerInfoSet enabledCheckers;
- for (auto &i : opts)
+ for (auto &i : checkerOpts)
collectCheckers(Checkers, Packages, i, enabledCheckers);
// Initialize the CheckerManager with all enabled checkers.
@@ -115,6 +152,15 @@ void CheckerRegistry::initializeManager(CheckerManager &checkerMgr,
checkerMgr.setCurrentCheckName(CheckName(i->FullName));
i->Initialize(checkerMgr);
}
+
+ for (unsigned i = 0, e = checkerOpts.size(); i != e; ++i) {
+ if (checkerOpts[i].isUnclaimed()) {
+ diags.Report(diag::err_unknown_analyzer_checker)
+ << checkerOpts[i].getName();
+ diags.Report(diag::note_suggest_disabling_all_checkers);
+ }
+
+ }
}
void CheckerRegistry::validateCheckerOptions(const AnalyzerOptions &opts,
@@ -176,13 +222,14 @@ void CheckerRegistry::printHelp(raw_ostream &out,
}
}
-void CheckerRegistry::printList(
- raw_ostream &out, SmallVectorImpl<CheckerOptInfo> &opts) const {
+void CheckerRegistry::printList(raw_ostream &out,
+ const AnalyzerOptions &opts) const {
llvm::sort(Checkers, checkerNameLT);
+ llvm::SmallVector<CheckerOptInfo, 8> checkerOpts = getCheckerOptList(opts);
// Collect checkers enabled by the options.
CheckerInfoSet enabledCheckers;
- for (auto &i : opts)
+ for (auto &i : checkerOpts)
collectCheckers(Checkers, Packages, i, enabledCheckers);
for (const auto *i : enabledCheckers)
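
With this change the frontend no longer prebuilds a SmallVector<CheckerOptInfo>: both entry points take the AnalyzerOptions directly and derive the list from CheckersControlList themselves, and initializeManager additionally needs a DiagnosticsEngine so it can diagnose unclaimed (unknown) checker names. A minimal sketch of the updated call sites, assuming Registry, CheckerMgr, AnalyzerOpts and Diags already exist:

    Registry.initializeManager(CheckerMgr, AnalyzerOpts, Diags);
    Registry.printList(llvm::outs(), AnalyzerOpts);
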
diff --git a/lib/StaticAnalyzer/Core/CoreEngine.cpp b/lib/StaticAnalyzer/Core/CoreEngine.cpp
index e5a5296e02..196854cb09 100644
--- a/lib/StaticAnalyzer/Core/CoreEngine.cpp
+++ b/lib/StaticAnalyzer/Core/CoreEngine.cpp
@@ -53,26 +53,28 @@ STATISTIC(NumPathsExplored,
// Core analysis engine.
//===----------------------------------------------------------------------===//
-static std::unique_ptr<WorkList> generateWorkList(AnalyzerOptions &Opts) {
+static std::unique_ptr<WorkList> generateWorkList(AnalyzerOptions &Opts,
+ SubEngine &subengine) {
switch (Opts.getExplorationStrategy()) {
- case AnalyzerOptions::ExplorationStrategyKind::DFS:
+ case ExplorationStrategyKind::DFS:
return WorkList::makeDFS();
- case AnalyzerOptions::ExplorationStrategyKind::BFS:
+ case ExplorationStrategyKind::BFS:
return WorkList::makeBFS();
- case AnalyzerOptions::ExplorationStrategyKind::BFSBlockDFSContents:
+ case ExplorationStrategyKind::BFSBlockDFSContents:
return WorkList::makeBFSBlockDFSContents();
- case AnalyzerOptions::ExplorationStrategyKind::UnexploredFirst:
+ case ExplorationStrategyKind::UnexploredFirst:
return WorkList::makeUnexploredFirst();
- case AnalyzerOptions::ExplorationStrategyKind::UnexploredFirstQueue:
+ case ExplorationStrategyKind::UnexploredFirstQueue:
return WorkList::makeUnexploredFirstPriorityQueue();
- default:
- llvm_unreachable("Unexpected case");
+ case ExplorationStrategyKind::UnexploredFirstLocationQueue:
+ return WorkList::makeUnexploredFirstPriorityLocationQueue();
}
+ llvm_unreachable("Unknown AnalyzerOptions::ExplorationStrategyKind");
}
CoreEngine::CoreEngine(SubEngine &subengine, FunctionSummariesTy *FS,
AnalyzerOptions &Opts)
- : SubEng(subengine), WList(generateWorkList(Opts)),
+ : SubEng(subengine), WList(generateWorkList(Opts, subengine)),
BCounterFactory(G.getAllocator()), FunctionSummaries(FS) {}
/// ExecuteWorkList - Run the worklist algorithm for a maximum number of steps.
diff --git a/lib/StaticAnalyzer/Core/DynamicTypeMap.cpp b/lib/StaticAnalyzer/Core/DynamicTypeMap.cpp
index 5309339168..da7854df1d 100644
--- a/lib/StaticAnalyzer/Core/DynamicTypeMap.cpp
+++ b/lib/StaticAnalyzer/Core/DynamicTypeMap.cpp
@@ -77,5 +77,10 @@ void printDynamicTypeInfo(ProgramStateRef State, raw_ostream &Out,
}
}
+void *ProgramStateTrait<DynamicTypeMap>::GDMIndex() {
+ static int index = 0;
+ return &index;
+}
+
} // namespace ento
} // namespace clang
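
Defining GDMIndex() out of line follows the usual pattern for program-state traits: the trait's key in the generic data map is the address of a static local, and keeping the definition in a single .cpp guarantees one key per trait across the analyzer. A sketch for a hypothetical trait MyTraitMap (assuming its ProgramStateTrait specialization is declared in a header):

    void *ProgramStateTrait<MyTraitMap>::GDMIndex() {
      static int Index = 0;
      return &Index;  // this unique address identifies the trait in the GDM
    }
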
diff --git a/lib/StaticAnalyzer/Core/Environment.cpp b/lib/StaticAnalyzer/Core/Environment.cpp
index 96b5931036..b45f93b6dd 100644
--- a/lib/StaticAnalyzer/Core/Environment.cpp
+++ b/lib/StaticAnalyzer/Core/Environment.cpp
@@ -44,6 +44,9 @@ static const Expr *ignoreTransparentExprs(const Expr *E) {
case Stmt::ExprWithCleanupsClass:
E = cast<ExprWithCleanups>(E)->getSubExpr();
break;
+ case Stmt::ConstantExprClass:
+ E = cast<ConstantExpr>(E)->getSubExpr();
+ break;
case Stmt::CXXBindTemporaryExprClass:
E = cast<CXXBindTemporaryExpr>(E)->getSubExpr();
break;
@@ -89,6 +92,7 @@ SVal Environment::getSVal(const EnvironmentEntry &Entry,
case Stmt::ExprWithCleanupsClass:
case Stmt::GenericSelectionExprClass:
case Stmt::OpaqueValueExprClass:
+ case Stmt::ConstantExprClass:
case Stmt::ParenExprClass:
case Stmt::SubstNonTypeTemplateParmExprClass:
llvm_unreachable("Should have been handled by ignoreTransparentExprs");
@@ -189,11 +193,6 @@ EnvironmentManager::removeDeadBindings(Environment Env,
// Mark all symbols in the block expr's value live.
RSScaner.scan(X);
- continue;
- } else {
- SymExpr::symbol_iterator SI = X.symbol_begin(), SE = X.symbol_end();
- for (; SI != SE; ++SI)
- SymReaper.maybeDead(*SI);
}
}
diff --git a/lib/StaticAnalyzer/Core/ExplodedGraph.cpp b/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
index 7d7b88a811..d6bcbb96b5 100644
--- a/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
+++ b/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
@@ -284,15 +284,13 @@ ExplodedNode * const *ExplodedNode::NodeGroup::end() const {
}
int64_t ExplodedNode::getID(ExplodedGraph *G) const {
- Optional<int64_t> Out = G->getAllocator().identifyObject(this);
- assert(Out && "Wrong allocator used");
- assert(*Out % alignof(ExplodedNode) == 0 && "Wrong alignment information");
- return *Out / alignof(ExplodedNode);
+ return G->getAllocator().identifyKnownAlignedObject<ExplodedNode>(this);
}
bool ExplodedNode::isTrivial() const {
return pred_size() == 1 && succ_size() == 1 &&
- (*pred_begin())->getState()->getID() == getState()->getID();
+ getFirstPred()->getState()->getID() == getState()->getID() &&
+ getFirstPred()->succ_size() == 1;
}
ExplodedNode *ExplodedGraph::getNode(const ProgramPoint &L,
diff --git a/lib/StaticAnalyzer/Core/ExprEngine.cpp b/lib/StaticAnalyzer/Core/ExprEngine.cpp
index 151865f874..45d0df7ae4 100644
--- a/lib/StaticAnalyzer/Core/ExprEngine.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -98,11 +98,12 @@ STATISTIC(NumMaxBlockCountReachedInInlined,
STATISTIC(NumTimesRetriedWithoutInlining,
"The # of times we re-evaluated a call without inlining");
-
//===----------------------------------------------------------------------===//
// Internal program state traits.
//===----------------------------------------------------------------------===//
+namespace {
+
// When modeling a C++ constructor, for a variety of reasons we need to track
// the location of the object for the duration of its ConstructionContext.
// ObjectsUnderConstruction maps statements within the construction context
@@ -137,9 +138,17 @@ public:
const ConstructionContextItem &getItem() const { return Impl.first; }
const LocationContext *getLocationContext() const { return Impl.second; }
+ ASTContext &getASTContext() const {
+ return getLocationContext()->getDecl()->getASTContext();
+ }
+
void print(llvm::raw_ostream &OS, PrinterHelper *Helper, PrintingPolicy &PP) {
- OS << '(' << getLocationContext() << ',' << getAnyASTNodePtr() << ','
- << getItem().getKindAsString();
+ OS << "(LC" << getLocationContext()->getID() << ',';
+ if (const Stmt *S = getItem().getStmtOrNull())
+ OS << 'S' << S->getID(getASTContext());
+ else
+ OS << 'I' << getItem().getCXXCtorInitializer()->getID(getASTContext());
+ OS << ',' << getItem().getKindAsString();
if (getItem().getKind() == ConstructionContextItem::ArgumentKind)
OS << " #" << getItem().getIndex();
OS << ") ";
@@ -164,6 +173,7 @@ public:
return Impl < RHS.Impl;
}
};
+} // namespace
typedef llvm::ImmutableMap<ConstructedObjectKey, SVal>
ObjectsUnderConstructionMap;
@@ -191,9 +201,9 @@ ExprEngine::ExprEngine(cross_tu::CrossTranslationUnitContext &CTU,
svalBuilder(StateMgr.getSValBuilder()), ObjCNoRet(mgr.getASTContext()),
BR(mgr, *this),
VisitedCallees(VisitedCalleesIn), HowToInline(HowToInlineIn) {
- unsigned TrimInterval = mgr.options.getGraphTrimInterval();
+ unsigned TrimInterval = mgr.options.GraphTrimInterval;
if (TrimInterval != 0) {
- // Enable eager node reclaimation when constructing the ExplodedGraph.
+ // Enable eager node reclamation when constructing the ExplodedGraph.
G.enableNodeReclamation(TrimInterval);
}
}
@@ -673,44 +683,35 @@ void ExprEngine::removeDead(ExplodedNode *Pred, ExplodedNodeSet &Out,
// Process any special transfer function for dead symbols.
// A tag to track convenience transitions, which can be removed at cleanup.
static SimpleProgramPointTag cleanupTag(TagProviderName, "Clean Node");
- if (!SymReaper.hasDeadSymbols()) {
- // Generate a CleanedNode that has the environment and store cleaned
- // up. Since no symbols are dead, we can optimize and not clean out
- // the constraint manager.
- StmtNodeBuilder Bldr(Pred, Out, *currBldrCtx);
- Bldr.generateNode(DiagnosticStmt, Pred, CleanedState, &cleanupTag, K);
-
- } else {
- // Call checkers with the non-cleaned state so that they could query the
- // values of the soon to be dead symbols.
- ExplodedNodeSet CheckedSet;
- getCheckerManager().runCheckersForDeadSymbols(CheckedSet, Pred, SymReaper,
- DiagnosticStmt, *this, K);
-
- // For each node in CheckedSet, generate CleanedNodes that have the
- // environment, the store, and the constraints cleaned up but have the
- // user-supplied states as the predecessors.
- StmtNodeBuilder Bldr(CheckedSet, Out, *currBldrCtx);
- for (const auto I : CheckedSet) {
- ProgramStateRef CheckerState = I->getState();
-
- // The constraint manager has not been cleaned up yet, so clean up now.
- CheckerState = getConstraintManager().removeDeadBindings(CheckerState,
- SymReaper);
-
- assert(StateMgr.haveEqualEnvironments(CheckerState, Pred->getState()) &&
- "Checkers are not allowed to modify the Environment as a part of "
- "checkDeadSymbols processing.");
- assert(StateMgr.haveEqualStores(CheckerState, Pred->getState()) &&
- "Checkers are not allowed to modify the Store as a part of "
- "checkDeadSymbols processing.");
-
- // Create a state based on CleanedState with CheckerState GDM and
- // generate a transition to that state.
- ProgramStateRef CleanedCheckerSt =
+ // Call checkers with the non-cleaned state so that they could query the
+ // values of the soon to be dead symbols.
+ ExplodedNodeSet CheckedSet;
+ getCheckerManager().runCheckersForDeadSymbols(CheckedSet, Pred, SymReaper,
+ DiagnosticStmt, *this, K);
+
+ // For each node in CheckedSet, generate CleanedNodes that have the
+ // environment, the store, and the constraints cleaned up but have the
+ // user-supplied states as the predecessors.
+ StmtNodeBuilder Bldr(CheckedSet, Out, *currBldrCtx);
+ for (const auto I : CheckedSet) {
+ ProgramStateRef CheckerState = I->getState();
+
+ // The constraint manager has not been cleaned up yet, so clean up now.
+ CheckerState =
+ getConstraintManager().removeDeadBindings(CheckerState, SymReaper);
+
+ assert(StateMgr.haveEqualEnvironments(CheckerState, Pred->getState()) &&
+ "Checkers are not allowed to modify the Environment as a part of "
+ "checkDeadSymbols processing.");
+ assert(StateMgr.haveEqualStores(CheckerState, Pred->getState()) &&
+ "Checkers are not allowed to modify the Store as a part of "
+ "checkDeadSymbols processing.");
+
+ // Create a state based on CleanedState with CheckerState GDM and
+ // generate a transition to that state.
+ ProgramStateRef CleanedCheckerSt =
StateMgr.getPersistentStateWithGDM(CleanedState, CheckerState);
- Bldr.generateNode(DiagnosticStmt, I, CleanedCheckerSt, &cleanupTag, K);
- }
+ Bldr.generateNode(DiagnosticStmt, I, CleanedCheckerSt, &cleanupTag, K);
}
}
@@ -753,7 +754,7 @@ void ExprEngine::ProcessLoopExit(const Stmt* S, ExplodedNode *Pred) {
NodeBuilder Bldr(Pred, Dst, *currBldrCtx);
ProgramStateRef NewState = Pred->getState();
- if(AMgr.options.shouldUnrollLoops())
+ if(AMgr.options.ShouldUnrollLoops)
NewState = processLoopEnd(S, NewState);
LoopExit PP(S, Pred->getLocationContext());
@@ -885,7 +886,7 @@ void ExprEngine::ProcessNewAllocator(const CXXNewExpr *NE,
// TODO: We're not evaluating allocators for all cases just yet as
// we're not handling the return value correctly, which causes false
// positives when the alpha.cplusplus.NewDeleteLeaks check is on.
- if (Opts.mayInlineCXXAllocator())
+ if (Opts.MayInlineCXXAllocator)
VisitCXXNewAllocatorCall(NE, Pred, Dst);
else {
NodeBuilder Bldr(Pred, Dst, *currBldrCtx);
@@ -1032,7 +1033,7 @@ void ExprEngine::ProcessTemporaryDtor(const CFGTemporaryDtor D,
MR = V->getAsRegion();
}
- // If copy elision has occured, and the constructor corresponding to the
+ // If copy elision has occurred, and the constructor corresponding to the
// destructor was elided, we need to skip the destructor as well.
if (isDestructorElided(State, BTE, LC)) {
State = cleanupElidedDestructor(State, BTE, LC);
@@ -1100,7 +1101,7 @@ void ExprEngine::VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *BTE,
// This is a fallback solution in case we didn't have a construction
// context when we were constructing the temporary. Otherwise the map should
// have been populated there.
- if (!getAnalysisManager().options.includeTemporaryDtorsInCFG()) {
+ if (!getAnalysisManager().options.ShouldIncludeTemporaryDtorsInCFG) {
// In case we don't have temporary destructors in the CFG, do not mark
// the initialization - we would otherwise never clean it up.
Dst = PreVisit;
@@ -1280,6 +1281,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
Bldr.addNodes(Dst);
break;
+ case Expr::ConstantExprClass:
case Stmt::ExprWithCleanupsClass:
// Handled due to fully linearised CFG.
break;
@@ -1460,7 +1462,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
break;
case Stmt::LambdaExprClass:
- if (AMgr.options.shouldInlineLambdas()) {
+ if (AMgr.options.ShouldInlineLambdas) {
Bldr.takeNodes(Pred);
VisitLambdaExpr(cast<LambdaExpr>(S), Pred, Dst);
Bldr.addNodes(Dst);
@@ -1489,7 +1491,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
Bldr.takeNodes(Pred);
- if (AMgr.options.shouldEagerlyAssume() &&
+ if (AMgr.options.ShouldEagerlyAssume &&
(B->isRelationalOp() || B->isEqualityOp())) {
ExplodedNodeSet Tmp;
VisitBinaryOperator(cast<BinaryOperator>(S), Pred, Tmp);
@@ -1753,7 +1755,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::UnaryOperatorClass: {
Bldr.takeNodes(Pred);
const auto *U = cast<UnaryOperator>(S);
- if (AMgr.options.shouldEagerlyAssume() && (U->getOpcode() == UO_LNot)) {
+ if (AMgr.options.ShouldEagerlyAssume && (U->getOpcode() == UO_LNot)) {
ExplodedNodeSet Tmp;
VisitUnaryOperator(U, Pred, Tmp);
evalEagerlyAssumeBinOpBifurcation(Dst, Tmp, U);
@@ -1854,7 +1856,7 @@ void ExprEngine::processCFGBlockEntrance(const BlockEdge &L,
PrettyStackTraceLocationContext CrashInfo(Pred->getLocationContext());
// If we reach a loop which has a known bound (and meets
// other constraints) then consider completely unrolling it.
- if(AMgr.options.shouldUnrollLoops()) {
+ if(AMgr.options.ShouldUnrollLoops) {
unsigned maxBlockVisitOnPath = AMgr.options.maxBlockVisitOnPath;
const Stmt *Term = nodeBuilder.getContext().getBlock()->getTerminator();
if (Term) {
@@ -1876,7 +1878,7 @@ void ExprEngine::processCFGBlockEntrance(const BlockEdge &L,
// maximum number of times, widen the loop.
unsigned int BlockCount = nodeBuilder.getContext().blockCount();
if (BlockCount == AMgr.options.maxBlockVisitOnPath - 1 &&
- AMgr.options.shouldWidenLoops()) {
+ AMgr.options.ShouldWidenLoops) {
const Stmt *Term = nodeBuilder.getContext().getBlock()->getTerminator();
if (!(Term &&
(isa<ForStmt>(Term) || isa<WhileStmt>(Term) || isa<DoStmt>(Term))))
@@ -2377,7 +2379,7 @@ void ExprEngine::VisitCommonDeclRefExpr(const Expr *Ex, const NamedDecl *D,
const auto *DeclRefEx = dyn_cast<DeclRefExpr>(Ex);
Optional<std::pair<SVal, QualType>> VInfo;
- if (AMgr.options.shouldInlineLambdas() && DeclRefEx &&
+ if (AMgr.options.ShouldInlineLambdas && DeclRefEx &&
DeclRefEx->refersToEnclosingVariableOrCapture() && MD &&
MD->getParent()->isLambda()) {
// Lookup the field of the lambda.
@@ -3106,3 +3108,8 @@ std::string ExprEngine::DumpGraph(ArrayRef<const ExplodedNode*> Nodes,
llvm::errs() << "Warning: dumping graph requires assertions" << "\n";
return "";
}
+
+void *ProgramStateTrait<ReplayWithoutInlining>::GDMIndex() {
+ static int index = 0;
+ return &index;
+}
diff --git a/lib/StaticAnalyzer/Core/ExprEngineC.cpp b/lib/StaticAnalyzer/Core/ExprEngineC.cpp
index 61b7a290e4..7d47cf4f33 100644
--- a/lib/StaticAnalyzer/Core/ExprEngineC.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngineC.cpp
@@ -412,10 +412,11 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
case CK_BlockPointerToObjCPointerCast:
case CK_AnyPointerToBlockPointerCast:
case CK_ObjCObjectLValueCast:
- case CK_ZeroToOCLEvent:
- case CK_ZeroToOCLQueue:
+ case CK_ZeroToOCLOpaqueType:
case CK_IntToOCLSampler:
- case CK_LValueBitCast: {
+ case CK_LValueBitCast:
+ case CK_FixedPointCast:
+ case CK_FixedPointToBoolean: {
state =
handleLValueBitCast(state, Ex, LCtx, T, ExTy, CastE, Bldr, Pred);
continue;
@@ -809,8 +810,9 @@ void ExprEngine::
VisitOffsetOfExpr(const OffsetOfExpr *OOE,
ExplodedNode *Pred, ExplodedNodeSet &Dst) {
StmtNodeBuilder B(Pred, Dst, *currBldrCtx);
- APSInt IV;
- if (OOE->EvaluateAsInt(IV, getContext())) {
+ Expr::EvalResult Result;
+ if (OOE->EvaluateAsInt(Result, getContext())) {
+ APSInt IV = Result.Val.getInt();
assert(IV.getBitWidth() == getContext().getTypeSize(OOE->getType()));
assert(OOE->getType()->isBuiltinType());
assert(OOE->getType()->getAs<BuiltinType>()->isInteger());
@@ -956,7 +958,7 @@ void ExprEngine::VisitUnaryOperator(const UnaryOperator* U, ExplodedNode *Pred,
}
case UO_Plus:
assert(!U->isGLValue());
- // FALL-THROUGH.
+ LLVM_FALLTHROUGH;
case UO_Deref:
case UO_Extension: {
handleUOExtension(I, U, Bldr);
diff --git a/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp b/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
index 933d54e11c..1f64976a9f 100644
--- a/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
@@ -159,7 +159,7 @@ std::pair<ProgramStateRef, SVal> ExprEngine::prepareForObjectConstruction(
return std::make_pair(State, FieldVal);
}
case ConstructionContext::NewAllocatedObjectKind: {
- if (AMgr.getAnalyzerOptions().mayInlineCXXAllocator()) {
+ if (AMgr.getAnalyzerOptions().MayInlineCXXAllocator) {
const auto *NECC = cast<NewAllocatedObjectConstructionContext>(CC);
const auto *NE = NECC->getCXXNewExpr();
SVal V = *getObjectUnderConstruction(State, NE, LCtx);
@@ -210,7 +210,7 @@ std::pair<ProgramStateRef, SVal> ExprEngine::prepareForObjectConstruction(
llvm_unreachable("Unhandled return value construction context!");
}
case ConstructionContext::ElidedTemporaryObjectKind: {
- assert(AMgr.getAnalyzerOptions().shouldElideConstructors());
+ assert(AMgr.getAnalyzerOptions().ShouldElideConstructors);
const auto *TCC = cast<ElidedTemporaryObjectConstructionContext>(CC);
const CXXBindTemporaryExpr *BTE = TCC->getCXXBindTemporaryExpr();
const MaterializeTemporaryExpr *MTE = TCC->getMaterializedTemporaryExpr();
@@ -426,7 +426,7 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
}
}
}
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case CXXConstructExpr::CK_NonVirtualBase:
// In C++17, classes with non-virtual bases may be aggregates, so they would
// be initialized as aggregates without a constructor call, so we may have
@@ -445,7 +445,7 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion = true;
break;
}
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case CXXConstructExpr::CK_Delegating: {
const CXXMethodDecl *CurCtor = cast<CXXMethodDecl>(LCtx->getDecl());
Loc ThisPtr = getSValBuilder().getCXXThis(CurCtor,
@@ -706,7 +706,7 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
ProgramStateRef State = Pred->getState();
// Retrieve the stored operator new() return value.
- if (AMgr.getAnalyzerOptions().mayInlineCXXAllocator()) {
+ if (AMgr.getAnalyzerOptions().MayInlineCXXAllocator) {
symVal = *getObjectUnderConstruction(State, CNE, LCtx);
State = finishObjectConstruction(State, CNE, LCtx);
}
@@ -726,7 +726,7 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
CallEventRef<CXXAllocatorCall> Call =
CEMgr.getCXXAllocatorCall(CNE, State, LCtx);
- if (!AMgr.getAnalyzerOptions().mayInlineCXXAllocator()) {
+ if (!AMgr.getAnalyzerOptions().MayInlineCXXAllocator) {
// Invalidate placement args.
// FIXME: Once we figure out how we want allocators to work,
// we should be using the usual pre-/(default-)eval-/post-call checks here.
diff --git a/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp b/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
index 66cbebad1c..758195d8d9 100644
--- a/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
@@ -349,7 +349,7 @@ void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
/*WasInlined=*/true);
} else if (CE &&
!(isa<CXXNewExpr>(CE) && // Called when visiting CXXNewExpr.
- AMgr.getAnalyzerOptions().mayInlineCXXAllocator())) {
+ AMgr.getAnalyzerOptions().MayInlineCXXAllocator)) {
getCheckerManager().runCheckersForPostStmt(Dst, DstPostCall, CE,
*this, /*WasInlined=*/true);
} else {
@@ -386,7 +386,7 @@ void ExprEngine::examineStackFrames(const Decl *D, const LocationContext *LCtx,
// Do not count the small functions when determining the stack depth.
AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(DI);
const CFG *CalleeCFG = CalleeADC->getCFG();
- if (CalleeCFG->getNumBlockIDs() > AMgr.options.getAlwaysInlineSize())
+ if (CalleeCFG->getNumBlockIDs() > AMgr.options.AlwaysInlineSize)
++StackDepth;
}
LCtx = LCtx->getParent();
@@ -683,7 +683,7 @@ ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
: nullptr;
if (CC && isa<NewAllocatedObjectConstructionContext>(CC) &&
- !Opts.mayInlineCXXAllocator())
+ !Opts.MayInlineCXXAllocator)
return CIP_DisallowedOnce;
// FIXME: We don't handle constructors or destructors for arrays properly.
@@ -712,7 +712,7 @@ ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
// If we don't handle temporary destructors, we shouldn't inline
// their constructors.
if (CallOpts.IsTemporaryCtorOrDtor &&
- !Opts.includeTemporaryDtorsInCFG())
+ !Opts.ShouldIncludeTemporaryDtorsInCFG)
return CIP_DisallowedOnce;
// If we did not find the correct this-region, it would be pointless
@@ -743,7 +743,8 @@ ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
return CIP_DisallowedOnce;
// Allow disabling temporary destructor inlining with a separate option.
- if (CallOpts.IsTemporaryCtorOrDtor && !Opts.mayInlineCXXTemporaryDtors())
+ if (CallOpts.IsTemporaryCtorOrDtor &&
+ !Opts.MayInlineCXXTemporaryDtors)
return CIP_DisallowedOnce;
// If we did not find the correct this-region, it would be pointless
@@ -754,13 +755,13 @@ ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
break;
}
case CE_CXXAllocator:
- if (Opts.mayInlineCXXAllocator())
+ if (Opts.MayInlineCXXAllocator)
break;
// Do not inline allocators until we model deallocators.
// This is unfortunate, but basically necessary for smart pointers and such.
return CIP_DisallowedAlways;
case CE_ObjCMessage:
- if (!Opts.mayInlineObjCMethod())
+ if (!Opts.MayInlineObjCMethod)
return CIP_DisallowedAlways;
if (!(Opts.getIPAMode() == IPAK_DynamicDispatch ||
Opts.getIPAMode() == IPAK_DynamicDispatchBifurcate))
@@ -844,19 +845,19 @@ static bool mayInlineDecl(AnalysisManager &AMgr,
if (Ctx.getLangOpts().CPlusPlus) {
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CalleeADC->getDecl())) {
// Conditionally control the inlining of template functions.
- if (!Opts.mayInlineTemplateFunctions())
+ if (!Opts.MayInlineTemplateFunctions)
if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate)
return false;
// Conditionally control the inlining of C++ standard library functions.
- if (!Opts.mayInlineCXXStandardLibrary())
+ if (!Opts.MayInlineCXXStandardLibrary)
if (Ctx.getSourceManager().isInSystemHeader(FD->getLocation()))
if (AnalysisDeclContext::isInStdNamespace(FD))
return false;
// Conditionally control the inlining of methods on objects that look
// like C++ containers.
- if (!Opts.mayInlineCXXContainerMethods())
+ if (!Opts.MayInlineCXXContainerMethods)
if (!AMgr.isInCodeFile(FD->getLocation()))
if (isContainerMethod(Ctx, FD))
return false;
@@ -865,7 +866,7 @@ static bool mayInlineDecl(AnalysisManager &AMgr,
// We don't currently do a good job modeling shared_ptr because we can't
// see the reference count, so treating as opaque is probably the best
// idea.
- if (!Opts.mayInlineCXXSharedPtrDtor())
+ if (!Opts.MayInlineCXXSharedPtrDtor)
if (isCXXSharedPtrDtor(FD))
return false;
}
@@ -878,7 +879,7 @@ static bool mayInlineDecl(AnalysisManager &AMgr,
return false;
// Do not inline large functions.
- if (CalleeCFG->getNumBlockIDs() > Opts.getMaxInlinableSize())
+ if (CalleeCFG->getNumBlockIDs() > Opts.MaxInlinableSize)
return false;
// It is possible that the live variables analysis cannot be
@@ -946,21 +947,21 @@ bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
unsigned StackDepth = 0;
examineStackFrames(D, Pred->getLocationContext(), IsRecursive, StackDepth);
if ((StackDepth >= Opts.InlineMaxStackDepth) &&
- ((CalleeCFG->getNumBlockIDs() > Opts.getAlwaysInlineSize())
+ ((CalleeCFG->getNumBlockIDs() > Opts.AlwaysInlineSize)
|| IsRecursive))
return false;
// Do not inline large functions too many times.
if ((Engine.FunctionSummaries->getNumTimesInlined(D) >
- Opts.getMaxTimesInlineLarge()) &&
+ Opts.MaxTimesInlineLarge) &&
CalleeCFG->getNumBlockIDs() >=
- Opts.getMinCFGSizeTreatFunctionsAsLarge()) {
+ Opts.MinCFGSizeTreatFunctionsAsLarge) {
NumReachedInlineCountMax++;
return false;
}
if (HowToInline == Inline_Minimal &&
- (CalleeCFG->getNumBlockIDs() > Opts.getAlwaysInlineSize()
+ (CalleeCFG->getNumBlockIDs() > Opts.AlwaysInlineSize
|| IsRecursive))
return false;
diff --git a/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp b/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
index d5764a4099..fc82f11769 100644
--- a/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
+++ b/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
@@ -218,7 +218,7 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
int FD;
SmallString<128> Model, ResultPath;
- if (!AnalyzerOpts.shouldWriteStableReportFilename()) {
+ if (!AnalyzerOpts.ShouldWriteStableReportFilename) {
llvm::sys::path::append(Model, Directory, "report-%%%%%%.html");
if (std::error_code EC =
llvm::sys::fs::make_absolute(Model)) {
diff --git a/lib/StaticAnalyzer/Core/MemRegion.cpp b/lib/StaticAnalyzer/Core/MemRegion.cpp
index 221b9176de..da368de322 100644
--- a/lib/StaticAnalyzer/Core/MemRegion.cpp
+++ b/lib/StaticAnalyzer/Core/MemRegion.cpp
@@ -1175,6 +1175,15 @@ const MemRegion *MemRegion::getBaseRegion() const {
return R;
}
+// getMostDerivedObjectRegion gets the region of the most derived object,
+// i.e. the region left after stripping off all CXXBaseObjectRegion layers.
+const MemRegion *MemRegion::getMostDerivedObjectRegion() const {
+ const MemRegion *R = this;
+ while (const auto *BR = dyn_cast<CXXBaseObjectRegion>(R))
+ R = BR->getSuperRegion();
+ return R;
+}
+
bool MemRegion::isSubRegionOf(const MemRegion *) const {
return false;
}
diff --git a/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp b/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
index b45dfcfb36..db4cf76578 100644
--- a/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
+++ b/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
@@ -16,6 +16,7 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Version.h"
#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/TokenConcatenation.h"
#include "clang/Rewrite/Core/HTMLRewrite.h"
#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
@@ -24,20 +25,26 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
+
using namespace clang;
using namespace ento;
using namespace markup;
+//===----------------------------------------------------------------------===//
+// Declarations of helper classes and functions for emitting bug reports in
+// plist format.
+//===----------------------------------------------------------------------===//
+
namespace {
class PlistDiagnostics : public PathDiagnosticConsumer {
const std::string OutputFile;
- const LangOptions &LangOpts;
+ const Preprocessor &PP;
+ AnalyzerOptions &AnOpts;
const bool SupportsCrossFileDiagnostics;
- const bool SerializeStatistics;
public:
PlistDiagnostics(AnalyzerOptions &AnalyzerOpts,
const std::string& prefix,
- const LangOptions &LangOpts,
+ const Preprocessor &PP,
bool supportsMultipleFiles);
~PlistDiagnostics() override {}
@@ -59,37 +66,116 @@ namespace {
};
} // end anonymous namespace
-PlistDiagnostics::PlistDiagnostics(AnalyzerOptions &AnalyzerOpts,
- const std::string& output,
- const LangOptions &LO,
- bool supportsMultipleFiles)
- : OutputFile(output),
- LangOpts(LO),
- SupportsCrossFileDiagnostics(supportsMultipleFiles),
- SerializeStatistics(AnalyzerOpts.shouldSerializeStats()) {}
+namespace {
-void ento::createPlistDiagnosticConsumer(AnalyzerOptions &AnalyzerOpts,
- PathDiagnosticConsumers &C,
- const std::string& s,
- const Preprocessor &PP) {
- C.push_back(new PlistDiagnostics(AnalyzerOpts, s,
- PP.getLangOpts(), false));
-}
+/// A helper class for emitting a single report.
+class PlistPrinter {
+ const FIDMap& FM;
+ AnalyzerOptions &AnOpts;
+ const Preprocessor &PP;
+ llvm::SmallVector<const PathDiagnosticMacroPiece *, 0> MacroPieces;
+
+public:
+ PlistPrinter(const FIDMap& FM, AnalyzerOptions &AnOpts,
+ const Preprocessor &PP)
+ : FM(FM), AnOpts(AnOpts), PP(PP) {
+ }
-void ento::createPlistMultiFileDiagnosticConsumer(AnalyzerOptions &AnalyzerOpts,
- PathDiagnosticConsumers &C,
- const std::string &s,
- const Preprocessor &PP) {
- C.push_back(new PlistDiagnostics(AnalyzerOpts, s,
- PP.getLangOpts(), true));
-}
+ void ReportDiag(raw_ostream &o, const PathDiagnosticPiece& P) {
+ ReportPiece(o, P, /*indent*/ 4, /*depth*/ 0, /*includeControlFlow*/ true);
+
+ // Don't emit a warning about an unused private field.
+ (void)AnOpts;
+ }
+
+ /// Print the expansions of the collected macro pieces.
+ ///
+  /// Each time ReportDiag is called on a PathDiagnosticMacroPiece (or one
+  /// found through a call piece, etc.), its subpieces are reported and the
+  /// piece itself is collected. Call this function after the entire bug path
+  /// has been reported.
+ void ReportMacroExpansions(raw_ostream &o, unsigned indent);
+
+private:
+ void ReportPiece(raw_ostream &o, const PathDiagnosticPiece &P,
+ unsigned indent, unsigned depth, bool includeControlFlow,
+ bool isKeyEvent = false) {
+ switch (P.getKind()) {
+ case PathDiagnosticPiece::ControlFlow:
+ if (includeControlFlow)
+ ReportControlFlow(o, cast<PathDiagnosticControlFlowPiece>(P), indent);
+ break;
+ case PathDiagnosticPiece::Call:
+ ReportCall(o, cast<PathDiagnosticCallPiece>(P), indent,
+ depth);
+ break;
+ case PathDiagnosticPiece::Event:
+ ReportEvent(o, cast<PathDiagnosticEventPiece>(P), indent, depth,
+ isKeyEvent);
+ break;
+ case PathDiagnosticPiece::Macro:
+ ReportMacroSubPieces(o, cast<PathDiagnosticMacroPiece>(P), indent,
+ depth);
+ break;
+ case PathDiagnosticPiece::Note:
+ ReportNote(o, cast<PathDiagnosticNotePiece>(P), indent);
+ break;
+ }
+ }
+
+ void EmitRanges(raw_ostream &o, const ArrayRef<SourceRange> Ranges,
+ unsigned indent);
+ void EmitMessage(raw_ostream &o, StringRef Message, unsigned indent);
+
+ void ReportControlFlow(raw_ostream &o,
+ const PathDiagnosticControlFlowPiece& P,
+ unsigned indent);
+ void ReportEvent(raw_ostream &o, const PathDiagnosticEventPiece& P,
+ unsigned indent, unsigned depth, bool isKeyEvent = false);
+ void ReportCall(raw_ostream &o, const PathDiagnosticCallPiece &P,
+ unsigned indent, unsigned depth);
+ void ReportMacroSubPieces(raw_ostream &o, const PathDiagnosticMacroPiece& P,
+ unsigned indent, unsigned depth);
+ void ReportNote(raw_ostream &o, const PathDiagnosticNotePiece& P,
+ unsigned indent);
+};
+
+} // end of anonymous namespace
-static void EmitRanges(raw_ostream &o,
- const ArrayRef<SourceRange> Ranges,
- const FIDMap& FM,
- const SourceManager &SM,
- const LangOptions &LangOpts,
- unsigned indent) {
+namespace {
+
+struct ExpansionInfo {
+ std::string MacroName;
+ std::string Expansion;
+ ExpansionInfo(std::string N, std::string E)
+ : MacroName(std::move(N)), Expansion(std::move(E)) {}
+};
+
+} // end of anonymous namespace
+
+static void printBugPath(llvm::raw_ostream &o, const FIDMap& FM,
+ AnalyzerOptions &AnOpts,
+ const Preprocessor &PP,
+ const PathPieces &Path);
+
+/// Print coverage information to output stream {@code o}.
+/// May modify the used list of files {@code Fids} by inserting new ones.
+static void printCoverage(const PathDiagnostic *D,
+ unsigned InputIndentLevel,
+ SmallVectorImpl<FileID> &Fids,
+ FIDMap &FM,
+ llvm::raw_fd_ostream &o);
+
+static ExpansionInfo getExpandedMacro(SourceLocation MacroLoc,
+ const Preprocessor &PP);
+
+//===----------------------------------------------------------------------===//
+// Methods of PlistPrinter.
+//===----------------------------------------------------------------------===//
+
+void PlistPrinter::EmitRanges(raw_ostream &o,
+ const ArrayRef<SourceRange> Ranges,
+ unsigned indent) {
if (Ranges.empty())
return;
@@ -97,6 +183,10 @@ static void EmitRanges(raw_ostream &o,
Indent(o, indent) << "<key>ranges</key>\n";
Indent(o, indent) << "<array>\n";
++indent;
+
+ const SourceManager &SM = PP.getSourceManager();
+ const LangOptions &LangOpts = PP.getLangOpts();
+
for (auto &R : Ranges)
EmitRange(o, SM,
Lexer::getAsCharRange(SM.getExpansionRange(R), SM, LangOpts),
@@ -105,7 +195,8 @@ static void EmitRanges(raw_ostream &o,
Indent(o, indent) << "</array>\n";
}
-static void EmitMessage(raw_ostream &o, StringRef Message, unsigned indent) {
+void PlistPrinter::EmitMessage(raw_ostream &o, StringRef Message,
+ unsigned indent) {
// Output the text.
assert(!Message.empty());
Indent(o, indent) << "<key>extended_message</key>\n";
@@ -119,12 +210,12 @@ static void EmitMessage(raw_ostream &o, StringRef Message, unsigned indent) {
EmitString(o, Message) << '\n';
}
-static void ReportControlFlow(raw_ostream &o,
- const PathDiagnosticControlFlowPiece& P,
- const FIDMap& FM,
- const SourceManager &SM,
- const LangOptions &LangOpts,
- unsigned indent) {
+void PlistPrinter::ReportControlFlow(raw_ostream &o,
+ const PathDiagnosticControlFlowPiece& P,
+ unsigned indent) {
+
+ const SourceManager &SM = PP.getSourceManager();
+ const LangOptions &LangOpts = PP.getLangOpts();
Indent(o, indent) << "<dict>\n";
++indent;
@@ -173,13 +264,11 @@ static void ReportControlFlow(raw_ostream &o,
Indent(o, indent) << "</dict>\n";
}
-static void ReportEvent(raw_ostream &o, const PathDiagnosticEventPiece& P,
- const FIDMap& FM,
- const SourceManager &SM,
- const LangOptions &LangOpts,
- unsigned indent,
- unsigned depth,
- bool isKeyEvent = false) {
+void PlistPrinter::ReportEvent(raw_ostream &o, const PathDiagnosticEventPiece& P,
+ unsigned indent, unsigned depth,
+ bool isKeyEvent) {
+
+ const SourceManager &SM = PP.getSourceManager();
Indent(o, indent) << "<dict>\n";
++indent;
@@ -198,7 +287,7 @@ static void ReportEvent(raw_ostream &o, const PathDiagnosticEventPiece& P,
// Output the ranges (if any).
ArrayRef<SourceRange> Ranges = P.getRanges();
- EmitRanges(o, Ranges, FM, SM, LangOpts, indent);
+ EmitRanges(o, Ranges, indent);
// Output the call depth.
Indent(o, indent) << "<key>depth</key>";
@@ -212,61 +301,80 @@ static void ReportEvent(raw_ostream &o, const PathDiagnosticEventPiece& P,
Indent(o, indent); o << "</dict>\n";
}
-static void ReportPiece(raw_ostream &o,
- const PathDiagnosticPiece &P,
- const FIDMap& FM, const SourceManager &SM,
- const LangOptions &LangOpts,
- unsigned indent,
- unsigned depth,
- bool includeControlFlow,
- bool isKeyEvent = false);
-
-static void ReportCall(raw_ostream &o,
- const PathDiagnosticCallPiece &P,
- const FIDMap& FM, const SourceManager &SM,
- const LangOptions &LangOpts,
- unsigned indent,
- unsigned depth) {
+void PlistPrinter::ReportCall(raw_ostream &o, const PathDiagnosticCallPiece &P,
+ unsigned indent,
+ unsigned depth) {
if (auto callEnter = P.getCallEnterEvent())
- ReportPiece(o, *callEnter, FM, SM, LangOpts, indent, depth, true,
+ ReportPiece(o, *callEnter, indent, depth, /*includeControlFlow*/ true,
P.isLastInMainSourceFile());
++depth;
if (auto callEnterWithinCaller = P.getCallEnterWithinCallerEvent())
- ReportPiece(o, *callEnterWithinCaller, FM, SM, LangOpts,
- indent, depth, true);
+ ReportPiece(o, *callEnterWithinCaller, indent, depth,
+ /*includeControlFlow*/ true);
for (PathPieces::const_iterator I = P.path.begin(), E = P.path.end();I!=E;++I)
- ReportPiece(o, **I, FM, SM, LangOpts, indent, depth, true);
+ ReportPiece(o, **I, indent, depth, /*includeControlFlow*/ true);
--depth;
if (auto callExit = P.getCallExitEvent())
- ReportPiece(o, *callExit, FM, SM, LangOpts, indent, depth, true);
+ ReportPiece(o, *callExit, indent, depth, /*includeControlFlow*/ true);
}
-static void ReportMacro(raw_ostream &o,
- const PathDiagnosticMacroPiece& P,
- const FIDMap& FM, const SourceManager &SM,
- const LangOptions &LangOpts,
- unsigned indent,
- unsigned depth) {
+void PlistPrinter::ReportMacroSubPieces(raw_ostream &o,
+ const PathDiagnosticMacroPiece& P,
+ unsigned indent, unsigned depth) {
+ MacroPieces.push_back(&P);
- for (PathPieces::const_iterator I = P.subPieces.begin(), E=P.subPieces.end();
- I!=E; ++I) {
- ReportPiece(o, **I, FM, SM, LangOpts, indent, depth, false);
+ for (PathPieces::const_iterator I = P.subPieces.begin(),
+ E = P.subPieces.end();
+ I != E; ++I) {
+ ReportPiece(o, **I, indent, depth, /*includeControlFlow*/ false);
}
}
-static void ReportNote(raw_ostream &o, const PathDiagnosticNotePiece& P,
- const FIDMap& FM,
- const SourceManager &SM,
- const LangOptions &LangOpts,
- unsigned indent,
- unsigned depth) {
+void PlistPrinter::ReportMacroExpansions(raw_ostream &o, unsigned indent) {
+
+ for (const PathDiagnosticMacroPiece *P : MacroPieces) {
+ const SourceManager &SM = PP.getSourceManager();
+ ExpansionInfo EI = getExpandedMacro(P->getLocation().asLocation(), PP);
+
+ Indent(o, indent) << "<dict>\n";
+ ++indent;
+
+ // Output the location.
+ FullSourceLoc L = P->getLocation().asLocation();
+
+ Indent(o, indent) << "<key>location</key>\n";
+ EmitLocation(o, SM, L, FM, indent);
+
+ // Output the ranges (if any).
+ ArrayRef<SourceRange> Ranges = P->getRanges();
+ EmitRanges(o, Ranges, indent);
+
+ // Output the macro name.
+ Indent(o, indent) << "<key>name</key>";
+ EmitString(o, EI.MacroName) << '\n';
+
+ // Output what it expands into.
+ Indent(o, indent) << "<key>expansion</key>";
+ EmitString(o, EI.Expansion) << '\n';
+
+ // Finish up.
+ --indent;
+ Indent(o, indent);
+ o << "</dict>\n";
+ }
+}
+
+void PlistPrinter::ReportNote(raw_ostream &o, const PathDiagnosticNotePiece& P,
+ unsigned indent) {
+
+ const SourceManager &SM = PP.getSourceManager();
Indent(o, indent) << "<dict>\n";
++indent;
@@ -279,7 +387,7 @@ static void ReportNote(raw_ostream &o, const PathDiagnosticNotePiece& P,
// Output the ranges (if any).
ArrayRef<SourceRange> Ranges = P.getRanges();
- EmitRanges(o, Ranges, FM, SM, LangOpts, indent);
+ EmitRanges(o, Ranges, indent);
// Output the text.
EmitMessage(o, P.getString(), indent);
@@ -289,44 +397,9 @@ static void ReportNote(raw_ostream &o, const PathDiagnosticNotePiece& P,
Indent(o, indent); o << "</dict>\n";
}
-static void ReportDiag(raw_ostream &o, const PathDiagnosticPiece& P,
- const FIDMap& FM, const SourceManager &SM,
- const LangOptions &LangOpts) {
- ReportPiece(o, P, FM, SM, LangOpts, 4, 0, true);
-}
-
-static void ReportPiece(raw_ostream &o,
- const PathDiagnosticPiece &P,
- const FIDMap& FM, const SourceManager &SM,
- const LangOptions &LangOpts,
- unsigned indent,
- unsigned depth,
- bool includeControlFlow,
- bool isKeyEvent) {
- switch (P.getKind()) {
- case PathDiagnosticPiece::ControlFlow:
- if (includeControlFlow)
- ReportControlFlow(o, cast<PathDiagnosticControlFlowPiece>(P), FM, SM,
- LangOpts, indent);
- break;
- case PathDiagnosticPiece::Call:
- ReportCall(o, cast<PathDiagnosticCallPiece>(P), FM, SM, LangOpts,
- indent, depth);
- break;
- case PathDiagnosticPiece::Event:
- ReportEvent(o, cast<PathDiagnosticEventPiece>(P), FM, SM, LangOpts,
- indent, depth, isKeyEvent);
- break;
- case PathDiagnosticPiece::Macro:
- ReportMacro(o, cast<PathDiagnosticMacroPiece>(P), FM, SM, LangOpts,
- indent, depth);
- break;
- case PathDiagnosticPiece::Note:
- ReportNote(o, cast<PathDiagnosticNotePiece>(P), FM, SM, LangOpts,
- indent, depth);
- break;
- }
-}
+//===----------------------------------------------------------------------===//
+// Static function definitions.
+//===----------------------------------------------------------------------===//
/// Print coverage information to output stream {@code o}.
/// May modify the used list of files {@code Fids} by inserting new ones.
@@ -361,6 +434,78 @@ static void printCoverage(const PathDiagnostic *D,
assert(IndentLevel == InputIndentLevel);
}
+static void printBugPath(llvm::raw_ostream &o, const FIDMap& FM,
+ AnalyzerOptions &AnOpts,
+ const Preprocessor &PP,
+ const PathPieces &Path) {
+ PlistPrinter Printer(FM, AnOpts, PP);
+ assert(std::is_partitioned(
+ Path.begin(), Path.end(),
+ [](const std::shared_ptr<PathDiagnosticPiece> &E)
+ { return E->getKind() == PathDiagnosticPiece::Note; }) &&
+ "PathDiagnostic is not partitioned so that notes precede the rest");
+
+ PathPieces::const_iterator FirstNonNote = std::partition_point(
+ Path.begin(), Path.end(),
+ [](const std::shared_ptr<PathDiagnosticPiece> &E)
+ { return E->getKind() == PathDiagnosticPiece::Note; });
+
+ PathPieces::const_iterator I = Path.begin();
+
+ if (FirstNonNote != Path.begin()) {
+ o << " <key>notes</key>\n"
+ " <array>\n";
+
+ for (; I != FirstNonNote; ++I)
+ Printer.ReportDiag(o, **I);
+
+ o << " </array>\n";
+ }
+
+ o << " <key>path</key>\n";
+
+ o << " <array>\n";
+
+ for (PathPieces::const_iterator E = Path.end(); I != E; ++I)
+ Printer.ReportDiag(o, **I);
+
+ o << " </array>\n";
+
+ if (!AnOpts.ShouldDisplayMacroExpansions)
+ return;
+
+ o << " <key>macro_expansions</key>\n"
+ " <array>\n";
+ Printer.ReportMacroExpansions(o, /* indent */ 4);
+ o << " </array>\n";
+}
+
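[Aside, not part of the patch: the notes-first invariant asserted above is what makes the std::partition_point call valid. A minimal standalone sketch of that idiom, with plain bools standing in for PathDiagnosticPiece kinds:]

    #include <algorithm>
    #include <cassert>
    #include <vector>

    int main() {
      // true == "note piece"; all notes must precede the other pieces.
      std::vector<bool> Pieces = {true, true, false, false, false};
      auto IsNote = [](bool B) { return B; };

      assert(std::is_partitioned(Pieces.begin(), Pieces.end(), IsNote) &&
             "notes must precede the rest");

      // partition_point returns the first element for which the predicate
      // fails, i.e. the first non-note piece, which is what printBugPath needs.
      auto FirstNonNote =
          std::partition_point(Pieces.begin(), Pieces.end(), IsNote);
      assert(FirstNonNote - Pieces.begin() == 2);
      return 0;
    }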
+//===----------------------------------------------------------------------===//
+// Methods of PlistDiagnostics.
+//===----------------------------------------------------------------------===//
+
+PlistDiagnostics::PlistDiagnostics(AnalyzerOptions &AnalyzerOpts,
+ const std::string& output,
+ const Preprocessor &PP,
+ bool supportsMultipleFiles)
+ : OutputFile(output), PP(PP), AnOpts(AnalyzerOpts),
+ SupportsCrossFileDiagnostics(supportsMultipleFiles) {}
+
+void ento::createPlistDiagnosticConsumer(AnalyzerOptions &AnalyzerOpts,
+ PathDiagnosticConsumers &C,
+ const std::string& s,
+ const Preprocessor &PP) {
+ C.push_back(new PlistDiagnostics(AnalyzerOpts, s, PP,
+ /*supportsMultipleFiles*/ false));
+}
+
+void ento::createPlistMultiFileDiagnosticConsumer(AnalyzerOptions &AnalyzerOpts,
+ PathDiagnosticConsumers &C,
+ const std::string &s,
+ const Preprocessor &PP) {
+ C.push_back(new PlistDiagnostics(AnalyzerOpts, s, PP,
+ /*supportsMultipleFiles*/ true));
+}
void PlistDiagnostics::FlushDiagnosticsImpl(
std::vector<const PathDiagnostic *> &Diags,
FilesMade *filesMade) {
@@ -368,17 +513,15 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
// ranges of the diagnostics.
FIDMap FM;
SmallVector<FileID, 10> Fids;
- const SourceManager* SM = nullptr;
-
- if (!Diags.empty())
- SM = &Diags.front()->path.front()->getLocation().getManager();
+ const SourceManager& SM = PP.getSourceManager();
+ const LangOptions &LangOpts = PP.getLangOpts();
- auto AddPieceFID = [&FM, &Fids, SM](const PathDiagnosticPiece &Piece) {
- AddFID(FM, Fids, *SM, Piece.getLocation().asLocation());
+ auto AddPieceFID = [&FM, &Fids, &SM](const PathDiagnosticPiece &Piece) {
+ AddFID(FM, Fids, SM, Piece.getLocation().asLocation());
ArrayRef<SourceRange> Ranges = Piece.getRanges();
for (const SourceRange &Range : Ranges) {
- AddFID(FM, Fids, *SM, Range.getBegin());
- AddFID(FM, Fids, *SM, Range.getEnd());
+ AddFID(FM, Fids, SM, Range.getBegin());
+ AddFID(FM, Fids, SM, Range.getEnd());
}
};
@@ -437,39 +580,7 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
o << " <dict>\n";
const PathDiagnostic *D = *DI;
- const PathPieces &PP = D->path;
-
- assert(std::is_partitioned(
- PP.begin(), PP.end(),
- [](const std::shared_ptr<PathDiagnosticPiece> &E)
- { return E->getKind() == PathDiagnosticPiece::Note; }) &&
- "PathDiagnostic is not partitioned so that notes precede the rest");
-
- PathPieces::const_iterator FirstNonNote = std::partition_point(
- PP.begin(), PP.end(),
- [](const std::shared_ptr<PathDiagnosticPiece> &E)
- { return E->getKind() == PathDiagnosticPiece::Note; });
-
- PathPieces::const_iterator I = PP.begin();
-
- if (FirstNonNote != PP.begin()) {
- o << " <key>notes</key>\n"
- " <array>\n";
-
- for (; I != FirstNonNote; ++I)
- ReportDiag(o, **I, FM, *SM, LangOpts);
-
- o << " </array>\n";
- }
-
- o << " <key>path</key>\n";
-
- o << " <array>\n";
-
- for (PathPieces::const_iterator E = PP.end(); I != E; ++I)
- ReportDiag(o, **I, FM, *SM, LangOpts);
-
- o << " </array>\n";
+ printBugPath(o, FM, AnOpts, PP, D->path);
// Output the bug type and bug category.
o << " <key>description</key>";
@@ -484,12 +595,12 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
o << " <!-- This hash is experimental and going to change! -->\n";
o << " <key>issue_hash_content_of_line_in_context</key>";
PathDiagnosticLocation UPDLoc = D->getUniqueingLoc();
- FullSourceLoc L(SM->getExpansionLoc(UPDLoc.isValid()
+ FullSourceLoc L(SM.getExpansionLoc(UPDLoc.isValid()
? UPDLoc.asLocation()
: D->getLocation().asLocation()),
- *SM);
+ SM);
const Decl *DeclWithIssue = D->getDeclWithIssue();
- EmitString(o, GetIssueHash(*SM, L, D->getCheckName(), D->getBugType(),
+ EmitString(o, GetIssueHash(SM, L, D->getCheckName(), D->getBugType(),
DeclWithIssue, LangOpts))
<< '\n';
@@ -534,16 +645,16 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
// site and the end of scope (leak report location).
if (UPDLoc.isValid()) {
FullSourceLoc UFunL(
- SM->getExpansionLoc(
+ SM.getExpansionLoc(
D->getUniqueingDecl()->getBody()->getBeginLoc()),
- *SM);
+ SM);
o << " <key>issue_hash_function_offset</key><string>"
<< L.getExpansionLineNumber() - UFunL.getExpansionLineNumber()
<< "</string>\n";
// Otherwise, use the location on which the bug is reported.
} else {
- FullSourceLoc FunL(SM->getExpansionLoc(Body->getBeginLoc()), *SM);
+ FullSourceLoc FunL(SM.getExpansionLoc(Body->getBeginLoc()), SM);
o << " <key>issue_hash_function_offset</key><string>"
<< L.getExpansionLineNumber() - FunL.getExpansionLineNumber()
<< "</string>\n";
@@ -555,7 +666,7 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
// Output the location of the bug.
o << " <key>location</key>\n";
- EmitLocation(o, *SM, D->getLocation().asLocation(), FM, 2);
+ EmitLocation(o, SM, D->getLocation().asLocation(), FM, 2);
// Output the diagnostic to the sub-diagnostic client, if any.
if (!filesMade->empty()) {
@@ -590,10 +701,10 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
o << " <key>files</key>\n"
" <array>\n";
for (FileID FID : Fids)
- EmitString(o << " ", SM->getFileEntryForID(FID)->getName()) << '\n';
+ EmitString(o << " ", SM.getFileEntryForID(FID)->getName()) << '\n';
o << " </array>\n";
- if (llvm::AreStatisticsEnabled() && SerializeStatistics) {
+ if (llvm::AreStatisticsEnabled() && AnOpts.ShouldSerializeStats) {
o << " <key>statistics</key>\n";
std::string stats;
llvm::raw_string_ostream os(stats);
@@ -605,3 +716,402 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
// Finish.
o << "</dict>\n</plist>";
}
+
+//===----------------------------------------------------------------------===//
+// Declarations of helper functions and data structures for expanding macros.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+using ExpArgTokens = llvm::SmallVector<Token, 2>;
+
+/// Maps unexpanded macro arguments to expanded arguments. A macro argument may
+/// need to be expanded further when it is nested inside another macro.
+class MacroArgMap : public std::map<const IdentifierInfo *, ExpArgTokens> {
+public:
+ void expandFromPrevMacro(const MacroArgMap &Super);
+};
+
+struct MacroNameAndArgs {
+ std::string Name;
+ const MacroInfo *MI = nullptr;
+ MacroArgMap Args;
+
+ MacroNameAndArgs(std::string N, const MacroInfo *MI, MacroArgMap M)
+ : Name(std::move(N)), MI(MI), Args(std::move(M)) {}
+};
+
+class TokenPrinter {
+ llvm::raw_ostream &OS;
+ const Preprocessor &PP;
+
+ Token PrevTok, PrevPrevTok;
+ TokenConcatenation ConcatInfo;
+
+public:
+ TokenPrinter(llvm::raw_ostream &OS, const Preprocessor &PP)
+ : OS(OS), PP(PP), ConcatInfo(PP) {
+ PrevTok.setKind(tok::unknown);
+ PrevPrevTok.setKind(tok::unknown);
+ }
+
+ void printToken(const Token &Tok);
+};
+
+} // end of anonymous namespace
+
+/// The implementation method of getExpandedMacro: it prints the expansion of
+/// a macro to \p Printer, and returns the name of the macro.
+///
+/// Since macros can be nested in one another, this function may call itself
+/// recursively.
+///
+/// Unfortunately, macro arguments have to be expanded manually. To understand
+/// why, observe the following example:
+///
+/// #define PRINT(x) print(x)
+/// #define DO_SOMETHING(str) PRINT(str)
+///
+/// DO_SOMETHING("Cute panda cubs.");
+///
+/// As we expand the last line, we'll immediately replace PRINT(str) with
+/// print(x). The fact that both 'str' and 'x' refer to the same string is
+/// information we have to forward, hence the argument \p PrevArgs.
+static std::string getMacroNameAndPrintExpansion(TokenPrinter &Printer,
+ SourceLocation MacroLoc,
+ const Preprocessor &PP,
+ const MacroArgMap &PrevArgs);
+
+/// Retrieves the name of the macro and what its arguments expand into
+/// at \p ExpanLoc.
+///
+/// For example, for the following macro expansion:
+///
+/// #define SET_TO_NULL(x) x = 0
+/// #define NOT_SUSPICIOUS(a) \
+/// { \
+/// int b = 0; \
+/// } \
+/// SET_TO_NULL(a)
+///
+/// int *ptr = new int(4);
+/// NOT_SUSPICIOUS(&ptr);
+/// *ptr = 5;
+///
+/// When \p ExpanLoc references the last line, the macro name "NOT_SUSPICIOUS"
+/// and the MacroArgMap map { (a, &ptr) } will be returned.
+///
+/// When \p ExpanLoc references "SET_TO_NULL(a)" within the definition of
+/// "NOT_SUSPICOUS", the macro name "SET_TO_NULL" and the MacroArgMap map
+/// { (x, a) } will be returned.
+static MacroNameAndArgs getMacroNameAndArgs(SourceLocation ExpanLoc,
+ const Preprocessor &PP);
+
+/// Retrieves the ')' token that matches '(' \p It points to.
+static MacroInfo::tokens_iterator getMatchingRParen(
+ MacroInfo::tokens_iterator It,
+ MacroInfo::tokens_iterator End);
+
+/// Retrieves the macro info for the macro \p II refers to at \p Loc. This is
+/// important because macros can be redefined or undefined.
+static const MacroInfo *getMacroInfoForLocation(const Preprocessor &PP,
+ const SourceManager &SM,
+ const IdentifierInfo *II,
+ SourceLocation Loc);
+
+//===----------------------------------------------------------------------===//
+// Definitions of helper functions and methods for expanding macros.
+//===----------------------------------------------------------------------===//
+
+static ExpansionInfo getExpandedMacro(SourceLocation MacroLoc,
+ const Preprocessor &PP) {
+
+ llvm::SmallString<200> ExpansionBuf;
+ llvm::raw_svector_ostream OS(ExpansionBuf);
+ TokenPrinter Printer(OS, PP);
+ std::string MacroName =
+ getMacroNameAndPrintExpansion(Printer, MacroLoc, PP, MacroArgMap{});
+ return { MacroName, OS.str() };
+}
+
+static std::string getMacroNameAndPrintExpansion(TokenPrinter &Printer,
+ SourceLocation MacroLoc,
+ const Preprocessor &PP,
+ const MacroArgMap &PrevArgs) {
+
+ const SourceManager &SM = PP.getSourceManager();
+
+ MacroNameAndArgs Info = getMacroNameAndArgs(SM.getExpansionLoc(MacroLoc), PP);
+
+ // Manually expand its arguments from the previous macro.
+ Info.Args.expandFromPrevMacro(PrevArgs);
+
+ // Iterate over the macro's tokens and stringify them.
+ for (auto It = Info.MI->tokens_begin(), E = Info.MI->tokens_end(); It != E;
+ ++It) {
+ Token T = *It;
+
+ // If this token is not an identifier, we only need to print it.
+ if (T.isNot(tok::identifier)) {
+ Printer.printToken(T);
+ continue;
+ }
+
+ const auto *II = T.getIdentifierInfo();
+ assert(II &&
+ "This token is an identifier but has no IdentifierInfo!");
+
+ // If this token is a macro that should be expanded inside the current
+ // macro.
+ if (const MacroInfo *MI =
+ getMacroInfoForLocation(PP, SM, II, T.getLocation())) {
+ getMacroNameAndPrintExpansion(Printer, T.getLocation(), PP, Info.Args);
+
+ // If this is a function-like macro, skip its arguments, as
+ // getExpandedMacro() already printed them. If this is the case, let's
+ // first jump to the '(' token.
+ if (MI->getNumParams() != 0)
+ It = getMatchingRParen(++It, E);
+ continue;
+ }
+
+ // If this token is the current macro's argument, we should expand it.
+ auto ArgMapIt = Info.Args.find(II);
+ if (ArgMapIt != Info.Args.end()) {
+ for (MacroInfo::tokens_iterator ArgIt = ArgMapIt->second.begin(),
+ ArgEnd = ArgMapIt->second.end();
+ ArgIt != ArgEnd; ++ArgIt) {
+
+ // These tokens may still be macros, if that is the case, handle it the
+ // same way we did above.
+ const auto *ArgII = ArgIt->getIdentifierInfo();
+ if (!ArgII) {
+ Printer.printToken(*ArgIt);
+ continue;
+ }
+
+ const auto *MI = PP.getMacroInfo(ArgII);
+ if (!MI) {
+ Printer.printToken(*ArgIt);
+ continue;
+ }
+
+ getMacroNameAndPrintExpansion(Printer, ArgIt->getLocation(), PP,
+ Info.Args);
+ if (MI->getNumParams() != 0)
+ ArgIt = getMatchingRParen(++ArgIt, ArgEnd);
+ }
+ continue;
+ }
+
+    // If control reached here, then this token isn't a macro identifier nor an
+    // unexpanded macro argument that we need to handle, so just print it.
+ Printer.printToken(T);
+ }
+
+ return Info.Name;
+}
+
+static MacroNameAndArgs getMacroNameAndArgs(SourceLocation ExpanLoc,
+ const Preprocessor &PP) {
+
+ const SourceManager &SM = PP.getSourceManager();
+ const LangOptions &LangOpts = PP.getLangOpts();
+
+ // First, we create a Lexer to lex *at the expansion location* the tokens
+ // referring to the macro's name and its arguments.
+ std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(ExpanLoc);
+ const llvm::MemoryBuffer *MB = SM.getBuffer(LocInfo.first);
+ const char *MacroNameTokenPos = MB->getBufferStart() + LocInfo.second;
+
+ Lexer RawLexer(SM.getLocForStartOfFile(LocInfo.first), LangOpts,
+ MB->getBufferStart(), MacroNameTokenPos, MB->getBufferEnd());
+
+ // Acquire the macro's name.
+ Token TheTok;
+ RawLexer.LexFromRawLexer(TheTok);
+
+ std::string MacroName = PP.getSpelling(TheTok);
+
+ const auto *II = PP.getIdentifierInfo(MacroName);
+  assert(II && "Failed to acquire the IdentifierInfo for the macro!");
+
+ const MacroInfo *MI = getMacroInfoForLocation(PP, SM, II, ExpanLoc);
+  assert(MI && "The macro must've been defined at its expansion location!");
+
+ // Acquire the macro's arguments.
+ //
+  // The rough idea here is to lex from the first left parenthesis to the last
+  // right parenthesis, and map the macro's unexpanded arguments to what they
+ // will be expanded to. An expanded macro argument may contain several tokens
+ // (like '3 + 4'), so we'll lex until we find a tok::comma or tok::r_paren, at
+ // which point we start lexing the next argument or finish.
+ ArrayRef<const IdentifierInfo *> MacroArgs = MI->params();
+ if (MacroArgs.empty())
+ return { MacroName, MI, {} };
+
+ RawLexer.LexFromRawLexer(TheTok);
+ assert(TheTok.is(tok::l_paren) &&
+ "The token after the macro's identifier token should be '('!");
+
+ MacroArgMap Args;
+
+ // When the macro's argument is a function call, like
+ // CALL_FN(someFunctionName(param1, param2))
+  // we will find tok::l_paren, tok::r_paren, and tok::comma that do not divide
+  // actual macro arguments, or do not represent the macro argument's closing
+  // parenthesis, so we'll count how many parentheses aren't closed yet.
+  // If ParenthesesDepth
+ // * = 0, then there are no more arguments to lex.
+ // * = 1, then if we find a tok::comma, we can start lexing the next arg.
+ // * > 1, then tok::comma is a part of the current arg.
+ int ParenthesesDepth = 1;
+
+ // If we encounter __VA_ARGS__, we will lex until the closing tok::r_paren,
+  // even if we lex a tok::comma and ParenthesesDepth == 1.
+ const IdentifierInfo *__VA_ARGS__II = PP.getIdentifierInfo("__VA_ARGS__");
+
+ for (const IdentifierInfo *UnexpArgII : MacroArgs) {
+ MacroArgMap::mapped_type ExpandedArgTokens;
+
+    // One could also supply no argument at all for __VA_ARGS__ -- this
+ // results in a preprocessor warning, but is not an error:
+ // #define VARIADIC(ptr, ...) \
+ // someVariadicTemplateFunction(__VA_ARGS__)
+ //
+ // int *ptr;
+    //   VARIADIC(ptr); // Note that there are no commas; this isn't just an
+    //                  // empty parameter -- there are no parameters for '...'.
+ // In any other case, ParenthesesDepth mustn't be 0 here.
+ if (ParenthesesDepth != 0) {
+
+ // Lex the first token of the next macro parameter.
+ RawLexer.LexFromRawLexer(TheTok);
+
+ while (!(ParenthesesDepth == 1 &&
+ (UnexpArgII == __VA_ARGS__II ? false : TheTok.is(tok::comma)))) {
+ assert(TheTok.isNot(tok::eof) &&
+ "EOF encountered while looking for expanded macro args!");
+
+ if (TheTok.is(tok::l_paren))
+ ++ParenthesesDepth;
+
+ if (TheTok.is(tok::r_paren))
+ --ParenthesesDepth;
+
+ if (ParenthesesDepth == 0)
+ break;
+
+ if (TheTok.is(tok::raw_identifier))
+ PP.LookUpIdentifierInfo(TheTok);
+
+ ExpandedArgTokens.push_back(TheTok);
+ RawLexer.LexFromRawLexer(TheTok);
+ }
+ } else {
+ assert(UnexpArgII == __VA_ARGS__II);
+ }
+
+ Args.emplace(UnexpArgII, std::move(ExpandedArgTokens));
+ }
+
+ assert(TheTok.is(tok::r_paren) &&
+ "Expanded macro argument acquisition failed! After the end of the loop"
+ " this token should be ')'!");
+
+ return { MacroName, MI, Args };
+}
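[Aside, not part of the patch: for readers following the parentheses-depth bookkeeping above, a minimal standalone sketch of the same counting idea over a plain string; findMatchingRParen is an illustrative helper, not analyzer API:]

    #include <cassert>
    #include <cstddef>
    #include <string>

    // Returns the index of the ')' matching the '(' at position LParen.
    static std::size_t findMatchingRParen(const std::string &S,
                                          std::size_t LParen) {
      assert(S[LParen] == '(' && "This character should be '('!");
      int ParenthesesDepth = 1;
      std::size_t I = LParen;
      while (ParenthesesDepth != 0) {
        ++I;
        assert(I < S.size() && "Unbalanced parentheses!");
        if (S[I] == '(')
          ++ParenthesesDepth;
        if (S[I] == ')')
          --ParenthesesDepth;
      }
      return I;
    }

    int main() {
      std::string Call = "CALL_FN(someFunctionName(param1, param2))";
      // The comma sits at depth 2, so it does not end the macro argument; the
      // matching ')' for the outer '(' is the last character of the string.
      assert(findMatchingRParen(Call, Call.find('(')) == Call.size() - 1);
      return 0;
    }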
+
+static MacroInfo::tokens_iterator getMatchingRParen(
+ MacroInfo::tokens_iterator It,
+ MacroInfo::tokens_iterator End) {
+
+ assert(It->is(tok::l_paren) && "This token should be '('!");
+
+ // Skip until we find the closing ')'.
+ int ParenthesesDepth = 1;
+ while (ParenthesesDepth != 0) {
+ ++It;
+
+ assert(It->isNot(tok::eof) &&
+ "Encountered EOF while attempting to skip macro arguments!");
+ assert(It != End &&
+ "End of the macro definition reached before finding ')'!");
+
+ if (It->is(tok::l_paren))
+ ++ParenthesesDepth;
+
+ if (It->is(tok::r_paren))
+ --ParenthesesDepth;
+ }
+ return It;
+}
+
+static const MacroInfo *getMacroInfoForLocation(const Preprocessor &PP,
+ const SourceManager &SM,
+ const IdentifierInfo *II,
+ SourceLocation Loc) {
+
+ const MacroDirective *MD = PP.getLocalMacroDirectiveHistory(II);
+ if (!MD)
+ return nullptr;
+
+ return MD->findDirectiveAtLoc(Loc, SM).getMacroInfo();
+}
+
+void MacroArgMap::expandFromPrevMacro(const MacroArgMap &Super) {
+
+ for (value_type &Pair : *this) {
+ ExpArgTokens &CurrExpArgTokens = Pair.second;
+
+ // For each token in the expanded macro argument.
+ auto It = CurrExpArgTokens.begin();
+ while (It != CurrExpArgTokens.end()) {
+ if (It->isNot(tok::identifier)) {
+ ++It;
+ continue;
+ }
+
+ const auto *II = It->getIdentifierInfo();
+ assert(II);
+
+ // Is this an argument that "Super" expands further?
+ if (!Super.count(II)) {
+ ++It;
+ continue;
+ }
+
+ const ExpArgTokens &SuperExpArgTokens = Super.at(II);
+
+ It = CurrExpArgTokens.insert(
+ It, SuperExpArgTokens.begin(), SuperExpArgTokens.end());
+ std::advance(It, SuperExpArgTokens.size());
+ It = CurrExpArgTokens.erase(It);
+ }
+ }
+}
+
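[Aside, not part of the patch: a standalone sketch of the in-place splice performed by expandFromPrevMacro, using ordinary strings instead of Tokens. The names are illustrative only; it reuses the PRINT/DO_SOMETHING example from the comments above, where the inner map says 'x' expands to 'str' and the outer map says 'str' expands to the string literal:]

    #include <cassert>
    #include <map>
    #include <string>
    #include <vector>

    using ArgMap = std::map<std::string, std::vector<std::string>>;

    // Replace, in place, every token of Curr that the enclosing macro (Super)
    // expands further, mirroring MacroArgMap::expandFromPrevMacro.
    static void expandFromPrev(ArgMap &Curr, const ArgMap &Super) {
      for (auto &Pair : Curr) {
        std::vector<std::string> &Toks = Pair.second;
        auto It = Toks.begin();
        while (It != Toks.end()) {
          auto SuperIt = Super.find(*It);
          if (SuperIt == Super.end()) {
            ++It;
            continue;
          }
          // Splice in the outer expansion, then drop the unexpanded token.
          It = Toks.insert(It, SuperIt->second.begin(), SuperIt->second.end());
          std::advance(It, SuperIt->second.size());
          It = Toks.erase(It);
        }
      }
    }

    int main() {
      ArgMap Inner = {{"x", {"str"}}};                    // from PRINT(x)
      ArgMap Outer = {{"str", {"\"Cute panda cubs.\""}}}; // from DO_SOMETHING(str)
      expandFromPrev(Inner, Outer);
      assert(Inner["x"] == std::vector<std::string>({"\"Cute panda cubs.\""}));
      return 0;
    }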
+void TokenPrinter::printToken(const Token &Tok) {
+ // If this is the first token to be printed, don't print space.
+ if (PrevTok.isNot(tok::unknown)) {
+ // If the tokens were already space separated, or if they must be to avoid
+ // them being implicitly pasted, add a space between them.
+ if(Tok.hasLeadingSpace() || ConcatInfo.AvoidConcat(PrevPrevTok, PrevTok,
+ Tok)) {
+      // AvoidConcat doesn't check for ##; don't print a space around it.
+ if (PrevTok.isNot(tok::hashhash) && Tok.isNot(tok::hashhash)) {
+ OS << ' ';
+ }
+ }
+ }
+
+ if (!Tok.isOneOf(tok::hash, tok::hashhash)) {
+ if (PrevTok.is(tok::hash))
+ OS << '\"' << PP.getSpelling(Tok) << '\"';
+ else
+ OS << PP.getSpelling(Tok);
+ }
+
+ PrevPrevTok = PrevTok;
+ PrevTok = Tok;
+}
diff --git a/lib/StaticAnalyzer/Core/ProgramState.cpp b/lib/StaticAnalyzer/Core/ProgramState.cpp
index 93d08b1aa5..ceb4fbdf78 100644
--- a/lib/StaticAnalyzer/Core/ProgramState.cpp
+++ b/lib/StaticAnalyzer/Core/ProgramState.cpp
@@ -70,10 +70,7 @@ ProgramState::~ProgramState() {
}
int64_t ProgramState::getID() const {
- Optional<int64_t> Out = getStateManager().Alloc.identifyObject(this);
- assert(Out && "Wrong allocator used");
- assert(*Out % alignof(ProgramState) == 0 && "Wrong alignment information");
- return *Out / alignof(ProgramState);
+ return getStateManager().Alloc.identifyKnownAlignedObject<ProgramState>(this);
}
ProgramStateManager::ProgramStateManager(ASTContext &Ctx,
@@ -662,22 +659,12 @@ bool ProgramState::scanReachableSymbols(SVal val, SymbolVisitor& visitor) const
return S.scan(val);
}
-bool ProgramState::scanReachableSymbols(const SVal *I, const SVal *E,
- SymbolVisitor &visitor) const {
+bool ProgramState::scanReachableSymbols(
+ llvm::iterator_range<region_iterator> Reachable,
+ SymbolVisitor &visitor) const {
ScanReachableSymbols S(this, visitor);
- for ( ; I != E; ++I) {
- if (!S.scan(*I))
- return false;
- }
- return true;
-}
-
-bool ProgramState::scanReachableSymbols(const MemRegion * const *I,
- const MemRegion * const *E,
- SymbolVisitor &visitor) const {
- ScanReachableSymbols S(this, visitor);
- for ( ; I != E; ++I) {
- if (!S.scan(*I))
+ for (const MemRegion *R : Reachable) {
+ if (!S.scan(R))
return false;
}
return true;
@@ -845,4 +832,3 @@ bool ProgramState::isTainted(SymbolRef Sym, TaintTagType Kind) const {
return false;
}
-
diff --git a/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp b/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
index e8c7bdbde3..d9b58d0f51 100644
--- a/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
+++ b/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
@@ -399,7 +399,7 @@ RangeConstraintManager::removeDeadBindings(ProgramStateRef State,
for (ConstraintRangeTy::iterator I = CR.begin(), E = CR.end(); I != E; ++I) {
SymbolRef Sym = I.getKey();
- if (SymReaper.maybeDead(Sym)) {
+ if (SymReaper.isDead(Sym)) {
Changed = true;
CR = CRFactory.remove(CR, Sym);
}
diff --git a/lib/StaticAnalyzer/Core/RangedConstraintManager.cpp b/lib/StaticAnalyzer/Core/RangedConstraintManager.cpp
index f99853f070..146dc20ad0 100644
--- a/lib/StaticAnalyzer/Core/RangedConstraintManager.cpp
+++ b/lib/StaticAnalyzer/Core/RangedConstraintManager.cpp
@@ -200,6 +200,11 @@ void RangedConstraintManager::computeAdjustment(SymbolRef &Sym,
}
}
+void *ProgramStateTrait<ConstraintRange>::GDMIndex() {
+ static int Index;
+ return &Index;
+}
+
} // end of namespace ento
} // end of namespace clang
diff --git a/lib/StaticAnalyzer/Core/RegionStore.cpp b/lib/StaticAnalyzer/Core/RegionStore.cpp
index aa3b4044dc..f5eb9b5e72 100644
--- a/lib/StaticAnalyzer/Core/RegionStore.cpp
+++ b/lib/StaticAnalyzer/Core/RegionStore.cpp
@@ -350,7 +350,7 @@ public:
if (SubEngine *Eng = StateMgr.getOwningEngine()) {
AnalyzerOptions &Options = Eng->getAnalysisManager().options;
SmallStructLimit =
- Options.getOptionAsInteger("region-store-small-struct-limit", 2);
+ Options.RegionStoreSmallStructLimit;
}
}
@@ -1330,11 +1330,11 @@ RegionStoreManager::invalidateRegions(Store store,
case GFK_All:
B = invalidateGlobalRegion(MemRegion::GlobalInternalSpaceRegionKind,
Ex, Count, LCtx, B, Invalidated);
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case GFK_SystemOnly:
B = invalidateGlobalRegion(MemRegion::GlobalSystemSpaceRegionKind,
Ex, Count, LCtx, B, Invalidated);
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case GFK_None:
break;
}
@@ -2571,24 +2571,9 @@ StoreRef RegionStoreManager::removeDeadBindings(Store store,
const MemRegion *Base = I.getKey();
// If the cluster has been visited, we know the region has been marked.
- if (W.isVisited(Base))
- continue;
-
- // Remove the dead entry.
- B = B.remove(Base);
-
- if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(Base))
- SymReaper.maybeDead(SymR->getSymbol());
-
- // Mark all non-live symbols that this binding references as dead.
- const ClusterBindings &Cluster = I.getData();
- for (ClusterBindings::iterator CI = Cluster.begin(), CE = Cluster.end();
- CI != CE; ++CI) {
- SVal X = CI.getData();
- SymExpr::symbol_iterator SI = X.symbol_begin(), SE = X.symbol_end();
- for (; SI != SE; ++SI)
- SymReaper.maybeDead(*SI);
- }
+ // Otherwise, remove the dead entry.
+ if (!W.isVisited(Base))
+ B = B.remove(Base);
}
return StoreRef(B.asStore(), *this);
diff --git a/lib/StaticAnalyzer/Core/RetainSummaryManager.cpp b/lib/StaticAnalyzer/Core/RetainSummaryManager.cpp
index c16b141e38..3bbb4c7f9a 100644
--- a/lib/StaticAnalyzer/Core/RetainSummaryManager.cpp
+++ b/lib/StaticAnalyzer/Core/RetainSummaryManager.cpp
@@ -8,8 +8,8 @@
//===----------------------------------------------------------------------===//
//
// This file defines summaries implementation for retain counting, which
-// implements a reference count checker for Core Foundation and Cocoa
-// on (Mac OS X).
+// implements a reference count checker for Core Foundation, Cocoa
+// and OSObject (on Mac OS X).
//
//===----------------------------------------------------------------------===//
@@ -24,6 +24,31 @@
using namespace clang;
using namespace ento;
+template <class T>
+constexpr static bool isOneOf() {
+ return false;
+}
+
+/// Helper function to check whether the class T is the same as any of the
+/// remaining variadic type parameters.
+template <class T, class P, class... ToCompare>
+constexpr static bool isOneOf() {
+ return std::is_same<T, P>::value || isOneOf<T, ToCompare...>();
+}
+
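[Aside, not part of the patch: a quick standalone illustration of how the two isOneOf overloads above unfold; the copies below exist only to keep the example self-contained.]

    #include <type_traits>

    template <class T>
    constexpr bool isOneOf() {
      return false;
    }

    template <class T, class P, class... ToCompare>
    constexpr bool isOneOf() {
      return std::is_same<T, P>::value || isOneOf<T, ToCompare...>();
    }

    // isOneOf<CFConsumedAttr, ...>() in isAttrEnabled() works the same way:
    // compare T against each listed type in turn, ending at the 'false' base
    // case once the parameter pack is exhausted.
    static_assert(isOneOf<int, long, int, char>(), "int occurs in the list");
    static_assert(!isOneOf<float, long, int, char>(), "float does not occur");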
+template <class T> bool RetainSummaryManager::isAttrEnabled() {
+ if (isOneOf<T, CFConsumedAttr, CFReturnsRetainedAttr,
+ CFReturnsNotRetainedAttr, NSConsumedAttr, NSConsumesSelfAttr,
+ NSReturnsAutoreleasedAttr, NSReturnsRetainedAttr,
+ NSReturnsNotRetainedAttr>()) {
+ return TrackObjCAndCFObjects;
+ } else if (isOneOf<T, OSConsumedAttr, OSConsumesThisAttr,
+ OSReturnsNotRetainedAttr, OSReturnsRetainedAttr>()) {
+ return TrackOSObjects;
+ }
+ llvm_unreachable("Unexpected attribute passed");
+}
+
ArgEffects RetainSummaryManager::getArgEffects() {
ArgEffects AE = ScratchArgs;
ScratchArgs = AF.getEmptyMap();
@@ -65,6 +90,10 @@ static bool isOSObjectSubclass(const Decl *D) {
return isSubclass(D, "OSObject");
}
+static bool isOSObjectDynamicCast(StringRef S) {
+ return S == "safeMetaCast";
+}
+
static bool isOSIteratorSubclass(const Decl *D) {
return isSubclass(D, "OSIterator");
}
@@ -94,35 +123,78 @@ static bool isMakeCollectable(StringRef FName) {
return FName.contains_lower("MakeCollectable");
}
+/// A function is OSObject related if it is declared on a subclass
+/// of OSObject, or if any of its parameters is a pointer to a subclass of
+/// OSObject.
+static bool isOSObjectRelated(const CXXMethodDecl *MD) {
+ if (isOSObjectSubclass(MD->getParent()))
+ return true;
+
+ for (ParmVarDecl *Param : MD->parameters()) {
+ QualType PT = Param->getType()->getPointeeType();
+ if (!PT.isNull())
+ if (CXXRecordDecl *RD = PT->getAsCXXRecordDecl())
+ if (isOSObjectSubclass(RD))
+ return true;
+ }
+
+ return false;
+}
+
const RetainSummary *
-RetainSummaryManager::generateSummary(const FunctionDecl *FD,
- bool &AllowAnnotations) {
- // We generate "stop" summaries for implicitly defined functions.
- if (FD->isImplicit()) {
- return getPersistentStopSummary();
+RetainSummaryManager::getSummaryForOSObject(const FunctionDecl *FD,
+ StringRef FName, QualType RetTy) {
+ if (RetTy->isPointerType()) {
+ const CXXRecordDecl *PD = RetTy->getPointeeType()->getAsCXXRecordDecl();
+ if (PD && isOSObjectSubclass(PD)) {
+ if (const IdentifierInfo *II = FD->getIdentifier()) {
+ if (isOSObjectDynamicCast(II->getName()))
+ return getDefaultSummary();
+
+        // All objects returned by functions *not* starting with "get" or
+        // "Get", as well as all returned iterators, are returned at +1.
+ if ((!II->getName().startswith("get") &&
+ !II->getName().startswith("Get")) ||
+ isOSIteratorSubclass(PD)) {
+ return getOSSummaryCreateRule(FD);
+ } else {
+ return getOSSummaryGetRule(FD);
+ }
+ }
+ }
}
- // [PR 3337] Use 'getAs<FunctionType>' to strip away any typedefs on the
- // function's type.
- const FunctionType *FT = FD->getType()->getAs<FunctionType>();
- const IdentifierInfo *II = FD->getIdentifier();
- if (!II)
- return getDefaultSummary();
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ const CXXRecordDecl *Parent = MD->getParent();
+ if (TrackOSObjects && Parent && isOSObjectSubclass(Parent)) {
+ if (FName == "release")
+ return getOSSummaryReleaseRule(FD);
- StringRef FName = II->getName();
+ if (FName == "retain")
+ return getOSSummaryRetainRule(FD);
- // Strip away preceding '_'. Doing this here will effect all the checks
- // down below.
- FName = FName.substr(FName.find_first_not_of('_'));
+ if (FName == "free")
+ return getOSSummaryFreeRule(FD);
+
+ if (MD->getOverloadedOperator() == OO_New)
+ return getOSSummaryCreateRule(MD);
+ }
+ }
+
+ return nullptr;
+}
+
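
getSummaryForOSObject encodes a simple naming convention: an OSObject-returning function whose name does not start with get/Get (and anything returning an OSIterator subclass) produces a +1 reference, while a get-prefixed function returns at +0. A standalone sketch of that classification rule with invented names, not part of the patch:

    #include <cassert>
    #include <string>

    enum class Rule { Create, Get }; // +1 vs. +0 return

    static Rule classifyOSReturn(const std::string &Name, bool ReturnsIterator) {
      bool IsGetter =
          Name.compare(0, 3, "get") == 0 || Name.compare(0, 3, "Get") == 0;
      return (!IsGetter || ReturnsIterator) ? Rule::Create : Rule::Get;
    }

    int main() {
      assert(classifyOSReturn("copyCollection", false) == Rule::Create);
      assert(classifyOSReturn("getCount", false) == Rule::Get);
      assert(classifyOSReturn("getIterator", true) == Rule::Create); // iterators are +1
    }
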
+const RetainSummary *RetainSummaryManager::getSummaryForObjCOrCFObject(
+ const FunctionDecl *FD,
+ StringRef FName,
+ QualType RetTy,
+ const FunctionType *FT,
+ bool &AllowAnnotations) {
- // Inspect the result type.
- QualType RetTy = FT->getReturnType();
std::string RetTyName = RetTy.getAsString();
// FIXME: This should all be refactored into a chain of "summary lookup"
// filters.
assert(ScratchArgs.isEmpty());
-
if (FName == "pthread_create" || FName == "pthread_setspecific") {
// Part of: <rdar://problem/7299394> and <rdar://problem/11282706>.
// This will be addressed better with IPA.
@@ -213,28 +285,11 @@ RetainSummaryManager::generateSummary(const FunctionDecl *FD,
if (RetTy->isPointerType()) {
- const CXXRecordDecl *PD = RetTy->getPointeeType()->getAsCXXRecordDecl();
- if (TrackOSObjects && PD && isOSObjectSubclass(PD)) {
- if (const IdentifierInfo *II = FD->getIdentifier()) {
-
- // All objects returned with functions starting with "get" are getters.
- if (II->getName().startswith("get")) {
-
- // ...except for iterators.
- if (isOSIteratorSubclass(PD))
- return getOSSummaryCreateRule(FD);
- return getOSSummaryGetRule(FD);
- } else {
- return getOSSummaryCreateRule(FD);
- }
- }
- }
-
// For CoreFoundation ('CF') types.
if (cocoa::isRefType(RetTy, "CF", FName)) {
if (isRetain(FD, FName)) {
- // CFRetain isn't supposed to be annotated. However, this may as well
- // be a user-made "safe" CFRetain function that is incorrectly
+ // CFRetain isn't supposed to be annotated. However, this may as
+ // well be a user-made "safe" CFRetain function that is incorrectly
// annotated as cf_returns_retained due to lack of better options.
// We want to ignore such annotation.
AllowAnnotations = false;
@@ -275,21 +330,9 @@ RetainSummaryManager::generateSummary(const FunctionDecl *FD,
}
}
- if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
- const CXXRecordDecl *Parent = MD->getParent();
- if (TrackOSObjects && Parent && isOSObjectSubclass(Parent)) {
- if (FName == "release")
- return getOSSummaryReleaseRule(FD);
-
- if (FName == "retain")
- return getOSSummaryRetainRule(FD);
- }
- }
-
// Check for release functions, the only kind of functions that we care
// about that don't return a pointer type.
- if (FName.size() >= 2 && FName[0] == 'C' &&
- (FName[1] == 'F' || FName[1] == 'G')) {
+ if (FName.startswith("CG") || FName.startswith("CF")) {
// Test for 'CGCF'.
FName = FName.substr(FName.startswith("CGCF") ? 4 : 2);
@@ -324,13 +367,41 @@ RetainSummaryManager::generateSummary(const FunctionDecl *FD,
}
}
- if (isa<CXXMethodDecl>(FD)) {
+ return nullptr;
+}
- // Stop tracking arguments passed to C++ methods, as those might be
- // wrapping smart pointers.
- return getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, StopTracking,
- DoNothing);
- }
+const RetainSummary *
+RetainSummaryManager::generateSummary(const FunctionDecl *FD,
+ bool &AllowAnnotations) {
+ // We generate "stop" summaries for implicitly defined functions.
+ if (FD->isImplicit())
+ return getPersistentStopSummary();
+
+ const IdentifierInfo *II = FD->getIdentifier();
+
+ StringRef FName = II ? II->getName() : "";
+
+  // Strip away preceding '_'. Doing this here will affect all the checks
+ // down below.
+ FName = FName.substr(FName.find_first_not_of('_'));
+
+ // Inspect the result type. Strip away any typedefs.
+ const auto *FT = FD->getType()->getAs<FunctionType>();
+ QualType RetTy = FT->getReturnType();
+
+ if (TrackOSObjects)
+ if (const RetainSummary *S = getSummaryForOSObject(FD, FName, RetTy))
+ return S;
+
+ if (TrackObjCAndCFObjects)
+ if (const RetainSummary *S =
+ getSummaryForObjCOrCFObject(FD, FName, RetTy, FT, AllowAnnotations))
+ return S;
+
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
+ if (!(TrackOSObjects && isOSObjectRelated(MD)))
+ return getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, StopTracking,
+ DoNothing);
return getDefaultSummary();
}
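
The rewritten generateSummary is now a short dispatch chain: the OSObject lookup and the ObjC/CF lookup each get a chance to produce a summary and signal "not mine" by returning null, unrelated C++ methods get a stop-tracking summary, and everything else falls through to the default. A minimal sketch of that first-non-null pattern, with toy types rather than the analyzer's:

    #include <cstdio>

    struct Summary { const char *Kind; };

    static const Summary *lookupOSObject(bool IsOS) {
      static const Summary S{"os"};
      return IsOS ? &S : nullptr; // decline by returning null
    }

    static const Summary *lookupObjCOrCF(bool IsCF) {
      static const Summary S{"cf"};
      return IsCF ? &S : nullptr;
    }

    static const Summary *generate(bool IsOS, bool IsCF) {
      if (const Summary *S = lookupOSObject(IsOS))
        return S;
      if (const Summary *S = lookupObjCOrCF(IsCF))
        return S;
      static const Summary Default{"default"};
      return &Default;
    }

    int main() { std::printf("%s\n", generate(false, true)->Kind); } // prints "cf"
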
@@ -461,16 +532,14 @@ RetainSummaryManager::getSummary(const CallEvent &Call,
const RetainSummary *Summ;
switch (Call.getKind()) {
case CE_Function:
- Summ = getFunctionSummary(cast<SimpleFunctionCall>(Call).getDecl());
- break;
case CE_CXXMember:
- Summ = getFunctionSummary(cast<CXXMemberCall>(Call).getDecl());
- break;
case CE_CXXMemberOperator:
- case CE_Block:
case CE_CXXConstructor:
- case CE_CXXDestructor:
case CE_CXXAllocator:
+ Summ = getFunctionSummary(cast_or_null<FunctionDecl>(Call.getDecl()));
+ break;
+ case CE_Block:
+ case CE_CXXDestructor:
// FIXME: These calls are currently unsupported.
return getPersistentStopSummary();
case CE_ObjCMessage: {
@@ -503,26 +572,21 @@ bool RetainSummaryManager::isTrustedReferenceCountImplementation(
return hasRCAnnotation(FD, "rc_ownership_trusted_implementation");
}
-bool RetainSummaryManager::canEval(const CallExpr *CE,
- const FunctionDecl *FD,
- bool &hasTrustedImplementationAnnotation) {
- // For now, we're only handling the functions that return aliases of their
- // arguments: CFRetain (and its families).
- // Eventually we should add other functions we can model entirely,
- // such as CFRelease, which don't invalidate their arguments or globals.
- if (CE->getNumArgs() != 1)
- return false;
+Optional<RetainSummaryManager::BehaviorSummary>
+RetainSummaryManager::canEval(const CallExpr *CE, const FunctionDecl *FD,
+ bool &hasTrustedImplementationAnnotation) {
IdentifierInfo *II = FD->getIdentifier();
if (!II)
- return false;
+ return None;
StringRef FName = II->getName();
FName = FName.substr(FName.find_first_not_of('_'));
QualType ResultTy = CE->getCallReturnType(Ctx);
if (ResultTy->isObjCIdType()) {
- return II->isStr("NSMakeCollectable");
+ if (II->isStr("NSMakeCollectable"))
+ return BehaviorSummary::Identity;
} else if (ResultTy->isPointerType()) {
// Handle: (CF|CG|CV)Retain
// CFAutorelease
@@ -530,18 +594,34 @@ bool RetainSummaryManager::canEval(const CallExpr *CE,
if (cocoa::isRefType(ResultTy, "CF", FName) ||
cocoa::isRefType(ResultTy, "CG", FName) ||
cocoa::isRefType(ResultTy, "CV", FName))
- return isRetain(FD, FName) || isAutorelease(FD, FName) ||
- isMakeCollectable(FName);
+ if (isRetain(FD, FName) || isAutorelease(FD, FName) ||
+ isMakeCollectable(FName))
+ return BehaviorSummary::Identity;
+
+  // safeMetaCast is called by OSDynamicCast.
+  // We assume that OSDynamicCast either acts as an identity (the cast
+  // succeeded and the input was non-zero) or returns zero (the cast
+  // failed, or the input was zero).
+ if (TrackOSObjects && isOSObjectDynamicCast(FName)) {
+ return BehaviorSummary::IdentityOrZero;
+ }
const FunctionDecl* FDD = FD->getDefinition();
if (FDD && isTrustedReferenceCountImplementation(FDD)) {
hasTrustedImplementationAnnotation = true;
- return true;
+ return BehaviorSummary::Identity;
}
}
- return false;
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ const CXXRecordDecl *Parent = MD->getParent();
+ if (TrackOSObjects && Parent && isOSObjectSubclass(Parent))
+ if (FName == "release" || FName == "retain")
+ return BehaviorSummary::NoOp;
+ }
+ return None;
}
const RetainSummary *
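
canEval now reports how a call can be modeled instead of a plain yes/no: Identity for the CFRetain/CFAutorelease/NSMakeCollectable family, IdentityOrZero for safeMetaCast (the helper behind OSDynamicCast), and NoOp for OSObject retain/release. A standalone sketch of that classification, using std::optional and simplified name matching rather than the analyzer's machinery:

    #include <cassert>
    #include <optional>
    #include <string>

    enum class BehaviorSummary { Identity, IdentityOrZero, NoOp };

    static std::optional<BehaviorSummary> classifyCall(const std::string &Name) {
      if (Name == "CFRetain" || Name == "CFAutorelease" ||
          Name == "NSMakeCollectable")
        return BehaviorSummary::Identity;        // returns its argument unchanged
      if (Name == "safeMetaCast")                // called by OSDynamicCast
        return BehaviorSummary::IdentityOrZero;  // returns the argument or null
      if (Name == "retain" || Name == "release") // OSObject methods
        return BehaviorSummary::NoOp;
      return std::nullopt;                       // fall back to conservative evaluation
    }

    int main() {
      assert(classifyCall("safeMetaCast") == BehaviorSummary::IdentityOrZero);
      assert(!classifyCall("printf").has_value());
    }
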
@@ -585,6 +665,14 @@ RetainSummaryManager::getOSSummaryReleaseRule(const FunctionDecl *FD) {
}
const RetainSummary *
+RetainSummaryManager::getOSSummaryFreeRule(const FunctionDecl *FD) {
+ return getPersistentSummary(RetEffect::MakeNoRet(),
+ /*ReceiverEff=*/DoNothing,
+ /*DefaultEff=*/DoNothing,
+ /*ThisEff=*/Dealloc);
+}
+
+const RetainSummary *
RetainSummaryManager::getOSSummaryCreateRule(const FunctionDecl *FD) {
return getPersistentSummary(RetEffect::MakeOwned(RetEffect::OS));
}
@@ -618,7 +706,7 @@ RetainSummaryManager::getCFSummaryGetRule(const FunctionDecl *FD) {
Optional<RetEffect>
RetainSummaryManager::getRetEffectFromAnnotations(QualType RetTy,
const Decl *D) {
- if (cocoa::isCocoaObjectRef(RetTy)) {
+ if (TrackObjCAndCFObjects && cocoa::isCocoaObjectRef(RetTy)) {
if (D->hasAttr<NSReturnsRetainedAttr>())
return ObjCAllocRetE;
@@ -630,17 +718,73 @@ RetainSummaryManager::getRetEffectFromAnnotations(QualType RetTy,
return None;
}
- if (D->hasAttr<CFReturnsRetainedAttr>())
+ if (hasEnabledAttr<CFReturnsRetainedAttr>(D)) {
return RetEffect::MakeOwned(RetEffect::CF);
- else if (hasRCAnnotation(D, "rc_ownership_returns_retained"))
+ } else if (hasEnabledAttr<OSReturnsRetainedAttr>(D)) {
+ return RetEffect::MakeOwned(RetEffect::OS);
+ } else if (hasRCAnnotation(D, "rc_ownership_returns_retained")) {
return RetEffect::MakeOwned(RetEffect::Generalized);
+ }
- if (D->hasAttr<CFReturnsNotRetainedAttr>())
+ if (hasEnabledAttr<CFReturnsNotRetainedAttr>(D)) {
return RetEffect::MakeNotOwned(RetEffect::CF);
+ } else if (hasEnabledAttr<OSReturnsNotRetainedAttr>(D)) {
+ return RetEffect::MakeNotOwned(RetEffect::OS);
+ } else if (hasRCAnnotation(D, "rc_ownership_returns_not_retained")) {
+ return RetEffect::MakeNotOwned(RetEffect::Generalized);
+ }
+
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(D))
+ for (const auto *PD : MD->overridden_methods())
+ if (auto RE = getRetEffectFromAnnotations(RetTy, PD))
+ return RE;
return None;
}
+bool RetainSummaryManager::applyFunctionParamAnnotationEffect(const ParmVarDecl *pd,
+ unsigned parm_idx,
+ const FunctionDecl *FD,
+ ArgEffects::Factory &AF,
+ RetainSummaryTemplate &Template) {
+ if (hasEnabledAttr<NSConsumedAttr>(pd)) {
+ Template->addArg(AF, parm_idx, DecRefMsg);
+ return true;
+ } else if (hasEnabledAttr<CFConsumedAttr>(pd) ||
+ hasEnabledAttr<OSConsumedAttr>(pd) ||
+ hasRCAnnotation(pd, "rc_ownership_consumed")) {
+ Template->addArg(AF, parm_idx, DecRef);
+ return true;
+ } else if (hasEnabledAttr<CFReturnsRetainedAttr>(pd) ||
+ hasRCAnnotation(pd, "rc_ownership_returns_retained")) {
+ QualType PointeeTy = pd->getType()->getPointeeType();
+ if (!PointeeTy.isNull()) {
+ if (coreFoundation::isCFObjectRef(PointeeTy)) {
+ Template->addArg(AF, parm_idx, RetainedOutParameter);
+ return true;
+ }
+ }
+ } else if (hasEnabledAttr<CFReturnsNotRetainedAttr>(pd)) {
+ QualType PointeeTy = pd->getType()->getPointeeType();
+ if (!PointeeTy.isNull()) {
+ if (coreFoundation::isCFObjectRef(PointeeTy)) {
+ Template->addArg(AF, parm_idx, UnretainedOutParameter);
+ return true;
+ }
+ }
+ } else {
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ for (const auto *OD : MD->overridden_methods()) {
+ const ParmVarDecl *OP = OD->parameters()[parm_idx];
+ if (applyFunctionParamAnnotationEffect(OP, parm_idx, OD, AF, Template))
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
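
Both getRetEffectFromAnnotations and applyFunctionParamAnnotationEffect now fall back to the overridden methods, so an ownership annotation written on a base-class declaration also applies to unannotated overrides. A generic standalone sketch of that lookup; the Method type here is invented for illustration:

    #include <cassert>
    #include <optional>
    #include <vector>

    struct Method {
      std::optional<int> Annotation;          // stands in for a consumed/retained attr
      std::vector<const Method *> Overridden; // directly overridden declarations
    };

    static std::optional<int> findAnnotation(const Method &M) {
      if (M.Annotation)
        return M.Annotation;
      for (const Method *Base : M.Overridden) // walk up the override chain
        if (auto A = findAnnotation(*Base))
          return A;
      return std::nullopt;
    }

    int main() {
      Method Base{42, {}};          // annotated base declaration
      Method Override{{}, {&Base}}; // unannotated override inherits it
      assert(findAnnotation(Override) == 42);
    }
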
void
RetainSummaryManager::updateSummaryFromAnnotations(const RetainSummary *&Summ,
const FunctionDecl *FD) {
@@ -652,31 +796,18 @@ RetainSummaryManager::updateSummaryFromAnnotations(const RetainSummary *&Summ,
// Effects on the parameters.
unsigned parm_idx = 0;
- for (FunctionDecl::param_const_iterator pi = FD->param_begin(),
+ for (auto pi = FD->param_begin(),
pe = FD->param_end(); pi != pe; ++pi, ++parm_idx) {
const ParmVarDecl *pd = *pi;
- if (pd->hasAttr<NSConsumedAttr>())
- Template->addArg(AF, parm_idx, DecRefMsg);
- else if (pd->hasAttr<CFConsumedAttr>() ||
- hasRCAnnotation(pd, "rc_ownership_consumed"))
- Template->addArg(AF, parm_idx, DecRef);
- else if (pd->hasAttr<CFReturnsRetainedAttr>() ||
- hasRCAnnotation(pd, "rc_ownership_returns_retained")) {
- QualType PointeeTy = pd->getType()->getPointeeType();
- if (!PointeeTy.isNull())
- if (coreFoundation::isCFObjectRef(PointeeTy))
- Template->addArg(AF, parm_idx, RetainedOutParameter);
- } else if (pd->hasAttr<CFReturnsNotRetainedAttr>()) {
- QualType PointeeTy = pd->getType()->getPointeeType();
- if (!PointeeTy.isNull())
- if (coreFoundation::isCFObjectRef(PointeeTy))
- Template->addArg(AF, parm_idx, UnretainedOutParameter);
- }
+ applyFunctionParamAnnotationEffect(pd, parm_idx, FD, AF, Template);
}
QualType RetTy = FD->getReturnType();
if (Optional<RetEffect> RetE = getRetEffectFromAnnotations(RetTy, FD))
Template->setRetEffect(*RetE);
+
+ if (hasEnabledAttr<OSConsumesThisAttr>(FD))
+ Template->setThisEffect(DecRef);
}
void
@@ -694,13 +825,12 @@ RetainSummaryManager::updateSummaryFromAnnotations(const RetainSummary *&Summ,
// Effects on the parameters.
unsigned parm_idx = 0;
- for (ObjCMethodDecl::param_const_iterator
- pi=MD->param_begin(), pe=MD->param_end();
+ for (auto pi=MD->param_begin(), pe=MD->param_end();
pi != pe; ++pi, ++parm_idx) {
const ParmVarDecl *pd = *pi;
- if (pd->hasAttr<NSConsumedAttr>())
+ if (pd->hasAttr<NSConsumedAttr>()) {
Template->addArg(AF, parm_idx, DecRefMsg);
- else if (pd->hasAttr<CFConsumedAttr>()) {
+ } else if (pd->hasAttr<CFConsumedAttr>() || pd->hasAttr<OSConsumedAttr>()) {
Template->addArg(AF, parm_idx, DecRef);
} else if (pd->hasAttr<CFReturnsRetainedAttr>()) {
QualType PointeeTy = pd->getType()->getPointeeType();
@@ -848,6 +978,10 @@ RetainSummaryManager::getMethodSummary(Selector S, const ObjCInterfaceDecl *ID,
const ObjCMethodDecl *MD, QualType RetTy,
ObjCMethodSummariesTy &CachedSummaries) {
+ // Objective-C method summaries are only applicable to ObjC and CF objects.
+ if (!TrackObjCAndCFObjects)
+ return getDefaultSummary();
+
// Look up a summary in our summary cache.
const RetainSummary *Summ = CachedSummaries.find(ID, S);
@@ -958,7 +1092,9 @@ void RetainSummaryManager::InitializeMethodSummaries() {
CallEffects CallEffects::getEffect(const ObjCMethodDecl *MD) {
ASTContext &Ctx = MD->getASTContext();
LangOptions L = Ctx.getLangOpts();
- RetainSummaryManager M(Ctx, L.ObjCAutoRefCount, /*TrackOSObjects=*/false);
+ RetainSummaryManager M(Ctx, L.ObjCAutoRefCount,
+ /*TrackNSAndCFObjects=*/true,
+ /*TrackOSObjects=*/false);
const RetainSummary *S = M.getMethodSummary(MD);
CallEffects CE(S->getRetEffect());
CE.Receiver = S->getReceiverEffect();
@@ -972,7 +1108,9 @@ CallEffects CallEffects::getEffect(const ObjCMethodDecl *MD) {
CallEffects CallEffects::getEffect(const FunctionDecl *FD) {
ASTContext &Ctx = FD->getASTContext();
LangOptions L = Ctx.getLangOpts();
- RetainSummaryManager M(Ctx, L.ObjCAutoRefCount, /*TrackOSObjects=*/false);
+ RetainSummaryManager M(Ctx, L.ObjCAutoRefCount,
+ /*TrackNSAndCFObjects=*/true,
+ /*TrackOSObjects=*/false);
const RetainSummary *S = M.getFunctionSummary(FD);
CallEffects CE(S->getRetEffect());
unsigned N = FD->param_size();
diff --git a/lib/StaticAnalyzer/Core/SValBuilder.cpp b/lib/StaticAnalyzer/Core/SValBuilder.cpp
index ef3d5b7665..617c4ba27d 100644
--- a/lib/StaticAnalyzer/Core/SValBuilder.cpp
+++ b/lib/StaticAnalyzer/Core/SValBuilder.cpp
@@ -362,9 +362,9 @@ Optional<SVal> SValBuilder::getConstantVal(const Expr *E) {
return None;
ASTContext &Ctx = getContext();
- llvm::APSInt Result;
+ Expr::EvalResult Result;
if (E->EvaluateAsInt(Result, Ctx))
- return makeIntVal(Result);
+ return makeIntVal(Result.Val.getInt());
if (Loc::isLocType(E->getType()))
if (E->isNullPointerConstant(Ctx, Expr::NPC_ValueDependentIsNotNull))
@@ -385,7 +385,7 @@ SVal SValBuilder::makeSymExprValNN(BinaryOperator::Opcode Op,
// instead of generating an Unknown value and propagate the taint info to it.
const unsigned MaxComp = StateMgr.getOwningEngine()
->getAnalysisManager()
- .options.getMaxSymbolComplexity();
+ .options.MaxSymbolComplexity;
if (symLHS && symRHS &&
(symLHS->computeComplexity() + symRHS->computeComplexity()) < MaxComp)
diff --git a/lib/StaticAnalyzer/Core/SVals.cpp b/lib/StaticAnalyzer/Core/SVals.cpp
index 559ca2c984..b32be9e82d 100644
--- a/lib/StaticAnalyzer/Core/SVals.cpp
+++ b/lib/StaticAnalyzer/Core/SVals.cpp
@@ -85,7 +85,7 @@ const FunctionDecl *SVal::getAsFunctionDecl() const {
SymbolRef SVal::getAsLocSymbol(bool IncludeBaseRegions) const {
// FIXME: should we consider SymbolRef wrapped in CodeTextRegion?
if (Optional<nonloc::LocAsInteger> X = getAs<nonloc::LocAsInteger>())
- return X->getLoc().getAsLocSymbol();
+ return X->getLoc().getAsLocSymbol(IncludeBaseRegions);
if (Optional<loc::MemRegionVal> X = getAs<loc::MemRegionVal>()) {
const MemRegion *R = X->getRegion();
diff --git a/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp b/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp
new file mode 100644
index 0000000000..36046f0cfd
--- /dev/null
+++ b/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp
@@ -0,0 +1,332 @@
+//===--- SarifDiagnostics.cpp - Sarif Diagnostics for Paths -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the SarifDiagnostics object.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/Version.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/JSON.h"
+#include "llvm/Support/Path.h"
+
+using namespace llvm;
+using namespace clang;
+using namespace ento;
+
+namespace {
+class SarifDiagnostics : public PathDiagnosticConsumer {
+ std::string OutputFile;
+
+public:
+ SarifDiagnostics(AnalyzerOptions &, const std::string &Output)
+ : OutputFile(Output) {}
+ ~SarifDiagnostics() override = default;
+
+ void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
+ FilesMade *FM) override;
+
+ StringRef getName() const override { return "SarifDiagnostics"; }
+ PathGenerationScheme getGenerationScheme() const override { return Minimal; }
+ bool supportsLogicalOpControlFlow() const override { return true; }
+ bool supportsCrossFileDiagnostics() const override { return true; }
+};
+} // end anonymous namespace
+
+void ento::createSarifDiagnosticConsumer(AnalyzerOptions &AnalyzerOpts,
+ PathDiagnosticConsumers &C,
+ const std::string &Output,
+ const Preprocessor &) {
+ C.push_back(new SarifDiagnostics(AnalyzerOpts, Output));
+}
+
+static StringRef getFileName(const FileEntry &FE) {
+ StringRef Filename = FE.tryGetRealPathName();
+ if (Filename.empty())
+ Filename = FE.getName();
+ return Filename;
+}
+
+static std::string percentEncodeURICharacter(char C) {
+ // RFC 3986 claims alpha, numeric, and this handful of
+ // characters are not reserved for the path component and
+ // should be written out directly. Otherwise, percent
+ // encode the character and write that out instead of the
+ // reserved character.
+ if (llvm::isAlnum(C) ||
+ StringRef::npos != StringRef("-._~:@!$&'()*+,;=").find(C))
+ return std::string(&C, 1);
+ return "%" + llvm::toHex(StringRef(&C, 1));
+}
+
+static std::string fileNameToURI(StringRef Filename) {
+ llvm::SmallString<32> Ret = StringRef("file://");
+
+ // Get the root name to see if it has a URI authority.
+ StringRef Root = sys::path::root_name(Filename);
+ if (Root.startswith("//")) {
+ // There is an authority, so add it to the URI.
+ Ret += Root.drop_front(2).str();
+ } else if (!Root.empty()) {
+ // There is no authority, so end the component and add the root to the URI.
+ Ret += Twine("/" + Root).str();
+ }
+
+ auto Iter = sys::path::begin(Filename), End = sys::path::end(Filename);
+ assert(Iter != End && "Expected there to be a non-root path component.");
+ // Add the rest of the path components, encoding any reserved characters;
+  // we skip past the first path component, as it was handled above.
+ std::for_each(++Iter, End, [&Ret](StringRef Component) {
+ // For reasons unknown to me, we may get a backslash with Windows native
+ // paths for the initial backslash following the drive component, which
+ // we need to ignore as a URI path part.
+ if (Component == "\\")
+ return;
+
+ // Add the separator between the previous path part and the one being
+ // currently processed.
+ Ret += "/";
+
+ // URI encode the part.
+ for (char C : Component) {
+ Ret += percentEncodeURICharacter(C);
+ }
+ });
+
+ return Ret.str().str();
+}
+
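
percentEncodeURICharacter and fileNameToURI implement the RFC 3986 rule for the path component: unreserved characters (plus the handful listed above) pass through, everything else is written as %XX, and path separators are normalized to '/'. A self-contained sketch of the per-component encoding step, for illustration only:

    #include <cctype>
    #include <cstdio>
    #include <string>

    static std::string encodePathComponent(const std::string &Component) {
      static const std::string Allowed = "-._~:@!$&'()*+,;=";
      std::string Out;
      for (unsigned char C : Component) {
        if (std::isalnum(C) ||
            Allowed.find(static_cast<char>(C)) != std::string::npos) {
          Out += static_cast<char>(C);
        } else {
          char Buf[4];
          std::snprintf(Buf, sizeof(Buf), "%%%02X", C); // percent-encode
          Out += Buf;
        }
      }
      return Out;
    }

    int main() {
      // Prints "my%20file%231.c".
      std::printf("%s\n", encodePathComponent("my file#1.c").c_str());
    }
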
+static json::Object createFileLocation(const FileEntry &FE) {
+ return json::Object{{"uri", fileNameToURI(getFileName(FE))}};
+}
+
+static json::Object createFile(const FileEntry &FE) {
+ return json::Object{{"fileLocation", createFileLocation(FE)},
+ {"roles", json::Array{"resultFile"}},
+ {"length", FE.getSize()},
+ {"mimeType", "text/plain"}};
+}
+
+static json::Object createFileLocation(const FileEntry &FE,
+ json::Array &Files) {
+ std::string FileURI = fileNameToURI(getFileName(FE));
+
+ // See if the Files array contains this URI already. If it does not, create
+ // a new file object to add to the array.
+ auto I = llvm::find_if(Files, [&](const json::Value &File) {
+ if (const json::Object *Obj = File.getAsObject()) {
+ if (const json::Object *FileLoc = Obj->getObject("fileLocation")) {
+ Optional<StringRef> URI = FileLoc->getString("uri");
+ return URI && URI->equals(FileURI);
+ }
+ }
+ return false;
+ });
+
+ // Calculate the index within the file location array so it can be stored in
+ // the JSON object.
+ auto Index = static_cast<unsigned>(std::distance(Files.begin(), I));
+ if (I == Files.end())
+ Files.push_back(createFile(FE));
+
+ return json::Object{{"uri", FileURI}, {"fileIndex", Index}};
+}
+
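
The two createFileLocation overloads keep the SARIF files array deduplicated: each location stores the URI plus a fileIndex, and a file object is appended only the first time its URI is seen. A standalone sketch of that find-or-append step, using plain strings instead of json::Value:

    #include <algorithm>
    #include <cassert>
    #include <string>
    #include <vector>

    static unsigned findOrAppend(std::vector<std::string> &Files,
                                 const std::string &URI) {
      auto I = std::find(Files.begin(), Files.end(), URI);
      auto Index = static_cast<unsigned>(std::distance(Files.begin(), I));
      if (I == Files.end())
        Files.push_back(URI); // first time this URI is seen
      return Index;
    }

    int main() {
      std::vector<std::string> Files;
      assert(findOrAppend(Files, "file:///a.c") == 0);
      assert(findOrAppend(Files, "file:///b.c") == 1);
      assert(findOrAppend(Files, "file:///a.c") == 0); // deduplicated
    }
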
+static json::Object createTextRegion(SourceRange R, const SourceManager &SM) {
+ return json::Object{
+ {"startLine", SM.getExpansionLineNumber(R.getBegin())},
+ {"endLine", SM.getExpansionLineNumber(R.getEnd())},
+ {"startColumn", SM.getExpansionColumnNumber(R.getBegin())},
+ {"endColumn", SM.getExpansionColumnNumber(R.getEnd())}};
+}
+
+static json::Object createPhysicalLocation(SourceRange R, const FileEntry &FE,
+ const SourceManager &SMgr,
+ json::Array &Files) {
+ return json::Object{{{"fileLocation", createFileLocation(FE, Files)},
+ {"region", createTextRegion(R, SMgr)}}};
+}
+
+enum class Importance { Important, Essential, Unimportant };
+
+static StringRef importanceToStr(Importance I) {
+ switch (I) {
+ case Importance::Important:
+ return "important";
+ case Importance::Essential:
+ return "essential";
+ case Importance::Unimportant:
+ return "unimportant";
+ }
+ llvm_unreachable("Fully covered switch is not so fully covered");
+}
+
+static json::Object createThreadFlowLocation(json::Object &&Location,
+ Importance I) {
+ return json::Object{{"location", std::move(Location)},
+ {"importance", importanceToStr(I)}};
+}
+
+static json::Object createMessage(StringRef Text) {
+ return json::Object{{"text", Text.str()}};
+}
+
+static json::Object createLocation(json::Object &&PhysicalLocation,
+ StringRef Message = "") {
+ json::Object Ret{{"physicalLocation", std::move(PhysicalLocation)}};
+ if (!Message.empty())
+ Ret.insert({"message", createMessage(Message)});
+ return Ret;
+}
+
+static Importance calculateImportance(const PathDiagnosticPiece &Piece) {
+ switch (Piece.getKind()) {
+ case PathDiagnosticPiece::Kind::Call:
+ case PathDiagnosticPiece::Kind::Macro:
+ case PathDiagnosticPiece::Kind::Note:
+ // FIXME: What should be reported here?
+ break;
+ case PathDiagnosticPiece::Kind::Event:
+ return Piece.getTagStr() == "ConditionBRVisitor" ? Importance::Important
+ : Importance::Essential;
+ case PathDiagnosticPiece::Kind::ControlFlow:
+ return Importance::Unimportant;
+ }
+ return Importance::Unimportant;
+}
+
+static json::Object createThreadFlow(const PathPieces &Pieces,
+ json::Array &Files) {
+ const SourceManager &SMgr = Pieces.front()->getLocation().getManager();
+ json::Array Locations;
+ for (const auto &Piece : Pieces) {
+ const PathDiagnosticLocation &P = Piece->getLocation();
+ Locations.push_back(createThreadFlowLocation(
+ createLocation(createPhysicalLocation(P.asRange(),
+ *P.asLocation().getFileEntry(),
+ SMgr, Files),
+ Piece->getString()),
+ calculateImportance(*Piece)));
+ }
+ return json::Object{{"locations", std::move(Locations)}};
+}
+
+static json::Object createCodeFlow(const PathPieces &Pieces,
+ json::Array &Files) {
+ return json::Object{
+ {"threadFlows", json::Array{createThreadFlow(Pieces, Files)}}};
+}
+
+static json::Object createTool() {
+ return json::Object{{"name", "clang"},
+ {"fullName", "clang static analyzer"},
+ {"language", "en-US"},
+ {"version", getClangFullVersion()}};
+}
+
+static json::Object createResult(const PathDiagnostic &Diag, json::Array &Files,
+ const StringMap<unsigned> &RuleMapping) {
+ const PathPieces &Path = Diag.path.flatten(false);
+ const SourceManager &SMgr = Path.front()->getLocation().getManager();
+
+ auto Iter = RuleMapping.find(Diag.getCheckName());
+ assert(Iter != RuleMapping.end() && "Rule ID is not in the array index map?");
+
+ return json::Object{
+ {"message", createMessage(Diag.getVerboseDescription())},
+ {"codeFlows", json::Array{createCodeFlow(Path, Files)}},
+ {"locations",
+ json::Array{createLocation(createPhysicalLocation(
+ Diag.getLocation().asRange(),
+ *Diag.getLocation().asLocation().getFileEntry(), SMgr, Files))}},
+ {"ruleIndex", Iter->getValue()},
+ {"ruleId", Diag.getCheckName()}};
+}
+
+static StringRef getRuleDescription(StringRef CheckName) {
+ return llvm::StringSwitch<StringRef>(CheckName)
+#define GET_CHECKERS
+#define CHECKER(FULLNAME, CLASS, HELPTEXT) \
+ .Case(FULLNAME, HELPTEXT)
+#include "clang/StaticAnalyzer/Checkers/Checkers.inc"
+#undef CHECKER
+#undef GET_CHECKERS
+ ;
+}
+
+static json::Object createRule(const PathDiagnostic &Diag) {
+ StringRef CheckName = Diag.getCheckName();
+ return json::Object{
+ {"fullDescription", createMessage(getRuleDescription(CheckName))},
+ {"name", createMessage(CheckName)},
+ {"id", CheckName}};
+}
+
+static json::Array createRules(std::vector<const PathDiagnostic *> &Diags,
+ StringMap<unsigned> &RuleMapping) {
+ json::Array Rules;
+ llvm::StringSet<> Seen;
+
+ llvm::for_each(Diags, [&](const PathDiagnostic *D) {
+ StringRef RuleID = D->getCheckName();
+ std::pair<llvm::StringSet<>::iterator, bool> P = Seen.insert(RuleID);
+ if (P.second) {
+ RuleMapping[RuleID] = Rules.size(); // Maps RuleID to an Array Index.
+ Rules.push_back(createRule(*D));
+ }
+ });
+
+ return Rules;
+}
+
+static json::Object createResources(std::vector<const PathDiagnostic *> &Diags,
+ StringMap<unsigned> &RuleMapping) {
+ return json::Object{{"rules", createRules(Diags, RuleMapping)}};
+}
+
+static json::Object createRun(std::vector<const PathDiagnostic *> &Diags) {
+ json::Array Results, Files;
+ StringMap<unsigned> RuleMapping;
+ json::Object Resources = createResources(Diags, RuleMapping);
+
+ llvm::for_each(Diags, [&](const PathDiagnostic *D) {
+ Results.push_back(createResult(*D, Files, RuleMapping));
+ });
+
+ return json::Object{{"tool", createTool()},
+ {"resources", std::move(Resources)},
+ {"results", std::move(Results)},
+ {"files", std::move(Files)}};
+}
+
+void SarifDiagnostics::FlushDiagnosticsImpl(
+ std::vector<const PathDiagnostic *> &Diags, FilesMade *) {
+ // We currently overwrite the file if it already exists. However, it may be
+ // useful to add a feature someday that allows the user to append a run to an
+ // existing SARIF file. One danger from that approach is that the size of the
+ // file can become large very quickly, so decoding into JSON to append a run
+ // may be an expensive operation.
+ std::error_code EC;
+ llvm::raw_fd_ostream OS(OutputFile, EC, llvm::sys::fs::F_Text);
+ if (EC) {
+ llvm::errs() << "warning: could not create file: " << EC.message() << '\n';
+ return;
+ }
+ json::Object Sarif{
+ {"$schema",
+ "http://json.schemastore.org/sarif-2.0.0-csd.2.beta.2018-11-28"},
+ {"version", "2.0.0-csd.2.beta.2018-11-28"},
+ {"runs", json::Array{createRun(Diags)}}};
+ OS << llvm::formatv("{0:2}", json::Value(std::move(Sarif)));
+}
diff --git a/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp b/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
index 36a0f4e6d8..2f6a0c8ffc 100644
--- a/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
+++ b/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
@@ -459,7 +459,7 @@ static Optional<NonLoc> tryRearrange(ProgramStateRef State,
// FIXME: After putting complexity threshold to the symbols we can always
// rearrange additive operations but rearrange comparisons only if
// option is set.
- if(!Opts.shouldAggressivelySimplifyBinaryOperation())
+ if(!Opts.ShouldAggressivelySimplifyBinaryOperation)
return None;
SymbolRef LSym = Lhs.getAsSymbol();
@@ -475,9 +475,6 @@ static Optional<NonLoc> tryRearrange(ProgramStateRef State,
SingleTy = ResultTy;
if (LSym->getType() != SingleTy)
return None;
- // Substracting unsigned integers is a nightmare.
- if (!SingleTy->isSignedIntegerOrEnumerationType())
- return None;
} else {
// Don't rearrange other operations.
return None;
@@ -485,6 +482,10 @@ static Optional<NonLoc> tryRearrange(ProgramStateRef State,
assert(!SingleTy.isNull() && "We should have figured out the type by now!");
+ // Rearrange signed symbolic expressions only
+ if (!SingleTy->isSignedIntegerOrEnumerationType())
+ return None;
+
SymbolRef RSym = Rhs.getAsSymbol();
if (!RSym || RSym->getType() != SingleTy)
return None;
@@ -624,7 +625,7 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
case BO_LE:
case BO_GE:
op = BinaryOperator::reverseComparisonOp(op);
- // FALL-THROUGH
+ LLVM_FALLTHROUGH;
case BO_EQ:
case BO_NE:
case BO_Add:
@@ -638,7 +639,7 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
// (~0)>>a
if (LHSValue.isAllOnesValue() && LHSValue.isSigned())
return evalCastFromNonLoc(lhs, resultTy);
- // FALL-THROUGH
+ LLVM_FALLTHROUGH;
case BO_Shl:
// 0<<a and 0>>a
if (LHSValue == 0)
diff --git a/lib/StaticAnalyzer/Core/Store.cpp b/lib/StaticAnalyzer/Core/Store.cpp
index cc9939a68d..794fd84364 100644
--- a/lib/StaticAnalyzer/Core/Store.cpp
+++ b/lib/StaticAnalyzer/Core/Store.cpp
@@ -88,7 +88,7 @@ const MemRegion *StoreManager::castRegion(const MemRegion *R, QualType CastToTy)
return R;
// We don't know what to make of it. Return a NULL region, which
- // will be interpretted as UnknownVal.
+ // will be interpreted as UnknownVal.
return nullptr;
}
diff --git a/lib/StaticAnalyzer/Core/SymbolManager.cpp b/lib/StaticAnalyzer/Core/SymbolManager.cpp
index ec1224e52b..66273f099a 100644
--- a/lib/StaticAnalyzer/Core/SymbolManager.cpp
+++ b/lib/StaticAnalyzer/Core/SymbolManager.cpp
@@ -83,10 +83,13 @@ void SymbolCast::dumpToStream(raw_ostream &os) const {
}
void SymbolConjured::dumpToStream(raw_ostream &os) const {
- os << "conj_$" << getSymbolID() << '{' << T.getAsString()
- << ", LC" << LCtx->getID() << ", S" << S->getID(
- LCtx->getDecl()->getASTContext()) << ", #" << Count
- << '}';
+ os << "conj_$" << getSymbolID() << '{' << T.getAsString() << ", LC"
+ << LCtx->getID();
+ if (S)
+ os << ", S" << S->getID(LCtx->getDecl()->getASTContext());
+ else
+ os << ", no stmt";
+ os << ", #" << Count << '}';
}
void SymbolDerived::dumpToStream(raw_ostream &os) const {
@@ -398,7 +401,6 @@ void SymbolReaper::markDependentsLive(SymbolRef sym) {
void SymbolReaper::markLive(SymbolRef sym) {
TheLiving[sym] = NotProcessed;
- TheDead.erase(sym);
markDependentsLive(sym);
}
@@ -423,14 +425,6 @@ void SymbolReaper::markInUse(SymbolRef sym) {
MetadataInUse.insert(sym);
}
-bool SymbolReaper::maybeDead(SymbolRef sym) {
- if (isLive(sym))
- return false;
-
- TheDead.insert(sym);
- return true;
-}
-
bool SymbolReaper::isLiveRegion(const MemRegion *MR) {
if (RegionRoots.count(MR))
return true;
diff --git a/lib/StaticAnalyzer/Core/TaintManager.cpp b/lib/StaticAnalyzer/Core/TaintManager.cpp
new file mode 100644
index 0000000000..c34b0ca183
--- /dev/null
+++ b/lib/StaticAnalyzer/Core/TaintManager.cpp
@@ -0,0 +1,23 @@
+//== TaintManager.cpp ------------------------------------------ -*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/TaintManager.h"
+
+using namespace clang;
+using namespace ento;
+
+void *ProgramStateTrait<TaintMap>::GDMIndex() {
+ static int index = 0;
+ return &index;
+}
+
+void *ProgramStateTrait<DerivedSymTaint>::GDMIndex() {
+ static int index;
+ return &index;
+}
diff --git a/lib/StaticAnalyzer/Core/WorkList.cpp b/lib/StaticAnalyzer/Core/WorkList.cpp
index 4b227375da..e705393cb8 100644
--- a/lib/StaticAnalyzer/Core/WorkList.cpp
+++ b/lib/StaticAnalyzer/Core/WorkList.cpp
@@ -152,7 +152,7 @@ public:
auto BE = N->getLocation().getAs<BlockEntrance>();
if (!BE) {
- // Assume the choice of the order of the preceeding block entrance was
+ // Assume the choice of the order of the preceding block entrance was
// correct.
StackUnexplored.push_back(U);
} else {
@@ -252,3 +252,63 @@ public:
std::unique_ptr<WorkList> WorkList::makeUnexploredFirstPriorityQueue() {
return llvm::make_unique<UnexploredFirstPriorityQueue>();
}
+
+namespace {
+class UnexploredFirstPriorityLocationQueue : public WorkList {
+ using LocIdentifier = const CFGBlock *;
+
+ // How many times each location was visited.
+  // It is signed because we negate it later to reverse the comparison.
+ using VisitedTimesMap = llvm::DenseMap<LocIdentifier, int>;
+
+  // Compare first by the number of times the location was visited (negated
+  // to prefer less often visited locations), then by insertion time (prefer
+  // expanding nodes inserted later, which emulates DFS ordering).
+ using QueuePriority = std::pair<int, unsigned long>;
+ using QueueItem = std::pair<WorkListUnit, QueuePriority>;
+
+ struct ExplorationComparator {
+ bool operator() (const QueueItem &LHS, const QueueItem &RHS) {
+ return LHS.second < RHS.second;
+ }
+ };
+
+ // Number of inserted nodes, used to emulate DFS ordering in the priority
+  // queue when visit counts are equal.
+ unsigned long Counter = 0;
+
+  // Number of times each location was reached.
+ VisitedTimesMap NumReached;
+
+ // The top item is the largest one.
+ llvm::PriorityQueue<QueueItem, std::vector<QueueItem>, ExplorationComparator>
+ queue;
+
+public:
+ bool hasWork() const override {
+ return !queue.empty();
+ }
+
+ void enqueue(const WorkListUnit &U) override {
+ const ExplodedNode *N = U.getNode();
+ unsigned NumVisited = 0;
+ if (auto BE = N->getLocation().getAs<BlockEntrance>())
+ NumVisited = NumReached[BE->getBlock()]++;
+
+ queue.push(std::make_pair(U, std::make_pair(-NumVisited, ++Counter)));
+ }
+
+ WorkListUnit dequeue() override {
+ QueueItem U = queue.top();
+ queue.pop();
+ return U.first;
+ }
+
+};
+
+}
+
+std::unique_ptr<WorkList> WorkList::makeUnexploredFirstPriorityLocationQueue() {
+ return llvm::make_unique<UnexploredFirstPriorityLocationQueue>();
+}
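
The new queue orders work-list units by a two-part priority: the negated per-block visit count comes first, and the monotonically increasing insertion counter breaks ties so that, among equally visited blocks, the most recently inserted unit is expanded next. A minimal sketch of that ordering with std::priority_queue, independent of the analyzer types:

    #include <cassert>
    #include <queue>
    #include <utility>

    int main() {
      using Priority = std::pair<int, unsigned long>; // (-timesVisited, insertionIndex)
      std::priority_queue<Priority> Q;                // top() is the largest pair
      Q.push({-2, 1}); // block visited twice, inserted first
      Q.push({0, 2});  // unvisited block
      Q.push({0, 3});  // unvisited block, inserted later
      assert(Q.top() == Priority(0, 3)); // least visited wins; newest breaks the tie
    }
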
diff --git a/lib/StaticAnalyzer/Core/Z3ConstraintManager.cpp b/lib/StaticAnalyzer/Core/Z3ConstraintManager.cpp
index 16ab6a33eb..c4729f969f 100644
--- a/lib/StaticAnalyzer/Core/Z3ConstraintManager.cpp
+++ b/lib/StaticAnalyzer/Core/Z3ConstraintManager.cpp
@@ -46,7 +46,7 @@ public:
// Function used to report errors
void Z3ErrorHandler(Z3_context Context, Z3_error_code Error) {
llvm::report_fatal_error("Z3 error: " +
- llvm::Twine(Z3_get_error_msg_ex(Context, Error)));
+ llvm::Twine(Z3_get_error_msg(Context, Error)));
}
/// Wrapper for Z3 context
@@ -77,32 +77,27 @@ class Z3Sort : public SMTSort {
public:
/// Default constructor, mainly used by make_shared
- Z3Sort(Z3Context &C, Z3_sort ZS) : SMTSort(), Context(C), Sort(ZS) {
+ Z3Sort(Z3Context &C, Z3_sort ZS) : Context(C), Sort(ZS) {
Z3_inc_ref(Context.Context, reinterpret_cast<Z3_ast>(Sort));
}
/// Override implicit copy constructor for correct reference counting.
- Z3Sort(const Z3Sort &Copy)
- : SMTSort(), Context(Copy.Context), Sort(Copy.Sort) {
+ Z3Sort(const Z3Sort &Other) : Context(Other.Context), Sort(Other.Sort) {
Z3_inc_ref(Context.Context, reinterpret_cast<Z3_ast>(Sort));
}
- /// Provide move constructor
- Z3Sort(Z3Sort &&Move) : SMTSort(), Context(Move.Context), Sort(nullptr) {
- *this = std::move(Move);
- }
-
- /// Provide move assignment constructor
- Z3Sort &operator=(Z3Sort &&Move) {
- if (this != &Move) {
- if (Sort)
- Z3_dec_ref(Context.Context, reinterpret_cast<Z3_ast>(Sort));
- Sort = Move.Sort;
- Move.Sort = nullptr;
- }
+ /// Override implicit copy assignment constructor for correct reference
+ /// counting.
+ Z3Sort &operator=(const Z3Sort &Other) {
+ Z3_inc_ref(Context.Context, reinterpret_cast<Z3_ast>(Other.Sort));
+ Z3_dec_ref(Context.Context, reinterpret_cast<Z3_ast>(Sort));
+ Sort = Other.Sort;
return *this;
}
+ Z3Sort(Z3Sort &&Other) = delete;
+ Z3Sort &operator=(Z3Sort &&Other) = delete;
+
~Z3Sort() {
if (Sort)
Z3_dec_ref(Context.Context, reinterpret_cast<Z3_ast>(Sort));
@@ -134,13 +129,6 @@ public:
static_cast<const Z3Sort &>(Other).Sort);
}
- Z3Sort &operator=(const Z3Sort &Move) {
- Z3_inc_ref(Context.Context, reinterpret_cast<Z3_ast>(Move.Sort));
- Z3_dec_ref(Context.Context, reinterpret_cast<Z3_ast>(Sort));
- Sort = Move.Sort;
- return *this;
- }
-
void print(raw_ostream &OS) const override {
OS << Z3_sort_to_string(Context.Context, Sort);
}
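
The Z3 wrappers replace the removed move operations with copy assignment that increments the source's reference count before decrementing the destination's; that ordering makes self-assignment safe without an explicit check. A standalone sketch of the pattern, with a toy counter standing in for the Z3 C API:

    #include <cassert>

    struct Counted { int RefCount = 1; };
    static void incRef(Counted *C) { ++C->RefCount; }
    static void decRef(Counted *C) { --C->RefCount; }

    struct Handle {
      Counted *Obj;
      explicit Handle(Counted *O) : Obj(O) { incRef(Obj); }
      Handle(const Handle &Other) : Obj(Other.Obj) { incRef(Obj); }
      Handle &operator=(const Handle &Other) {
        incRef(Other.Obj); // increment first: protects against self-assignment
        decRef(Obj);
        Obj = Other.Obj;
        return *this;
      }
      ~Handle() { decRef(Obj); }
    };

    int main() {
      Counted C;
      Handle H(&C);
      H = H; // the count is unchanged afterwards
      assert(C.RefCount == 2);
    }
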
@@ -167,22 +155,18 @@ public:
Z3_inc_ref(Context.Context, AST);
}
- /// Provide move constructor
- Z3Expr(Z3Expr &&Move) : SMTExpr(), Context(Move.Context), AST(nullptr) {
- *this = std::move(Move);
- }
-
- /// Provide move assignment constructor
- Z3Expr &operator=(Z3Expr &&Move) {
- if (this != &Move) {
- if (AST)
- Z3_dec_ref(Context.Context, AST);
- AST = Move.AST;
- Move.AST = nullptr;
- }
+ /// Override implicit copy assignment constructor for correct reference
+ /// counting.
+ Z3Expr &operator=(const Z3Expr &Other) {
+ Z3_inc_ref(Context.Context, Other.AST);
+ Z3_dec_ref(Context.Context, AST);
+ AST = Other.AST;
return *this;
}
+ Z3Expr(Z3Expr &&Other) = delete;
+ Z3Expr &operator=(Z3Expr &&Other) = delete;
+
~Z3Expr() {
if (AST)
Z3_dec_ref(Context.Context, AST);
@@ -202,14 +186,6 @@ public:
static_cast<const Z3Expr &>(Other).AST);
}
- /// Override implicit move constructor for correct reference counting.
- Z3Expr &operator=(const Z3Expr &Move) {
- Z3_inc_ref(Context.Context, Move.AST);
- Z3_dec_ref(Context.Context, AST);
- AST = Move.AST;
- return *this;
- }
-
void print(raw_ostream &OS) const override {
OS << Z3_ast_to_string(Context.Context, AST);
}
@@ -228,30 +204,13 @@ class Z3Model {
public:
Z3Model(Z3Context &C, Z3_model ZM) : Context(C), Model(ZM) {
- assert(C.Context != nullptr);
Z3_model_inc_ref(Context.Context, Model);
}
- /// Override implicit copy constructor for correct reference counting.
- Z3Model(const Z3Model &Copy) : Context(Copy.Context), Model(Copy.Model) {
- Z3_model_inc_ref(Context.Context, Model);
- }
-
- /// Provide move constructor
- Z3Model(Z3Model &&Move) : Context(Move.Context), Model(nullptr) {
- *this = std::move(Move);
- }
-
- /// Provide move assignment constructor
- Z3Model &operator=(Z3Model &&Move) {
- if (this != &Move) {
- if (Model)
- Z3_model_dec_ref(Context.Context, Model);
- Model = Move.Model;
- Move.Model = nullptr;
- }
- return *this;
- }
+ Z3Model(const Z3Model &Other) = delete;
+ Z3Model(Z3Model &&Other) = delete;
+ Z3Model &operator=(Z3Model &Other) = delete;
+ Z3Model &operator=(Z3Model &&Other) = delete;
~Z3Model() {
if (Model)
@@ -310,32 +269,14 @@ class Z3Solver : public SMTSolver {
Z3_solver Solver;
public:
- Z3Solver() : SMTSolver(), Solver(Z3_mk_simple_solver(Context.Context)) {
+ Z3Solver() : Solver(Z3_mk_simple_solver(Context.Context)) {
Z3_solver_inc_ref(Context.Context, Solver);
}
- /// Override implicit copy constructor for correct reference counting.
- Z3Solver(const Z3Solver &Copy)
- : SMTSolver(), Context(Copy.Context), Solver(Copy.Solver) {
- Z3_solver_inc_ref(Context.Context, Solver);
- }
-
- /// Provide move constructor
- Z3Solver(Z3Solver &&Move)
- : SMTSolver(), Context(Move.Context), Solver(nullptr) {
- *this = std::move(Move);
- }
-
- /// Provide move assignment constructor
- Z3Solver &operator=(Z3Solver &&Move) {
- if (this != &Move) {
- if (Solver)
- Z3_solver_dec_ref(Context.Context, Solver);
- Solver = Move.Solver;
- Move.Solver = nullptr;
- }
- return *this;
- }
+ Z3Solver(const Z3Solver &Other) = delete;
+ Z3Solver(Z3Solver &&Other) = delete;
+ Z3Solver &operator=(Z3Solver &Other) = delete;
+ Z3Solver &operator=(Z3Solver &&Other) = delete;
~Z3Solver() {
if (Solver)
@@ -671,7 +612,7 @@ public:
toZ3Expr(*From).AST, toZ3Sort(*To).Sort)));
}
- SMTExprRef mkFPtoSBV(const SMTExprRef &From, const SMTSortRef &To) override {
+ SMTExprRef mkSBVtoFP(const SMTExprRef &From, const SMTSortRef &To) override {
SMTExprRef RoundingMode = getFloatRoundingMode();
return newExprRef(Z3Expr(
Context,
@@ -679,7 +620,7 @@ public:
toZ3Expr(*From).AST, toZ3Sort(*To).Sort)));
}
- SMTExprRef mkFPtoUBV(const SMTExprRef &From, const SMTSortRef &To) override {
+ SMTExprRef mkUBVtoFP(const SMTExprRef &From, const SMTSortRef &To) override {
SMTExprRef RoundingMode = getFloatRoundingMode();
return newExprRef(Z3Expr(
Context,
@@ -687,14 +628,14 @@ public:
toZ3Expr(*From).AST, toZ3Sort(*To).Sort)));
}
- SMTExprRef mkSBVtoFP(const SMTExprRef &From, unsigned ToWidth) override {
+ SMTExprRef mkFPtoSBV(const SMTExprRef &From, unsigned ToWidth) override {
SMTExprRef RoundingMode = getFloatRoundingMode();
return newExprRef(Z3Expr(
Context, Z3_mk_fpa_to_sbv(Context.Context, toZ3Expr(*RoundingMode).AST,
toZ3Expr(*From).AST, ToWidth)));
}
- SMTExprRef mkUBVtoFP(const SMTExprRef &From, unsigned ToWidth) override {
+ SMTExprRef mkFPtoUBV(const SMTExprRef &From, unsigned ToWidth) override {
SMTExprRef RoundingMode = getFloatRoundingMode();
return newExprRef(Z3Expr(
Context, Z3_mk_fpa_to_ubv(Context.Context, toZ3Expr(*RoundingMode).AST,
@@ -733,9 +674,11 @@ public:
llvm::APSInt getBitvector(const SMTExprRef &Exp, unsigned BitWidth,
bool isUnsigned) override {
- return llvm::APSInt(llvm::APInt(
- BitWidth, Z3_get_numeral_string(Context.Context, toZ3Expr(*Exp).AST),
- 10));
+ return llvm::APSInt(
+ llvm::APInt(BitWidth,
+ Z3_get_numeral_string(Context.Context, toZ3Expr(*Exp).AST),
+ 10),
+ isUnsigned);
}
bool getBoolean(const SMTExprRef &Exp) override {
@@ -747,36 +690,6 @@ public:
return newExprRef(Z3Expr(Context, Z3_mk_fpa_rne(Context.Context)));
}
- SMTExprRef fromBoolean(const bool Bool) override {
- Z3_ast AST =
- Bool ? Z3_mk_true(Context.Context) : Z3_mk_false(Context.Context);
- return newExprRef(Z3Expr(Context, AST));
- }
-
- SMTExprRef fromAPFloat(const llvm::APFloat &Float) override {
- SMTSortRef Sort =
- getFloatSort(llvm::APFloat::semanticsSizeInBits(Float.getSemantics()));
-
- llvm::APSInt Int = llvm::APSInt(Float.bitcastToAPInt(), false);
- SMTExprRef Z3Int = fromAPSInt(Int);
- return newExprRef(Z3Expr(
- Context, Z3_mk_fpa_to_fp_bv(Context.Context, toZ3Expr(*Z3Int).AST,
- toZ3Sort(*Sort).Sort)));
- }
-
- SMTExprRef fromAPSInt(const llvm::APSInt &Int) override {
- SMTSortRef Sort = getBitvectorSort(Int.getBitWidth());
- Z3_ast AST = Z3_mk_numeral(Context.Context, Int.toString(10).c_str(),
- toZ3Sort(*Sort).Sort);
- return newExprRef(Z3Expr(Context, AST));
- }
-
- SMTExprRef fromInt(const char *Int, uint64_t BitWidth) override {
- SMTSortRef Sort = getBitvectorSort(BitWidth);
- Z3_ast AST = Z3_mk_numeral(Context.Context, Int, toZ3Sort(*Sort).Sort);
- return newExprRef(Z3Expr(Context, AST));
- }
-
bool toAPFloat(const SMTSortRef &Sort, const SMTExprRef &AST,
llvm::APFloat &Float, bool useSemantics) {
assert(Sort->isFloatSort() && "Unsupported sort to floating-point!");
@@ -837,7 +750,7 @@ public:
}
bool getInterpretation(const SMTExprRef &Exp, llvm::APSInt &Int) override {
- Z3Model Model = getModel();
+ Z3Model Model(Context, Z3_solver_get_model(Context.Context, Solver));
Z3_func_decl Func = Z3_get_app_decl(
Context.Context, Z3_to_app(Context.Context, toZ3Expr(*Exp).AST));
if (Z3_model_has_interp(Context.Context, Model.Model, Func) != Z3_L_TRUE)
@@ -851,7 +764,7 @@ public:
}
bool getInterpretation(const SMTExprRef &Exp, llvm::APFloat &Float) override {
- Z3Model Model = getModel();
+ Z3Model Model(Context, Z3_solver_get_model(Context.Context, Solver));
Z3_func_decl Func = Z3_get_app_decl(
Context.Context, Z3_to_app(Context.Context, toZ3Expr(*Exp).AST));
if (Z3_model_has_interp(Context.Context, Model.Model, Func) != Z3_L_TRUE)
@@ -882,14 +795,10 @@ public:
return Z3_solver_pop(Context.Context, Solver, NumStates);
}
- /// Get a model from the solver. Caller should check the model is
- /// satisfiable.
- Z3Model getModel() {
- return Z3Model(Context, Z3_solver_get_model(Context.Context, Solver));
- }
+ bool isFPSupported() override { return true; }
/// Reset the solver and remove all constraints.
- void reset() const override { Z3_solver_reset(Context.Context, Solver); }
+ void reset() override { Z3_solver_reset(Context.Context, Solver); }
void print(raw_ostream &OS) const override {
OS << Z3_solver_to_string(Context.Context, Solver);
@@ -902,49 +811,6 @@ class Z3ConstraintManager : public SMTConstraintManager<ConstraintZ3, Z3Expr> {
public:
Z3ConstraintManager(SubEngine *SE, SValBuilder &SB)
: SMTConstraintManager(SE, SB, Solver) {}
-
- bool canReasonAbout(SVal X) const override {
- const TargetInfo &TI = getBasicVals().getContext().getTargetInfo();
-
- Optional<nonloc::SymbolVal> SymVal = X.getAs<nonloc::SymbolVal>();
- if (!SymVal)
- return true;
-
- const SymExpr *Sym = SymVal->getSymbol();
- QualType Ty = Sym->getType();
-
- // Complex types are not modeled
- if (Ty->isComplexType() || Ty->isComplexIntegerType())
- return false;
-
- // Non-IEEE 754 floating-point types are not modeled
- if ((Ty->isSpecificBuiltinType(BuiltinType::LongDouble) &&
- (&TI.getLongDoubleFormat() == &llvm::APFloat::x87DoubleExtended() ||
- &TI.getLongDoubleFormat() == &llvm::APFloat::PPCDoubleDouble())))
- return false;
-
- if (isa<SymbolData>(Sym))
- return true;
-
- SValBuilder &SVB = getSValBuilder();
-
- if (const SymbolCast *SC = dyn_cast<SymbolCast>(Sym))
- return canReasonAbout(SVB.makeSymbolVal(SC->getOperand()));
-
- if (const BinarySymExpr *BSE = dyn_cast<BinarySymExpr>(Sym)) {
- if (const SymIntExpr *SIE = dyn_cast<SymIntExpr>(BSE))
- return canReasonAbout(SVB.makeSymbolVal(SIE->getLHS()));
-
- if (const IntSymExpr *ISE = dyn_cast<IntSymExpr>(BSE))
- return canReasonAbout(SVB.makeSymbolVal(ISE->getRHS()));
-
- if (const SymSymExpr *SSE = dyn_cast<SymSymExpr>(BSE))
- return canReasonAbout(SVB.makeSymbolVal(SSE->getLHS())) &&
- canReasonAbout(SVB.makeSymbolVal(SSE->getRHS()));
- }
-
- llvm_unreachable("Unsupported expression to reason about!");
- }
}; // end class Z3ConstraintManager
} // end anonymous namespace
@@ -956,7 +822,7 @@ SMTSolverRef clang::ento::CreateZ3Solver() {
return llvm::make_unique<Z3Solver>();
#else
llvm::report_fatal_error("Clang was not compiled with Z3 support, rebuild "
- "with -DCLANG_ANALYZER_BUILD_Z3=ON",
+ "with -DCLANG_ANALYZER_ENABLE_Z3_SOLVER=ON",
false);
return nullptr;
#endif
@@ -968,7 +834,7 @@ ento::CreateZ3ConstraintManager(ProgramStateManager &StMgr, SubEngine *Eng) {
return llvm::make_unique<Z3ConstraintManager>(Eng, StMgr.getSValBuilder());
#else
llvm::report_fatal_error("Clang was not compiled with Z3 support, rebuild "
- "with -DCLANG_ANALYZER_BUILD_Z3=ON",
+ "with -DCLANG_ANALYZER_ENABLE_Z3_SOLVER=ON",
false);
return nullptr;
#endif
diff --git a/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
index 4324ed2d96..d87937d9b6 100644
--- a/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
+++ b/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
@@ -204,7 +204,7 @@ public:
PP(CI.getPreprocessor()), OutDir(outdir), Opts(std::move(opts)),
Plugins(plugins), Injector(injector), CTU(CI) {
DigestAnalyzerOptions();
- if (Opts->PrintStats || Opts->shouldSerializeStats()) {
+ if (Opts->PrintStats || Opts->ShouldSerializeStats) {
AnalyzerTimers = llvm::make_unique<llvm::TimerGroup>(
"analyzer", "Analyzer timers");
TUTotalTimer = llvm::make_unique<llvm::Timer>(
@@ -739,7 +739,7 @@ void AnalysisConsumer::RunPathSensitiveChecks(Decl *D,
// Execute the worklist algorithm.
Eng.ExecuteWorkList(Mgr->getAnalysisDeclContextManager().getStackFrame(D),
- Mgr->options.getMaxNodesPerTopLevelFunction());
+ Mgr->options.MaxNodesPerTopLevelFunction);
if (!Mgr->options.DumpExplodedGraphTo.empty())
Eng.DumpGraph(Mgr->options.TrimGraph, Mgr->options.DumpExplodedGraphTo);
diff --git a/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp b/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
index 76e66cc023..70997e34a1 100644
--- a/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
+++ b/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
@@ -17,11 +17,11 @@
#include "clang/StaticAnalyzer/Checkers/ClangCheckers.h"
#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
-#include "clang/StaticAnalyzer/Core/CheckerOptInfo.h"
#include "clang/StaticAnalyzer/Core/CheckerRegistry.h"
#include "clang/StaticAnalyzer/Frontend/FrontendActions.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>
@@ -101,16 +101,6 @@ void ClangCheckerRegistry::warnIncompatible(DiagnosticsEngine *diags,
<< pluginAPIVersion;
}
-static SmallVector<CheckerOptInfo, 8>
-getCheckerOptList(const AnalyzerOptions &opts) {
- SmallVector<CheckerOptInfo, 8> checkerOpts;
- for (unsigned i = 0, e = opts.CheckersControlList.size(); i != e; ++i) {
- const std::pair<std::string, bool> &opt = opts.CheckersControlList[i];
- checkerOpts.push_back(CheckerOptInfo(opt.first, opt.second));
- }
- return checkerOpts;
-}
-
std::unique_ptr<CheckerManager> ento::createCheckerManager(
ASTContext &context,
AnalyzerOptions &opts,
@@ -119,26 +109,15 @@ std::unique_ptr<CheckerManager> ento::createCheckerManager(
DiagnosticsEngine &diags) {
auto checkerMgr = llvm::make_unique<CheckerManager>(context, opts);
- SmallVector<CheckerOptInfo, 8> checkerOpts = getCheckerOptList(opts);
-
ClangCheckerRegistry allCheckers(plugins, &diags);
for (const auto &Fn : checkerRegistrationFns)
Fn(allCheckers);
- allCheckers.initializeManager(*checkerMgr, checkerOpts);
+ allCheckers.initializeManager(*checkerMgr, opts, diags);
allCheckers.validateCheckerOptions(opts, diags);
checkerMgr->finishedCheckerRegistration();
- for (unsigned i = 0, e = checkerOpts.size(); i != e; ++i) {
- if (checkerOpts[i].isUnclaimed()) {
- diags.Report(diag::err_unknown_analyzer_checker)
- << checkerOpts[i].getName();
- diags.Report(diag::note_suggest_disabling_all_checkers);
- }
-
- }
-
return checkerMgr;
}
@@ -154,6 +133,78 @@ void ento::printEnabledCheckerList(raw_ostream &out,
const AnalyzerOptions &opts) {
out << "OVERVIEW: Clang Static Analyzer Enabled Checkers List\n\n";
- SmallVector<CheckerOptInfo, 8> checkerOpts = getCheckerOptList(opts);
- ClangCheckerRegistry(plugins).printList(out, checkerOpts);
+ ClangCheckerRegistry(plugins).printList(out, opts);
+}
+
+void ento::printAnalyzerConfigList(raw_ostream &out) {
+ out << "OVERVIEW: Clang Static Analyzer -analyzer-config Option List\n\n";
+ out << "USAGE: clang -cc1 [CLANG_OPTIONS] -analyzer-config "
+ "<OPTION1=VALUE,OPTION2=VALUE,...>\n\n";
+ out << " clang -cc1 [CLANG_OPTIONS] -analyzer-config OPTION1=VALUE, "
+ "-analyzer-config OPTION2=VALUE, ...\n\n";
+  out << "  clang [CLANG_OPTIONS] -Xclang -analyzer-config -Xclang "
+ "<OPTION1=VALUE,OPTION2=VALUE,...>\n\n";
+ out << " clang [CLANG_OPTIONS] -Xclang -analyzer-config -Xclang "
+ "OPTION1=VALUE, -Xclang -analyzer-config -Xclang "
+ "OPTION2=VALUE, ...\n\n";
+ out << "OPTIONS:\n\n";
+
+ using OptionAndDescriptionTy = std::pair<StringRef, std::string>;
+ OptionAndDescriptionTy PrintableOptions[] = {
+#define ANALYZER_OPTION(TYPE, NAME, CMDFLAG, DESC, DEFAULT_VAL) \
+ { \
+ CMDFLAG, \
+ llvm::Twine(llvm::Twine() + "(" + \
+ (StringRef(#TYPE) == "StringRef" ? "string" : #TYPE ) + \
+ ") " DESC \
+ " (default: " #DEFAULT_VAL ")").str() \
+ },
+
+#define ANALYZER_OPTION_DEPENDS_ON_USER_MODE(TYPE, NAME, CMDFLAG, DESC, \
+ SHALLOW_VAL, DEEP_VAL) \
+ { \
+ CMDFLAG, \
+ llvm::Twine(llvm::Twine() + "(" + \
+ (StringRef(#TYPE) == "StringRef" ? "string" : #TYPE ) + \
+ ") " DESC \
+ " (default: " #SHALLOW_VAL " in shallow mode, " #DEEP_VAL \
+ " in deep mode)").str() \
+ },
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.def"
+#undef ANALYZER_OPTION
+#undef ANALYZER_OPTION_DEPENDS_ON_USER_MODE
+ };
+
+ llvm::sort(PrintableOptions, [](const OptionAndDescriptionTy &LHS,
+ const OptionAndDescriptionTy &RHS) {
+ return LHS.first < RHS.first;
+ });
+
+ constexpr size_t MinLineWidth = 70;
+ constexpr size_t PadForOpt = 2;
+ constexpr size_t OptionWidth = 30;
+ constexpr size_t PadForDesc = PadForOpt + OptionWidth;
+ static_assert(MinLineWidth > PadForDesc, "MinLineWidth must be greater!");
+
+ llvm::formatted_raw_ostream FOut(out);
+
+ for (const auto &Pair : PrintableOptions) {
+ FOut.PadToColumn(PadForOpt) << Pair.first;
+
+    // If the buffer's length is greater than PadForDesc, print a newline.
+ if (FOut.getColumn() > PadForDesc)
+ FOut << '\n';
+
+ FOut.PadToColumn(PadForDesc);
+
+ for (char C : Pair.second) {
+ if (FOut.getColumn() > MinLineWidth && C == ' ') {
+ FOut << '\n';
+ FOut.PadToColumn(PadForDesc);
+ continue;
+ }
+ FOut << C;
+ }
+ FOut << "\n\n";
+ }
}
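
printAnalyzerConfigList builds its table by expanding AnalyzerOptions.def through the ANALYZER_OPTION macros, turning each entry into a (flag, description) pair before sorting and word-wrapping. A minimal sketch of that X-macro expansion with invented option entries; the real flags live in AnalyzerOptions.def:

    #include <cstdio>
    #include <utility>

    // Invented entries standing in for AnalyzerOptions.def.
    #define MY_ANALYZER_OPTIONS                                            \
      OPTION(bool, "widen-loops", "Whether to widen loops after N visits") \
      OPTION(unsigned, "max-nodes", "Bound on the exploded graph size")

    int main() {
      const std::pair<const char *, const char *> PrintableOptions[] = {
    #define OPTION(TYPE, CMDFLAG, DESC) {CMDFLAG, DESC},
        MY_ANALYZER_OPTIONS
    #undef OPTION
      };
      for (const auto &Pair : PrintableOptions)
        std::printf("  %-30s %s\n", Pair.first, Pair.second);
    }
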
diff --git a/lib/StaticAnalyzer/Frontend/ModelInjector.cpp b/lib/StaticAnalyzer/Frontend/ModelInjector.cpp
index c43d30440c..b1927c8401 100644
--- a/lib/StaticAnalyzer/Frontend/ModelInjector.cpp
+++ b/lib/StaticAnalyzer/Frontend/ModelInjector.cpp
@@ -48,7 +48,7 @@ void ModelInjector::onBodySynthesis(const NamedDecl *D) {
FileID mainFileID = SM.getMainFileID();
AnalyzerOptionsRef analyzerOpts = CI.getAnalyzerOpts();
- llvm::StringRef modelPath = analyzerOpts->Config["model-path"];
+ llvm::StringRef modelPath = analyzerOpts->ModelPath;
llvm::SmallString<128> fileName;
diff --git a/lib/StaticAnalyzer/README.txt b/lib/StaticAnalyzer/README.txt
index d4310c57d8..79a16ec767 100644
--- a/lib/StaticAnalyzer/README.txt
+++ b/lib/StaticAnalyzer/README.txt
@@ -69,23 +69,23 @@ triggered the problem.
= Notes about C++ =
-Since now constructors are seen before the variable that is constructed
-in the CFG, we create a temporary object as the destination region that
+Since constructors are now seen before the variable that is constructed
+in the CFG, we create a temporary object as the destination region that
is constructed into. See ExprEngine::VisitCXXConstructExpr().
In ExprEngine::processCallExit(), we always bind the object region to the
evaluated CXXConstructExpr. Then in VisitDeclStmt(), we compute the
corresponding lazy compound value if the variable is not a reference, and
bind the variable region to the lazy compound value. If the variable
-is a reference, just use the object region as the initilizer value.
+is a reference, just use the object region as the initializer value.
Before entering a C++ method (or ctor/dtor), the 'this' region is bound
-to the object region. In ctors, we synthesize 'this' region with
+to the object region. In ctors, we synthesize 'this' region with
CXXRecordDecl*, which means we do not use type qualifiers. In methods, we
-synthesize 'this' region with CXXMethodDecl*, which has getThisType()
+synthesize 'this' region with CXXMethodDecl*, which has getThisType()
taking type qualifiers into account. It does not matter we use qualified
'this' region in one method and unqualified 'this' region in another
-method, because we only need to ensure the 'this' region is consistent
+method, because we only need to ensure the 'this' region is consistent
when we synthesize it and create it directly from CXXThisExpr in a single
method call.
diff --git a/lib/Tooling/ASTDiff/ASTDiff.cpp b/lib/Tooling/ASTDiff/ASTDiff.cpp
index 9ff9a02a58..592e8572c7 100644
--- a/lib/Tooling/ASTDiff/ASTDiff.cpp
+++ b/lib/Tooling/ASTDiff/ASTDiff.cpp
@@ -845,9 +845,8 @@ void ASTDiff::Impl::matchBottomUp(Mapping &M) const {
}
bool Matched = M.hasSrc(Id1);
const Node &N1 = T1.getNode(Id1);
- bool MatchedChildren =
- std::any_of(N1.Children.begin(), N1.Children.end(),
- [&](NodeId Child) { return M.hasSrc(Child); });
+ bool MatchedChildren = llvm::any_of(
+ N1.Children, [&](NodeId Child) { return M.hasSrc(Child); });
if (Matched || !MatchedChildren)
continue;
NodeId Id2 = findCandidate(M, Id1);
diff --git a/lib/Tooling/AllTUsExecution.cpp b/lib/Tooling/AllTUsExecution.cpp
index 0f56bbf13f..0f172b7829 100644
--- a/lib/Tooling/AllTUsExecution.cpp
+++ b/lib/Tooling/AllTUsExecution.cpp
@@ -53,6 +53,12 @@ private:
} // namespace
+llvm::cl::opt<std::string>
+ Filter("filter",
+ llvm::cl::desc("Only process files that match this filter. "
+ "This flag only applies to all-TUs."),
+ llvm::cl::init(".*"));
+
AllTUsToolExecutor::AllTUsToolExecutor(
const CompilationDatabase &Compilations, unsigned ThreadCount,
std::shared_ptr<PCHContainerOperations> PCHContainerOps)
@@ -90,7 +96,12 @@ llvm::Error AllTUsToolExecutor::execute(
llvm::errs() << Msg.str() << "\n";
};
- auto Files = Compilations.getAllFiles();
+ std::vector<std::string> Files;
+ llvm::Regex RegexFilter(Filter);
+ for (const auto& File : Compilations.getAllFiles()) {
+ if (RegexFilter.match(File))
+ Files.push_back(File);
+ }
// Add a counter to track the progress.
const std::string TotalNumStr = std::to_string(Files.size());
unsigned Counter = 0;
@@ -147,7 +158,8 @@ llvm::Error AllTUsToolExecutor::execute(
static llvm::cl::opt<unsigned> ExecutorConcurrency(
"execute-concurrency",
llvm::cl::desc("The number of threads used to process all files in "
- "parallel. Set to 0 for hardware concurrency."),
+ "parallel. Set to 0 for hardware concurrency. "
+ "This flag only applies to all-TUs."),
llvm::cl::init(0));
class AllTUsToolExecutorPlugin : public ToolExecutorPlugin {
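
The new -filter option is interpreted as an llvm::Regex applied to every file returned by the compilation database, as the hunk above shows; a minimal standalone sketch of that filtering step (the helper name and the input list are illustrative only):

    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/Regex.h"
    #include <string>
    #include <vector>

    std::vector<std::string> filterFiles(const std::vector<std::string> &All,
                                         llvm::StringRef Pattern) {
      llvm::Regex RegexFilter(Pattern);   // default pattern is ".*" (match everything)
      std::vector<std::string> Files;
      for (const std::string &File : All)
        if (RegexFilter.match(File))      // keep only paths matching the filter
          Files.push_back(File);
      return Files;
    }
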
diff --git a/lib/Tooling/CMakeLists.txt b/lib/Tooling/CMakeLists.txt
index 031d8b51de..4b671e299a 100644
--- a/lib/Tooling/CMakeLists.txt
+++ b/lib/Tooling/CMakeLists.txt
@@ -35,5 +35,6 @@ add_clang_library(clangTooling
clangFrontend
clangLex
clangRewrite
+ clangSerialization
clangToolingCore
)
diff --git a/lib/Tooling/Core/Diagnostic.cpp b/lib/Tooling/Core/Diagnostic.cpp
index 9e4833f2ef..e3a33d9a37 100644
--- a/lib/Tooling/Core/Diagnostic.cpp
+++ b/lib/Tooling/Core/Diagnostic.cpp
@@ -23,10 +23,15 @@ DiagnosticMessage::DiagnosticMessage(llvm::StringRef Message)
DiagnosticMessage::DiagnosticMessage(llvm::StringRef Message,
const SourceManager &Sources,
SourceLocation Loc)
- : Message(Message) {
+ : Message(Message), FileOffset(0) {
assert(Loc.isValid() && Loc.isFileID());
FilePath = Sources.getFilename(Loc);
- FileOffset = Sources.getFileOffset(Loc);
+
+ // Don't store offset in the scratch space. It doesn't tell anything to the
+ // user. Moreover, it depends on the history of macro expansions and thus
+ // prevents deduplication of warnings in headers.
+ if (!FilePath.empty())
+ FileOffset = Sources.getFileOffset(Loc);
}
Diagnostic::Diagnostic(llvm::StringRef DiagnosticName,
diff --git a/lib/Tooling/Core/Replacement.cpp b/lib/Tooling/Core/Replacement.cpp
index f97bdbf6be..3b7e39814a 100644
--- a/lib/Tooling/Core/Replacement.cpp
+++ b/lib/Tooling/Core/Replacement.cpp
@@ -19,7 +19,6 @@
#include "clang/Basic/FileSystemOptions.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Lex/Lexer.h"
#include "clang/Rewrite/Core/RewriteBuffer.h"
#include "clang/Rewrite/Core/Rewriter.h"
@@ -29,6 +28,7 @@
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
@@ -583,8 +583,8 @@ llvm::Expected<std::string> applyAllReplacements(StringRef Code,
if (Replaces.empty())
return Code.str();
- IntrusiveRefCntPtr<vfs::InMemoryFileSystem> InMemoryFileSystem(
- new vfs::InMemoryFileSystem);
+ IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
+ new llvm::vfs::InMemoryFileSystem);
FileManager Files(FileSystemOptions(), InMemoryFileSystem);
DiagnosticsEngine Diagnostics(
IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs),
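
After this change the virtual file system types come from llvm/Support/VirtualFileSystem.h under the llvm::vfs namespace. A minimal sketch of setting up a FileManager over an in-memory file system in the new spelling, mirroring the updated applyAllReplacements code (the file name and buffer contents are arbitrary):

    #include "clang/Basic/FileManager.h"
    #include "clang/Basic/FileSystemOptions.h"
    #include "llvm/ADT/IntrusiveRefCntPtr.h"
    #include "llvm/Support/MemoryBuffer.h"
    #include "llvm/Support/VirtualFileSystem.h"

    void makeInMemoryFileManager() {
      llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> FS(
          new llvm::vfs::InMemoryFileSystem);
      // Register a virtual file before handing the file system to a FileManager.
      FS->addFile("input.cpp", /*ModificationTime=*/0,
                  llvm::MemoryBuffer::getMemBuffer("int main() {}\n"));
      clang::FileManager Files(clang::FileSystemOptions(), FS);
      (void)Files;
    }
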
diff --git a/lib/Tooling/Execution.cpp b/lib/Tooling/Execution.cpp
index 7ae67747ac..9ddb18a57b 100644
--- a/lib/Tooling/Execution.cpp
+++ b/lib/Tooling/Execution.cpp
@@ -16,7 +16,7 @@ LLVM_INSTANTIATE_REGISTRY(clang::tooling::ToolExecutorPluginRegistry)
namespace clang {
namespace tooling {
-static llvm::cl::opt<std::string>
+llvm::cl::opt<std::string>
ExecutorName("executor", llvm::cl::desc("The name of the executor to use."),
llvm::cl::init("standalone"));
diff --git a/lib/Tooling/Inclusions/HeaderIncludes.cpp b/lib/Tooling/Inclusions/HeaderIncludes.cpp
index 21a72aa8e1..c74ad0b9cd 100644
--- a/lib/Tooling/Inclusions/HeaderIncludes.cpp
+++ b/lib/Tooling/Inclusions/HeaderIncludes.cpp
@@ -24,8 +24,7 @@ LangOptions createLangOpts() {
LangOpts.LineComment = 1;
LangOpts.CXXOperatorNames = 1;
LangOpts.Bool = 1;
- LangOpts.ObjC1 = 1;
- LangOpts.ObjC2 = 1;
+ LangOpts.ObjC = 1;
LangOpts.MicrosoftExt = 1; // To get kw___try, kw___finally.
LangOpts.DeclSpecKeyword = 1; // To get __declspec.
LangOpts.WChar = 1; // To get wchar_t
diff --git a/lib/Tooling/StandaloneExecution.cpp b/lib/Tooling/StandaloneExecution.cpp
index 7312baf9dc..1daf792fb8 100644
--- a/lib/Tooling/StandaloneExecution.cpp
+++ b/lib/Tooling/StandaloneExecution.cpp
@@ -30,7 +30,7 @@ static ArgumentsAdjuster getDefaultArgumentsAdjusters() {
StandaloneToolExecutor::StandaloneToolExecutor(
const CompilationDatabase &Compilations,
llvm::ArrayRef<std::string> SourcePaths,
- IntrusiveRefCntPtr<vfs::FileSystem> BaseFS,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> BaseFS,
std::shared_ptr<PCHContainerOperations> PCHContainerOps)
: Tool(Compilations, SourcePaths, std::move(PCHContainerOps),
std::move(BaseFS)),
diff --git a/lib/Tooling/Tooling.cpp b/lib/Tooling/Tooling.cpp
index 395d2d7a04..cfdb32f0ae 100644
--- a/lib/Tooling/Tooling.cpp
+++ b/lib/Tooling/Tooling.cpp
@@ -19,7 +19,6 @@
#include "clang/Basic/FileManager.h"
#include "clang/Basic/FileSystemOptions.h"
#include "clang/Basic/LLVM.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/Job.h"
@@ -51,6 +50,7 @@
#include "llvm/Support/Host.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstring>
@@ -74,9 +74,9 @@ FrontendActionFactory::~FrontendActionFactory() = default;
// it to be based on the same framework.
/// Builds a clang driver initialized for running clang tools.
-static driver::Driver *newDriver(
- DiagnosticsEngine *Diagnostics, const char *BinaryName,
- IntrusiveRefCntPtr<vfs::FileSystem> VFS) {
+static driver::Driver *
+newDriver(DiagnosticsEngine *Diagnostics, const char *BinaryName,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS) {
driver::Driver *CompilerDriver =
new driver::Driver(BinaryName, llvm::sys::getDefaultTargetTriple(),
*Diagnostics, std::move(VFS));
@@ -155,7 +155,7 @@ namespace tooling {
bool runToolOnCodeWithArgs(
FrontendAction *ToolAction, const Twine &Code,
- llvm::IntrusiveRefCntPtr<vfs::FileSystem> VFS,
+ llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS,
const std::vector<std::string> &Args, const Twine &FileName,
const Twine &ToolName,
std::shared_ptr<PCHContainerOperations> PCHContainerOps) {
@@ -178,10 +178,10 @@ bool runToolOnCodeWithArgs(
const Twine &ToolName,
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
const FileContentMappings &VirtualMappedFiles) {
- llvm::IntrusiveRefCntPtr<vfs::OverlayFileSystem> OverlayFileSystem(
- new vfs::OverlayFileSystem(vfs::getRealFileSystem()));
- llvm::IntrusiveRefCntPtr<vfs::InMemoryFileSystem> InMemoryFileSystem(
- new vfs::InMemoryFileSystem);
+ llvm::IntrusiveRefCntPtr<llvm::vfs::OverlayFileSystem> OverlayFileSystem(
+ new llvm::vfs::OverlayFileSystem(llvm::vfs::getRealFileSystem()));
+ llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
+ new llvm::vfs::InMemoryFileSystem);
OverlayFileSystem->pushOverlay(InMemoryFileSystem);
SmallString<1024> CodeStorage;
@@ -199,7 +199,7 @@ bool runToolOnCodeWithArgs(
FileName, ToolName);
}
-llvm::Expected<std::string> getAbsolutePath(vfs::FileSystem &FS,
+llvm::Expected<std::string> getAbsolutePath(llvm::vfs::FileSystem &FS,
StringRef File) {
StringRef RelativePath(File);
// FIXME: Should '.\\' be accepted on Win32?
@@ -215,7 +215,7 @@ llvm::Expected<std::string> getAbsolutePath(vfs::FileSystem &FS,
}
std::string getAbsolutePath(StringRef File) {
- return llvm::cantFail(getAbsolutePath(*vfs::getRealFileSystem(), File));
+ return llvm::cantFail(getAbsolutePath(*llvm::vfs::getRealFileSystem(), File));
}
void addTargetAndModeForProgramName(std::vector<std::string> &CommandLine,
@@ -303,8 +303,12 @@ bool ToolInvocation::run() {
const std::unique_ptr<driver::Driver> Driver(
newDriver(&Diagnostics, BinaryName, Files->getVirtualFileSystem()));
- // Since the input might only be virtual, don't check whether it exists.
- Driver->setCheckInputsExist(false);
+ // The "input file not found" diagnostics from the driver are useful.
+ // The driver is only aware of the VFS working directory, but some clients
+ // change this at the FileManager level instead.
+ // In this case the checks have false positives, so skip them.
+ if (!Files->getFileSystemOpts().WorkingDir.empty())
+ Driver->setCheckInputsExist(false);
const std::unique_ptr<driver::Compilation> Compilation(
Driver->BuildCompilation(llvm::makeArrayRef(Argv)));
if (!Compilation)
@@ -372,11 +376,11 @@ bool FrontendActionFactory::runInvocation(
ClangTool::ClangTool(const CompilationDatabase &Compilations,
ArrayRef<std::string> SourcePaths,
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
- IntrusiveRefCntPtr<vfs::FileSystem> BaseFS)
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> BaseFS)
: Compilations(Compilations), SourcePaths(SourcePaths),
PCHContainerOps(std::move(PCHContainerOps)),
- OverlayFileSystem(new vfs::OverlayFileSystem(std::move(BaseFS))),
- InMemoryFileSystem(new vfs::InMemoryFileSystem),
+ OverlayFileSystem(new llvm::vfs::OverlayFileSystem(std::move(BaseFS))),
+ InMemoryFileSystem(new llvm::vfs::InMemoryFileSystem),
Files(new FileManager(FileSystemOptions(), OverlayFileSystem)) {
OverlayFileSystem->pushOverlay(InMemoryFileSystem);
appendArgumentsAdjuster(getClangStripOutputAdjuster());
@@ -586,10 +590,10 @@ std::unique_ptr<ASTUnit> buildASTFromCodeWithArgs(
std::vector<std::unique_ptr<ASTUnit>> ASTs;
ASTBuilderAction Action(ASTs);
- llvm::IntrusiveRefCntPtr<vfs::OverlayFileSystem> OverlayFileSystem(
- new vfs::OverlayFileSystem(vfs::getRealFileSystem()));
- llvm::IntrusiveRefCntPtr<vfs::InMemoryFileSystem> InMemoryFileSystem(
- new vfs::InMemoryFileSystem);
+ llvm::IntrusiveRefCntPtr<llvm::vfs::OverlayFileSystem> OverlayFileSystem(
+ new llvm::vfs::OverlayFileSystem(llvm::vfs::getRealFileSystem()));
+ llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
+ new llvm::vfs::InMemoryFileSystem);
OverlayFileSystem->pushOverlay(InMemoryFileSystem);
llvm::IntrusiveRefCntPtr<FileManager> Files(
new FileManager(FileSystemOptions(), OverlayFileSystem));